mirror of https://gitee.com/openkylin/qemu.git
QOM CPUState refactorings
* Fix for OpenRISCCPU subclasses
* Fix for gdbstub CPU selection
* Move linux-user CPU functions into new header
* CPUState part 10 refactoring: first_cpu, next_cpu, cpu_single_env et al.
* Fix some targets to consistently inline TCG code generation
* Centrally log CPU reset

-----BEGIN PGP SIGNATURE-----
Version: GnuPG v2.0.19 (GNU/Linux)

iQIcBAABAgAGBQJR3VkXAAoJEPou0S0+fgE/KFQP/3eUyCzZ6QmUG3gmrnfYRDMH
uwMstD1JRUc5kTEC2bMtld8zZKwx2kxMJpe5fizig8GaLka0J5U2wyvwskkX27ag
7ouNwFdD/dOmvaKfcqHYKbA3CTuIrbnMm7nzrXpLnWXCiMlW1XmXttQsb3hoAjjt
asFxQIHONNIgqpcJBrz/C6XX2bEkLra4s2QlXPE5Bl3QkKTtK9+NYahHtgIk3Y7Y
fqbAxebNGh9eZ9PKjPExhNBZ17Yi4ciM7UB7yrXFYOfwKSpmmTsJdu/m776b1oAK
c/zWO0uea+sLsMnibnSD1foeeZJItDQDRid+PjC44zB5kS8pkPcT5+TVB04Zilap
rhNF2Fox+fe8eIc/2WuY3ZGchVjrD/EPbFFCCRQ/qI3Nb98WfLCDu3pAP1hRdo+p
P6qCH5JmWYcR+2gp8MHY0NtqcklL8A2HpQTRvX1mUliMJbE+unanT4nmKolOTYrm
+6jvp72GkmqqaLQDQ0d8ig/GmcI9QeftSFD5Y8p5prPsMkQbOAbOUSBlPgwY+Syl
QmP8xNNzbj00UF8GvRL/m9O75geis/I+op5E7hJqaO5U1yd+ww5Z1EFvDEkUOeYu
BclqCg1jTnzBzE/FaRP0NWFAUDR+4Z0tumdRES1cDfaMJr3+pYT7y8tjVZn7PEvn
Ljq+/pyyiunG3Mbvw2o8
=lFBU
-----END PGP SIGNATURE-----

Merge remote-tracking branch 'afaerber/tags/qom-cpu-for-anthony' into staging

QOM CPUState refactorings

* Fix for OpenRISCCPU subclasses
* Fix for gdbstub CPU selection
* Move linux-user CPU functions into new header
* CPUState part 10 refactoring: first_cpu, next_cpu, cpu_single_env et al.
* Fix some targets to consistently inline TCG code generation
* Centrally log CPU reset

# gpg: Signature made Wed 10 Jul 2013 07:52:39 AM CDT using RSA key ID 3E7E013F
# gpg: Can't check signature: public key not found

# By Andreas Färber (41) and others
# Via Andreas Färber
* afaerber/tags/qom-cpu-for-anthony: (43 commits)
  cpu: Move reset logging to CPUState
  target-ppc: Change LOG_MMU_STATE() argument to CPUState
  target-i386: Change LOG_PCALL_STATE() argument to CPUState
  log: Change log_cpu_state[_mask]() argument to CPUState
  target-i386: Change do_smm_enter() argument to X86CPU
  target-i386: Change do_interrupt_all() argument to X86CPU
  target-xtensa: Change gen_intermediate_code_internal() arg to XtensaCPU
  target-unicore32: Change gen_intermediate_code_internal() signature
  target-sparc: Change gen_intermediate_code_internal() argument to SPARCCPU
  target-sh4: Change gen_intermediate_code_internal() argument to SuperHCPU
  target-s390x: Change gen_intermediate_code_internal() argument to S390CPU
  target-ppc: Change gen_intermediate_code_internal() argument to PowerPCCPU
  target-mips: Change gen_intermediate_code_internal() argument to MIPSCPU
  target-microblaze: Change gen_intermediate_code_internal() argument types
  target-m68k: Change gen_intermediate_code_internal() argument to M68kCPU
  target-lm32: Change gen_intermediate_code_internal() argument to LM32CPU
  target-i386: Change gen_intermediate_code_internal() argument to X86CPU
  target-cris: Change gen_intermediate_code_internal() argument to CRISCPU
  target-arm: Change gen_intermediate_code_internal() argument to ARMCPU
  target-alpha: Change gen_intermediate_code_internal() argument to AlphaCPU
  ...
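The recurring pattern of this series is visible in every hunk below: code that used to walk the CPU list through the per-target CPUArchState (first_cpu, env->next_cpu) or consult cpu_single_env now goes through the target-independent CPUState (cpu->next_cpu, current_cpu), reaching the architecture-specific registers via cpu->env_ptr or a QOM cast only where still needed. A minimal before/after sketch of that pattern (simplified illustration, not taken verbatim from any one file; do_something() is a placeholder):

    /* Before: the list is threaded through the target-specific CPUArchState,
     * and callers cast back to CPUState with ENV_GET_CPU(). */
    CPUArchState *env;
    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        do_something(ENV_GET_CPU(env));
    }

    /* After: the list is threaded through the common CPUState; per-target
     * state, where still required, is reached via cpu->env_ptr or a QOM
     * cast such as X86_CPU(cpu). */
    CPUState *cpu;
    for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
        do_something(cpu);
    }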
commit 51455c59dd
@@ -98,7 +98,7 @@ enum {
 static const char *get_elf_platform(void)
 {
     static char elf_platform[] = "i386";
-    int family = (thread_env->cpuid_version >> 8) & 0xff;
+    int family = object_property_get_int(OBJECT(thread_cpu), "family", NULL);
     if (family > 6)
         family = 6;
     if (family >= 3)
@@ -110,7 +110,9 @@ static const char *get_elf_platform(void)
 
 static uint32_t get_elf_hwcap(void)
 {
-    return thread_env->features[FEAT_1_EDX];
+    X86CPU *cpu = X86_CPU(thread_cpu);
+
+    return cpu->env.features[FEAT_1_EDX];
 }
 
 #ifdef TARGET_X86_64

@@ -92,7 +92,7 @@ void fork_start(void)
 void fork_end(int child)
 {
     if (child) {
-        gdbserver_fork(thread_env);
+        gdbserver_fork((CPUArchState *)thread_cpu->env_ptr);
     }
 }
 
@@ -713,7 +713,7 @@ static void usage(void)
     exit(1);
 }
 
-THREAD CPUArchState *thread_env;
+THREAD CPUState *thread_cpu;
 
 /* Assumes contents are already zeroed. */
 void init_task_state(TaskState *ts)
@@ -915,7 +915,7 @@ int main(int argc, char **argv)
 #if defined(TARGET_SPARC) || defined(TARGET_PPC)
     cpu_reset(ENV_GET_CPU(env));
 #endif
-    thread_env = env;
+    thread_cpu = ENV_GET_CPU(env);
 
     if (getenv("QEMU_STRACE")) {
         do_strace = 1;

@@ -139,7 +139,7 @@ abi_long do_openbsd_syscall(void *cpu_env, int num, abi_long arg1,
                            abi_long arg2, abi_long arg3, abi_long arg4,
                            abi_long arg5, abi_long arg6);
 void gemu_log(const char *fmt, ...) GCC_FMT_ATTR(1, 2);
-extern THREAD CPUArchState *thread_env;
+extern THREAD CPUState *thread_cpu;
 void cpu_loop(CPUArchState *env);
 char *target_strerror(int err);
 int get_osversion(void);
cpu-exec.c | 21

@@ -213,12 +213,12 @@ int cpu_exec(CPUArchState *env)
         cpu->halted = 0;
     }
 
-    cpu_single_env = env;
+    current_cpu = cpu;
 
-    /* As long as cpu_single_env is null, up to the assignment just above,
+    /* As long as current_cpu is null, up to the assignment just above,
      * requests by other threads to exit the execution loop are expected to
      * be issued using the exit_request global. We must make sure that our
-     * evaluation of the global value is performed past the cpu_single_env
+     * evaluation of the global value is performed past the current_cpu
      * value transition point, which requires a memory barrier as well as
      * an instruction scheduling constraint on modern architectures. */
     smp_mb();
@@ -331,7 +331,7 @@ int cpu_exec(CPUArchState *env)
                     cpu_svm_check_intercept_param(env, SVM_EXIT_SMI,
                                                   0);
                     cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
-                    do_smm_enter(env);
+                    do_smm_enter(x86_env_get_cpu(env));
                     next_tb = 0;
                 } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                            !(env->hflags2 & HF2_NMI_MASK)) {
@@ -577,15 +577,15 @@ int cpu_exec(CPUArchState *env)
                 if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                     /* restore flags in standard format */
 #if defined(TARGET_I386)
-                    log_cpu_state(env, CPU_DUMP_CCOP);
+                    log_cpu_state(cpu, CPU_DUMP_CCOP);
 #elif defined(TARGET_M68K)
                     cpu_m68k_flush_flags(env, env->cc_op);
                     env->cc_op = CC_OP_FLAGS;
                     env->sr = (env->sr & 0xffe0)
                               | env->cc_dest | (env->cc_x << 4);
-                    log_cpu_state(env, 0);
+                    log_cpu_state(cpu, 0);
 #else
-                    log_cpu_state(env, 0);
+                    log_cpu_state(cpu, 0);
 #endif
                 }
 #endif /* DEBUG_DISAS */
@@ -673,7 +673,8 @@ int cpu_exec(CPUArchState *env)
         } else {
             /* Reload env after longjmp - the compiler may have smashed all
              * local variables as longjmp is marked 'noreturn'. */
-            env = cpu_single_env;
+            cpu = current_cpu;
+            env = cpu->env_ptr;
         }
     } /* for(;;) */
 
@@ -707,7 +708,7 @@ int cpu_exec(CPUArchState *env)
 #error unsupported target CPU
 #endif
 
-    /* fail safe : never use cpu_single_env outside cpu_exec() */
-    cpu_single_env = NULL;
+    /* fail safe : never use current_cpu outside cpu_exec() */
+    current_cpu = NULL;
     return ret;
 }
cpus.c | 167

@@ -60,7 +60,7 @@
 
 #endif /* CONFIG_LINUX */
 
-static CPUArchState *next_cpu;
+static CPUState *next_cpu;
 
 static bool cpu_thread_is_idle(CPUState *cpu)
 {
@@ -79,10 +79,10 @@ static bool cpu_thread_is_idle(CPUState *cpu)
 
 static bool all_cpu_threads_idle(void)
 {
-    CPUArchState *env;
+    CPUState *cpu;
 
-    for (env = first_cpu; env != NULL; env = env->next_cpu) {
-        if (!cpu_thread_is_idle(ENV_GET_CPU(env))) {
+    for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
+        if (!cpu_thread_is_idle(cpu)) {
             return false;
         }
     }
@@ -118,10 +118,11 @@ TimersState timers_state;
 int64_t cpu_get_icount(void)
 {
     int64_t icount;
-    CPUArchState *env = cpu_single_env;
+    CPUState *cpu = current_cpu;
 
     icount = qemu_icount;
-    if (env) {
+    if (cpu) {
+        CPUArchState *env = cpu->env_ptr;
         if (!can_do_io(env)) {
             fprintf(stderr, "Bad clock read\n");
         }
@@ -387,15 +388,13 @@ void configure_icount(const char *option)
 void hw_error(const char *fmt, ...)
 {
     va_list ap;
-    CPUArchState *env;
     CPUState *cpu;
 
     va_start(ap, fmt);
     fprintf(stderr, "qemu: hardware error: ");
     vfprintf(stderr, fmt, ap);
     fprintf(stderr, "\n");
-    for (env = first_cpu; env != NULL; env = env->next_cpu) {
-        cpu = ENV_GET_CPU(env);
+    for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
         fprintf(stderr, "CPU #%d:\n", cpu->cpu_index);
         cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU);
     }
@@ -405,28 +404,28 @@ void hw_error(const char *fmt, ...)
 
 void cpu_synchronize_all_states(void)
 {
-    CPUArchState *env;
+    CPUState *cpu;
 
-    for (env = first_cpu; env; env = env->next_cpu) {
-        cpu_synchronize_state(ENV_GET_CPU(env));
+    for (cpu = first_cpu; cpu; cpu = cpu->next_cpu) {
+        cpu_synchronize_state(cpu);
     }
 }
 
 void cpu_synchronize_all_post_reset(void)
 {
-    CPUArchState *cpu;
+    CPUState *cpu;
 
     for (cpu = first_cpu; cpu; cpu = cpu->next_cpu) {
-        cpu_synchronize_post_reset(ENV_GET_CPU(cpu));
+        cpu_synchronize_post_reset(cpu);
     }
 }
 
 void cpu_synchronize_all_post_init(void)
 {
-    CPUArchState *cpu;
+    CPUState *cpu;
 
     for (cpu = first_cpu; cpu; cpu = cpu->next_cpu) {
-        cpu_synchronize_post_init(ENV_GET_CPU(cpu));
+        cpu_synchronize_post_init(cpu);
     }
 }
 
@@ -468,8 +467,8 @@ static void cpu_handle_guest_debug(CPUState *cpu)
 
 static void cpu_signal(int sig)
 {
-    if (cpu_single_env) {
-        cpu_exit(ENV_GET_CPU(cpu_single_env));
+    if (current_cpu) {
+        cpu_exit(current_cpu);
     }
     exit_request = 1;
 }
@@ -660,10 +659,10 @@ void run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
 
     qemu_cpu_kick(cpu);
     while (!wi.done) {
-        CPUArchState *self_env = cpu_single_env;
+        CPUState *self_cpu = current_cpu;
 
         qemu_cond_wait(&qemu_work_cond, &qemu_global_mutex);
-        cpu_single_env = self_env;
+        current_cpu = self_cpu;
     }
 }
 
@@ -697,7 +696,7 @@ static void qemu_wait_io_event_common(CPUState *cpu)
 
 static void qemu_tcg_wait_io_event(void)
 {
-    CPUArchState *env;
+    CPUState *cpu;
 
     while (all_cpu_threads_idle()) {
         /* Start accounting real time to the virtual clock if the CPUs
@@ -710,8 +709,8 @@ static void qemu_tcg_wait_io_event(void)
         qemu_cond_wait(&qemu_io_proceeded_cond, &qemu_global_mutex);
     }
 
-    for (env = first_cpu; env != NULL; env = env->next_cpu) {
-        qemu_wait_io_event_common(ENV_GET_CPU(env));
+    for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
+        qemu_wait_io_event_common(cpu);
     }
 }
 
@@ -733,7 +732,7 @@ static void *qemu_kvm_cpu_thread_fn(void *arg)
     qemu_mutex_lock(&qemu_global_mutex);
     qemu_thread_get_self(cpu->thread);
     cpu->thread_id = qemu_get_thread_id();
-    cpu_single_env = cpu->env_ptr;
+    current_cpu = cpu;
 
     r = kvm_init_vcpu(cpu);
     if (r < 0) {
@@ -781,9 +780,9 @@ static void *qemu_dummy_cpu_thread_fn(void *arg)
     cpu->created = true;
     qemu_cond_signal(&qemu_cpu_cond);
 
-    cpu_single_env = cpu->env_ptr;
+    current_cpu = cpu;
     while (1) {
-        cpu_single_env = NULL;
+        current_cpu = NULL;
         qemu_mutex_unlock_iothread();
         do {
             int sig;
@@ -794,7 +793,7 @@ static void *qemu_dummy_cpu_thread_fn(void *arg)
             exit(1);
         }
         qemu_mutex_lock_iothread();
-        cpu_single_env = cpu->env_ptr;
+        current_cpu = cpu;
         qemu_wait_io_event_common(cpu);
     }
 
@@ -813,7 +812,6 @@ static void tcg_signal_cpu_creation(CPUState *cpu, void *data)
 static void *qemu_tcg_cpu_thread_fn(void *arg)
 {
     CPUState *cpu = arg;
-    CPUArchState *env;
 
     qemu_tcg_init_cpu_signals();
     qemu_thread_get_self(cpu->thread);
@@ -823,12 +821,12 @@ static void *qemu_tcg_cpu_thread_fn(void *arg)
     qemu_cond_signal(&qemu_cpu_cond);
 
     /* wait for initial kick-off after machine start */
-    while (ENV_GET_CPU(first_cpu)->stopped) {
+    while (first_cpu->stopped) {
         qemu_cond_wait(tcg_halt_cond, &qemu_global_mutex);
 
         /* process any pending work */
-        for (env = first_cpu; env != NULL; env = env->next_cpu) {
-            qemu_wait_io_event_common(ENV_GET_CPU(env));
+        for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
+            qemu_wait_io_event_common(cpu);
         }
     }
 
@@ -894,12 +892,11 @@ void qemu_cpu_kick(CPUState *cpu)
 void qemu_cpu_kick_self(void)
 {
 #ifndef _WIN32
-    assert(cpu_single_env);
-    CPUState *cpu_single_cpu = ENV_GET_CPU(cpu_single_env);
+    assert(current_cpu);
 
-    if (!cpu_single_cpu->thread_kicked) {
-        qemu_cpu_kick_thread(cpu_single_cpu);
-        cpu_single_cpu->thread_kicked = true;
+    if (!current_cpu->thread_kicked) {
+        qemu_cpu_kick_thread(current_cpu);
+        current_cpu->thread_kicked = true;
     }
 #else
     abort();
@@ -913,7 +910,7 @@ bool qemu_cpu_is_self(CPUState *cpu)
 
 static bool qemu_in_vcpu_thread(void)
 {
-    return cpu_single_env && qemu_cpu_is_self(ENV_GET_CPU(cpu_single_env));
+    return current_cpu && qemu_cpu_is_self(current_cpu);
 }
 
 void qemu_mutex_lock_iothread(void)
@@ -923,7 +920,7 @@ void qemu_mutex_lock_iothread(void)
     } else {
         iothread_requesting_mutex = true;
         if (qemu_mutex_trylock(&qemu_global_mutex)) {
-            qemu_cpu_kick_thread(ENV_GET_CPU(first_cpu));
+            qemu_cpu_kick_thread(first_cpu);
             qemu_mutex_lock(&qemu_global_mutex);
         }
         iothread_requesting_mutex = false;
@@ -938,14 +935,13 @@ void qemu_mutex_unlock_iothread(void)
 
 static int all_vcpus_paused(void)
 {
-    CPUArchState *penv = first_cpu;
+    CPUState *cpu = first_cpu;
 
-    while (penv) {
-        CPUState *pcpu = ENV_GET_CPU(penv);
-        if (!pcpu->stopped) {
+    while (cpu) {
+        if (!cpu->stopped) {
             return 0;
         }
-        penv = penv->next_cpu;
+        cpu = cpu->next_cpu;
     }
 
     return 1;
@@ -953,25 +949,23 @@ static int all_vcpus_paused(void)
 
 void pause_all_vcpus(void)
 {
-    CPUArchState *penv = first_cpu;
+    CPUState *cpu = first_cpu;
 
     qemu_clock_enable(vm_clock, false);
-    while (penv) {
-        CPUState *pcpu = ENV_GET_CPU(penv);
-        pcpu->stop = true;
-        qemu_cpu_kick(pcpu);
-        penv = penv->next_cpu;
+    while (cpu) {
+        cpu->stop = true;
+        qemu_cpu_kick(cpu);
+        cpu = cpu->next_cpu;
     }
 
     if (qemu_in_vcpu_thread()) {
         cpu_stop_current();
         if (!kvm_enabled()) {
-            penv = first_cpu;
-            while (penv) {
-                CPUState *pcpu = ENV_GET_CPU(penv);
-                pcpu->stop = false;
-                pcpu->stopped = true;
-                penv = penv->next_cpu;
+            cpu = first_cpu;
+            while (cpu) {
+                cpu->stop = false;
+                cpu->stopped = true;
+                cpu = cpu->next_cpu;
             }
             return;
         }
@@ -979,10 +973,10 @@ void pause_all_vcpus(void)
 
     while (!all_vcpus_paused()) {
         qemu_cond_wait(&qemu_pause_cond, &qemu_global_mutex);
-        penv = first_cpu;
-        while (penv) {
-            qemu_cpu_kick(ENV_GET_CPU(penv));
-            penv = penv->next_cpu;
+        cpu = first_cpu;
+        while (cpu) {
+            qemu_cpu_kick(cpu);
+            cpu = cpu->next_cpu;
         }
     }
 }
@@ -996,13 +990,12 @@ void cpu_resume(CPUState *cpu)
 
 void resume_all_vcpus(void)
 {
-    CPUArchState *penv = first_cpu;
+    CPUState *cpu = first_cpu;
 
     qemu_clock_enable(vm_clock, true);
-    while (penv) {
-        CPUState *pcpu = ENV_GET_CPU(penv);
-        cpu_resume(pcpu);
-        penv = penv->next_cpu;
+    while (cpu) {
+        cpu_resume(cpu);
+        cpu = cpu->next_cpu;
     }
 }
 
@@ -1069,11 +1062,10 @@ void qemu_init_vcpu(CPUState *cpu)
 
 void cpu_stop_current(void)
 {
-    if (cpu_single_env) {
-        CPUState *cpu_single_cpu = ENV_GET_CPU(cpu_single_env);
-        cpu_single_cpu->stop = false;
-        cpu_single_cpu->stopped = true;
-        cpu_exit(cpu_single_cpu);
+    if (current_cpu) {
+        current_cpu->stop = false;
+        current_cpu->stopped = true;
+        cpu_exit(current_cpu);
         qemu_cond_signal(&qemu_pause_cond);
     }
 }
@@ -1152,8 +1144,8 @@ static void tcg_exec_all(void)
         next_cpu = first_cpu;
     }
     for (; next_cpu != NULL && !exit_request; next_cpu = next_cpu->next_cpu) {
-        CPUArchState *env = next_cpu;
-        CPUState *cpu = ENV_GET_CPU(env);
+        CPUState *cpu = next_cpu;
+        CPUArchState *env = cpu->env_ptr;
 
         qemu_clock_enable(vm_clock,
                           (env->singlestep_enabled & SSTEP_NOTIMER) == 0);
@@ -1173,12 +1165,10 @@ static void tcg_exec_all(void)
 
 void set_numa_modes(void)
 {
-    CPUArchState *env;
     CPUState *cpu;
     int i;
 
-    for (env = first_cpu; env != NULL; env = env->next_cpu) {
-        cpu = ENV_GET_CPU(env);
+    for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
        for (i = 0; i < nb_numa_nodes; i++) {
             if (test_bit(cpu->cpu_index, node_cpumask[i])) {
                 cpu->numa_node = i;
@@ -1198,18 +1188,30 @@ void list_cpus(FILE *f, fprintf_function cpu_fprintf, const char *optarg)
 CpuInfoList *qmp_query_cpus(Error **errp)
 {
     CpuInfoList *head = NULL, *cur_item = NULL;
-    CPUArchState *env;
+    CPUState *cpu;
 
-    for (env = first_cpu; env != NULL; env = env->next_cpu) {
-        CPUState *cpu = ENV_GET_CPU(env);
+    for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
         CpuInfoList *info;
+#if defined(TARGET_I386)
+        X86CPU *x86_cpu = X86_CPU(cpu);
+        CPUX86State *env = &x86_cpu->env;
+#elif defined(TARGET_PPC)
+        PowerPCCPU *ppc_cpu = POWERPC_CPU(cpu);
+        CPUPPCState *env = &ppc_cpu->env;
+#elif defined(TARGET_SPARC)
+        SPARCCPU *sparc_cpu = SPARC_CPU(cpu);
+        CPUSPARCState *env = &sparc_cpu->env;
+#elif defined(TARGET_MIPS)
+        MIPSCPU *mips_cpu = MIPS_CPU(cpu);
+        CPUMIPSState *env = &mips_cpu->env;
+#endif
 
         cpu_synchronize_state(cpu);
 
         info = g_malloc0(sizeof(*info));
         info->value = g_malloc0(sizeof(*info->value));
         info->value->CPU = cpu->cpu_index;
-        info->value->current = (env == first_cpu);
+        info->value->current = (cpu == first_cpu);
         info->value->halted = cpu->halted;
         info->value->thread_id = cpu->thread_id;
 #if defined(TARGET_I386)
@@ -1317,11 +1319,14 @@ exit:
 void qmp_inject_nmi(Error **errp)
 {
 #if defined(TARGET_I386)
-    CPUArchState *env;
+    CPUState *cs;
 
-    for (env = first_cpu; env != NULL; env = env->next_cpu) {
+    for (cs = first_cpu; cs != NULL; cs = cs->next_cpu) {
+        X86CPU *cpu = X86_CPU(cs);
+        CPUX86State *env = &cpu->env;
+
         if (!env->apic_state) {
-            cpu_interrupt(CPU(x86_env_get_cpu(env)), CPU_INTERRUPT_NMI);
+            cpu_interrupt(cs, CPU_INTERRUPT_NMI);
         } else {
             apic_deliver_nmi(env->apic_state);
         }
cputlb.c | 4

@@ -186,11 +186,13 @@ static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
 
 void cpu_tlb_reset_dirty_all(ram_addr_t start1, ram_addr_t length)
 {
+    CPUState *cpu;
     CPUArchState *env;
 
-    for (env = first_cpu; env != NULL; env = env->next_cpu) {
+    for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
         int mmu_idx;
 
+        env = cpu->env_ptr;
         for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
             unsigned int i;
 
dump.c | 16

@@ -275,13 +275,11 @@ static inline int cpu_index(CPUState *cpu)
 
 static int write_elf64_notes(DumpState *s)
 {
-    CPUArchState *env;
     CPUState *cpu;
     int ret;
     int id;
 
-    for (env = first_cpu; env != NULL; env = env->next_cpu) {
-        cpu = ENV_GET_CPU(env);
+    for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
         id = cpu_index(cpu);
         ret = cpu_write_elf64_note(fd_write_vmcore, cpu, id, s);
         if (ret < 0) {
@@ -290,7 +288,7 @@ static int write_elf64_notes(DumpState *s)
         }
     }
 
-    for (env = first_cpu; env != NULL; env = env->next_cpu) {
+    for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
         ret = cpu_write_elf64_qemunote(fd_write_vmcore, cpu, s);
         if (ret < 0) {
             dump_error(s, "dump: failed to write CPU status.\n");
@@ -327,13 +325,11 @@ static int write_elf32_note(DumpState *s)
 
 static int write_elf32_notes(DumpState *s)
 {
-    CPUArchState *env;
     CPUState *cpu;
     int ret;
     int id;
 
-    for (env = first_cpu; env != NULL; env = env->next_cpu) {
-        cpu = ENV_GET_CPU(env);
+    for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
         id = cpu_index(cpu);
         ret = cpu_write_elf32_note(fd_write_vmcore, cpu, id, s);
         if (ret < 0) {
@@ -342,7 +338,7 @@ static int write_elf32_notes(DumpState *s)
         }
     }
 
-    for (env = first_cpu; env != NULL; env = env->next_cpu) {
+    for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
         ret = cpu_write_elf32_qemunote(fd_write_vmcore, cpu, s);
         if (ret < 0) {
             dump_error(s, "dump: failed to write CPU status.\n");
@@ -705,7 +701,7 @@ static ram_addr_t get_start_block(DumpState *s)
 static int dump_init(DumpState *s, int fd, bool paging, bool has_filter,
                      int64_t begin, int64_t length, Error **errp)
 {
-    CPUArchState *env;
+    CPUState *cpu;
     int nr_cpus;
     Error *err = NULL;
     int ret;
@@ -738,7 +734,7 @@ static int dump_init(DumpState *s, int fd, bool paging, bool has_filter,
      */
     cpu_synchronize_all_states();
     nr_cpus = 0;
-    for (env = first_cpu; env != NULL; env = env->next_cpu) {
+    for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
         nr_cpus++;
     }
 
exec.c | 57

@@ -69,10 +69,10 @@ static MemoryRegion io_mem_unassigned;
 
 #endif
 
-CPUArchState *first_cpu;
+CPUState *first_cpu;
 /* current CPU in the current thread. It is only valid inside
    cpu_exec() */
-DEFINE_TLS(CPUArchState *,cpu_single_env);
+DEFINE_TLS(CPUState *, current_cpu);
 /* 0 = Do not count executed instructions.
    1 = Precise instruction counting.
    2 = Adaptive rate instruction counting. */
@@ -351,27 +351,26 @@ const VMStateDescription vmstate_cpu_common = {
 
 CPUState *qemu_get_cpu(int index)
 {
-    CPUArchState *env = first_cpu;
-    CPUState *cpu = NULL;
+    CPUState *cpu = first_cpu;
 
-    while (env) {
-        cpu = ENV_GET_CPU(env);
+    while (cpu) {
         if (cpu->cpu_index == index) {
             break;
         }
-        env = env->next_cpu;
+        cpu = cpu->next_cpu;
     }
 
-    return env ? cpu : NULL;
+    return cpu;
 }
 
 void qemu_for_each_cpu(void (*func)(CPUState *cpu, void *data), void *data)
 {
-    CPUArchState *env = first_cpu;
+    CPUState *cpu;
 
-    while (env) {
-        func(ENV_GET_CPU(env), data);
-        env = env->next_cpu;
+    cpu = first_cpu;
+    while (cpu) {
+        func(cpu, data);
+        cpu = cpu->next_cpu;
     }
 }
 
@@ -379,17 +378,17 @@ void cpu_exec_init(CPUArchState *env)
 {
     CPUState *cpu = ENV_GET_CPU(env);
     CPUClass *cc = CPU_GET_CLASS(cpu);
-    CPUArchState **penv;
+    CPUState **pcpu;
     int cpu_index;
 
 #if defined(CONFIG_USER_ONLY)
     cpu_list_lock();
 #endif
-    env->next_cpu = NULL;
-    penv = &first_cpu;
+    cpu->next_cpu = NULL;
+    pcpu = &first_cpu;
     cpu_index = 0;
-    while (*penv != NULL) {
-        penv = &(*penv)->next_cpu;
+    while (*pcpu != NULL) {
+        pcpu = &(*pcpu)->next_cpu;
         cpu_index++;
     }
     cpu->cpu_index = cpu_index;
@@ -399,7 +398,7 @@ void cpu_exec_init(CPUArchState *env)
 #ifndef CONFIG_USER_ONLY
     cpu->thread_id = qemu_get_thread_id();
 #endif
-    *penv = env;
+    *pcpu = cpu;
 #if defined(CONFIG_USER_ONLY)
     cpu_list_unlock();
 #endif
@@ -618,7 +617,7 @@ void cpu_abort(CPUArchState *env, const char *fmt, ...)
         qemu_log("qemu: fatal: ");
         qemu_log_vprintf(fmt, ap2);
         qemu_log("\n");
-        log_cpu_state(env, CPU_DUMP_FPU | CPU_DUMP_CCOP);
+        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
         qemu_log_flush();
         qemu_log_close();
     }
@@ -638,7 +637,6 @@ void cpu_abort(CPUArchState *env, const char *fmt, ...)
 CPUArchState *cpu_copy(CPUArchState *env)
 {
     CPUArchState *new_env = cpu_init(env->cpu_model_str);
-    CPUArchState *next_cpu = new_env->next_cpu;
 #if defined(TARGET_HAS_ICE)
     CPUBreakpoint *bp;
     CPUWatchpoint *wp;
@@ -646,9 +644,6 @@ CPUArchState *cpu_copy(CPUArchState *env)
 
     memcpy(new_env, env, sizeof(CPUArchState));
 
-    /* Preserve chaining. */
-    new_env->next_cpu = next_cpu;
-
     /* Clone all break/watchpoints.
        Note: Once we support ptrace with hw-debug register access, make sure
        BP_CPU break/watchpoints are handled correctly on clone. */
@@ -1467,8 +1462,10 @@ static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
     cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
     /* we remove the notdirty callback only if the code has been
        flushed */
-    if (dirty_flags == 0xff)
-        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
+    if (dirty_flags == 0xff) {
+        CPUArchState *env = current_cpu->env_ptr;
+        tlb_set_dirty(env, env->mem_io_vaddr);
+    }
 }
 
 static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
@@ -1486,7 +1483,7 @@ static const MemoryRegionOps notdirty_mem_ops = {
 /* Generate a debug exception if a watchpoint has been hit. */
 static void check_watchpoint(int offset, int len_mask, int flags)
 {
-    CPUArchState *env = cpu_single_env;
+    CPUArchState *env = current_cpu->env_ptr;
     target_ulong pc, cs_base;
     target_ulong vaddr;
     CPUWatchpoint *wp;
@@ -1750,12 +1747,14 @@ static void core_commit(MemoryListener *listener)
 
 static void tcg_commit(MemoryListener *listener)
 {
-    CPUArchState *env;
+    CPUState *cpu;
 
     /* since each CPU stores ram addresses in its TLB cache, we must
        reset the modified entries */
     /* XXX: slow ! */
-    for(env = first_cpu; env != NULL; env = env->next_cpu) {
+    for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
+        CPUArchState *env = cpu->env_ptr;
+
         tlb_flush(env, 1);
     }
 }
@@ -1925,7 +1924,7 @@ bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
         if (is_write) {
             if (!memory_access_is_direct(mr, is_write)) {
                 l = memory_access_size(mr, l, addr1);
-                /* XXX: could force cpu_single_env to NULL to avoid
+                /* XXX: could force current_cpu to NULL to avoid
                    potential bugs */
                 if (l == 4) {
                     /* 32 bit write access */
gdbstub.c | 45

@@ -1839,6 +1839,7 @@ static const char *get_feature_xml(const char *p, const char **newp)
     /* Generate the XML description for this CPU. */
     if (!target_xml[0]) {
         GDBRegisterState *r;
+        CPUArchState *env = first_cpu->env_ptr;
 
         snprintf(target_xml, sizeof(target_xml),
                  "<?xml version=\"1.0\"?>"
@@ -1847,7 +1848,7 @@ static const char *get_feature_xml(const char *p, const char **newp)
                  "<xi:include href=\"%s\"/>",
                  GDB_CORE_XML);
 
-        for (r = first_cpu->gdb_regs; r; r = r->next) {
+        for (r = env->gdb_regs; r; r = r->next) {
             pstrcat(target_xml, sizeof(target_xml), "<xi:include href=\"");
             pstrcat(target_xml, sizeof(target_xml), r->xml);
             pstrcat(target_xml, sizeof(target_xml), "\"/>");
@@ -1949,6 +1950,7 @@ static const int xlat_gdb_type[] = {
 
 static int gdb_breakpoint_insert(target_ulong addr, target_ulong len, int type)
 {
+    CPUState *cpu;
     CPUArchState *env;
     int err = 0;
 
@@ -1958,7 +1960,8 @@ static int gdb_breakpoint_insert(target_ulong addr, target_ulong len, int type)
     switch (type) {
     case GDB_BREAKPOINT_SW:
     case GDB_BREAKPOINT_HW:
-        for (env = first_cpu; env != NULL; env = env->next_cpu) {
+        for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
+            env = cpu->env_ptr;
             err = cpu_breakpoint_insert(env, addr, BP_GDB, NULL);
             if (err)
                 break;
@@ -1968,7 +1971,8 @@ static int gdb_breakpoint_insert(target_ulong addr, target_ulong len, int type)
     case GDB_WATCHPOINT_WRITE:
     case GDB_WATCHPOINT_READ:
     case GDB_WATCHPOINT_ACCESS:
-        for (env = first_cpu; env != NULL; env = env->next_cpu) {
+        for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
+            env = cpu->env_ptr;
             err = cpu_watchpoint_insert(env, addr, len, xlat_gdb_type[type],
                                         NULL);
             if (err)
@@ -1983,6 +1987,7 @@ static int gdb_breakpoint_insert(target_ulong addr, target_ulong len, int type)
 
 static int gdb_breakpoint_remove(target_ulong addr, target_ulong len, int type)
 {
+    CPUState *cpu;
     CPUArchState *env;
     int err = 0;
 
@@ -1992,7 +1997,8 @@ static int gdb_breakpoint_remove(target_ulong addr, target_ulong len, int type)
     switch (type) {
     case GDB_BREAKPOINT_SW:
     case GDB_BREAKPOINT_HW:
-        for (env = first_cpu; env != NULL; env = env->next_cpu) {
+        for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
+            env = cpu->env_ptr;
             err = cpu_breakpoint_remove(env, addr, BP_GDB);
             if (err)
                 break;
@@ -2002,7 +2008,8 @@ static int gdb_breakpoint_remove(target_ulong addr, target_ulong len, int type)
     case GDB_WATCHPOINT_WRITE:
     case GDB_WATCHPOINT_READ:
     case GDB_WATCHPOINT_ACCESS:
-        for (env = first_cpu; env != NULL; env = env->next_cpu) {
+        for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
+            env = cpu->env_ptr;
             err = cpu_watchpoint_remove(env, addr, len, xlat_gdb_type[type]);
             if (err)
                 break;
@@ -2016,14 +2023,16 @@ static int gdb_breakpoint_remove(target_ulong addr, target_ulong len, int type)
 
 static void gdb_breakpoint_remove_all(void)
 {
+    CPUState *cpu;
     CPUArchState *env;
 
     if (kvm_enabled()) {
-        kvm_remove_all_breakpoints(gdbserver_state->c_cpu);
+        kvm_remove_all_breakpoints(ENV_GET_CPU(gdbserver_state->c_cpu));
         return;
     }
 
-    for (env = first_cpu; env != NULL; env = env->next_cpu) {
+    for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
+        env = cpu->env_ptr;
         cpu_breakpoint_remove_all(env, BP_GDB);
 #ifndef CONFIG_USER_ONLY
         cpu_watchpoint_remove_all(env, BP_GDB);
@@ -2073,11 +2082,13 @@ static CPUArchState *find_cpu(uint32_t thread_id)
 {
     CPUState *cpu;
 
-    cpu = qemu_get_cpu(thread_id);
-    if (cpu == NULL) {
-        return NULL;
-    }
-    return cpu->env_ptr;
+    for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
+        if (cpu_index(cpu) == thread_id) {
+            return cpu->env_ptr;
+        }
+    }
+
+    return NULL;
 }
 
 static int gdb_handle_packet(GDBState *s, const char *line_buf)
@@ -2390,7 +2401,7 @@ static int gdb_handle_packet(GDBState *s, const char *line_buf)
             put_packet(s, "QC1");
             break;
         } else if (strcmp(p,"fThreadInfo") == 0) {
-            s->query_cpu = first_cpu;
+            s->query_cpu = first_cpu->env_ptr;
             goto report_cpuinfo;
         } else if (strcmp(p,"sThreadInfo") == 0) {
         report_cpuinfo:
@@ -2398,7 +2409,7 @@ static int gdb_handle_packet(GDBState *s, const char *line_buf)
                 snprintf(buf, sizeof(buf), "m%x",
                          cpu_index(ENV_GET_CPU(s->query_cpu)));
                 put_packet(s, buf);
-                s->query_cpu = s->query_cpu->next_cpu;
+                s->query_cpu = ENV_GET_CPU(s->query_cpu)->next_cpu->env_ptr;
             } else
                 put_packet(s, "l");
             break;
@@ -2865,8 +2876,8 @@ static void gdb_accept(void)
     socket_set_nodelay(fd);
 
     s = g_malloc0(sizeof(GDBState));
-    s->c_cpu = first_cpu;
-    s->g_cpu = first_cpu;
+    s->c_cpu = first_cpu->env_ptr;
+    s->g_cpu = first_cpu->env_ptr;
     s->fd = fd;
     gdb_has_xml = 0;
 
@@ -3050,8 +3061,8 @@ int gdbserver_start(const char *device)
         mon_chr = s->mon_chr;
         memset(s, 0, sizeof(GDBState));
     }
-    s->c_cpu = first_cpu;
-    s->g_cpu = first_cpu;
+    s->c_cpu = first_cpu->env_ptr;
+    s->g_cpu = first_cpu->env_ptr;
     s->chr = chr;
     s->state = chr ? RS_IDLE : RS_INACTIVE;
     s->mon_chr = mon_chr;
@@ -72,9 +72,8 @@ static void cpu_irq_change(AlphaCPU *cpu, uint64_t req)
 
 static uint64_t cchip_read(void *opaque, hwaddr addr, unsigned size)
 {
-    CPUAlphaState *env = cpu_single_env;
+    CPUState *cpu = current_cpu;
     TyphoonState *s = opaque;
-    CPUState *cpu;
     uint64_t ret = 0;
 
     if (addr & 4) {
@@ -95,7 +94,6 @@ static uint64_t cchip_read(void *opaque, hwaddr addr, unsigned size)
 
     case 0x0080:
         /* MISC: Miscellaneous Register. */
-        cpu = ENV_GET_CPU(env);
         ret = s->cchip.misc | (cpu->cpu_index & 3);
         break;
 
@@ -197,7 +195,6 @@ static uint64_t cchip_read(void *opaque, hwaddr addr, unsigned size)
         break;
 
     default:
-        cpu = CPU(alpha_env_get_cpu(cpu_single_env));
         cpu_unassigned_access(cpu, addr, false, false, 0, size);
         return -1;
     }
@@ -215,7 +212,6 @@ static uint64_t dchip_read(void *opaque, hwaddr addr, unsigned size)
 static uint64_t pchip_read(void *opaque, hwaddr addr, unsigned size)
 {
     TyphoonState *s = opaque;
-    CPUState *cs;
     uint64_t ret = 0;
 
     if (addr & 4) {
@@ -302,8 +298,7 @@ static uint64_t pchip_read(void *opaque, hwaddr addr, unsigned size)
         break;
 
     default:
-        cs = CPU(alpha_env_get_cpu(cpu_single_env));
-        cpu_unassigned_access(cs, addr, false, false, 0, size);
+        cpu_unassigned_access(current_cpu, addr, false, false, 0, size);
         return -1;
     }
 
@@ -315,7 +310,6 @@ static void cchip_write(void *opaque, hwaddr addr,
                         uint64_t v32, unsigned size)
 {
     TyphoonState *s = opaque;
-    CPUState *cpu_single_cpu = CPU(alpha_env_get_cpu(cpu_single_env));
     uint64_t val, oldval, newval;
 
     if (addr & 4) {
@@ -465,7 +459,7 @@ static void cchip_write(void *opaque, hwaddr addr,
         break;
 
     default:
-        cpu_unassigned_access(cpu_single_cpu, addr, true, false, 0, size);
+        cpu_unassigned_access(current_cpu, addr, true, false, 0, size);
         return;
     }
 }
@@ -480,7 +474,6 @@ static void pchip_write(void *opaque, hwaddr addr,
                         uint64_t v32, unsigned size)
 {
     TyphoonState *s = opaque;
-    CPUState *cs;
     uint64_t val, oldval;
 
     if (addr & 4) {
@@ -582,8 +575,7 @@ static void pchip_write(void *opaque, hwaddr addr,
         break;
 
     default:
-        cs = CPU(alpha_env_get_cpu(cpu_single_env));
-        cpu_unassigned_access(cs, addr, true, false, 0, size);
+        cpu_unassigned_access(current_cpu, addr, true, false, 0, size);
         return;
     }
 }
@@ -333,7 +333,7 @@ static void do_cpu_reset(void *opaque)
             env->regs[15] = info->entry & 0xfffffffe;
             env->thumb = info->entry & 1;
         } else {
-            if (env == first_cpu) {
+            if (CPU(cpu) == first_cpu) {
                 env->regs[15] = info->loader_start;
                 if (!info->dtb_filename) {
                     if (old_param) {
@@ -351,7 +351,7 @@ static void do_cpu_reset(void *opaque)
 
 void arm_load_kernel(ARMCPU *cpu, struct arm_boot_info *info)
 {
-    CPUARMState *env = &cpu->env;
+    CPUState *cs = CPU(cpu);
     int kernel_size;
     int initrd_size;
     int n;
@@ -470,9 +470,9 @@ void arm_load_kernel(ARMCPU *cpu, struct arm_boot_info *info)
     }
     info->is_linux = is_linux;
 
-    for (; env; env = env->next_cpu) {
-        cpu = arm_env_get_cpu(env);
-        env->boot_info = info;
+    for (; cs; cs = cs->next_cpu) {
+        cpu = ARM_CPU(cs);
+        cpu->env.boot_info = info;
         qemu_register_reset(do_cpu_reset, cpu);
     }
 }
@@ -131,7 +131,7 @@ static void nuri_init(QEMUMachineInitArgs *args)
 {
     exynos4_boards_init_common(args, EXYNOS4_BOARD_NURI);
 
-    arm_load_kernel(arm_env_get_cpu(first_cpu), &exynos4_board_binfo);
+    arm_load_kernel(ARM_CPU(first_cpu), &exynos4_board_binfo);
 }
 
 static void smdkc210_init(QEMUMachineInitArgs *args)
@@ -141,7 +141,7 @@ static void smdkc210_init(QEMUMachineInitArgs *args)
 
     lan9215_init(SMDK_LAN9118_BASE_ADDR,
             qemu_irq_invert(s->irq_table[exynos4210_get_irq(37, 1)]));
-    arm_load_kernel(arm_env_get_cpu(first_cpu), &exynos4_board_binfo);
+    arm_load_kernel(ARM_CPU(first_cpu), &exynos4_board_binfo);
 }
 
 static QEMUMachine exynos4_machines[EXYNOS4_NUM_OF_BOARDS] = {
@@ -321,7 +321,7 @@ static void highbank_init(QEMUMachineInitArgs *args)
     highbank_binfo.loader_start = 0;
     highbank_binfo.write_secondary_boot = hb_write_secondary;
     highbank_binfo.secondary_cpu_reset_hook = hb_reset_secondary;
-    arm_load_kernel(arm_env_get_cpu(first_cpu), &highbank_binfo);
+    arm_load_kernel(ARM_CPU(first_cpu), &highbank_binfo);
 }
 
 static QEMUMachine highbank_machine = {
@@ -301,8 +301,7 @@ static int pxa2xx_pwrmode_write(CPUARMState *env, const ARMCPRegInfo *ri,
 #endif
 
         /* Suspend */
-        cpu_interrupt(CPU(arm_env_get_cpu(cpu_single_env)),
-                      CPU_INTERRUPT_HALT);
+        cpu_interrupt(current_cpu, CPU_INTERRUPT_HALT);
 
         goto message;
 
@@ -331,7 +331,7 @@ static void realview_init(QEMUMachineInitArgs *args,
     realview_binfo.nb_cpus = smp_cpus;
     realview_binfo.board_id = realview_board_id[board_type];
     realview_binfo.loader_start = (board_type == BOARD_PB_A8 ? 0x70000000 : 0);
-    arm_load_kernel(arm_env_get_cpu(first_cpu), &realview_binfo);
+    arm_load_kernel(ARM_CPU(first_cpu), &realview_binfo);
 }
 
 static void realview_eb_init(QEMUMachineInitArgs *args)
@@ -519,7 +519,7 @@ static void vexpress_common_init(const VEDBoardInfo *daughterboard,
     vexpress_binfo.smp_loader_start = map[VE_SRAM];
     vexpress_binfo.smp_bootreg_addr = map[VE_SYSREGS] + 0x30;
     vexpress_binfo.gic_cpu_if_addr = daughterboard->gic_cpu_if_addr;
-    arm_load_kernel(arm_env_get_cpu(first_cpu), &vexpress_binfo);
+    arm_load_kernel(ARM_CPU(first_cpu), &vexpress_binfo);
 }
 
 static void vexpress_a9_init(QEMUMachineInitArgs *args)
@@ -226,7 +226,7 @@ static void zynq_init(QEMUMachineInitArgs *args)
     zynq_binfo.nb_cpus = 1;
     zynq_binfo.board_id = 0xd32;
     zynq_binfo.loader_start = 0;
-    arm_load_kernel(arm_env_get_cpu(first_cpu), &zynq_binfo);
+    arm_load_kernel(ARM_CPU(first_cpu), &zynq_binfo);
 }
 
 static QEMUMachine zynq_machine = {
@@ -33,7 +33,7 @@ static void kvmclock_vm_state_change(void *opaque, int running,
                                      RunState state)
 {
     KVMClockState *s = opaque;
-    CPUArchState *penv = first_cpu;
+    CPUState *cpu = first_cpu;
     int cap_clock_ctrl = kvm_check_extension(kvm_state, KVM_CAP_KVMCLOCK_CTRL);
     int ret;
 
@@ -53,8 +53,8 @@ static void kvmclock_vm_state_change(void *opaque, int running,
         if (!cap_clock_ctrl) {
             return;
         }
-        for (penv = first_cpu; penv != NULL; penv = penv->next_cpu) {
-            ret = kvm_vcpu_ioctl(ENV_GET_CPU(penv), KVM_KVMCLOCK_CTRL, 0);
+        for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
+            ret = kvm_vcpu_ioctl(cpu, KVM_KVMCLOCK_CTRL, 0);
             if (ret) {
                 if (ret != -EINVAL) {
                     fprintf(stderr, "%s: %s\n", __func__, strerror(-ret));
@@ -124,9 +124,11 @@ static const TypeInfo kvmclock_info = {
 /* Note: Must be called after VCPU initialization. */
 void kvmclock_create(void)
 {
+    X86CPU *cpu = X86_CPU(first_cpu);
+
     if (kvm_enabled() &&
-        first_cpu->features[FEAT_KVM] & ((1ULL << KVM_FEATURE_CLOCKSOURCE) |
-                                         (1ULL << KVM_FEATURE_CLOCKSOURCE2))) {
+        cpu->env.features[FEAT_KVM] & ((1ULL << KVM_FEATURE_CLOCKSOURCE) |
+                                       (1ULL << KVM_FEATURE_CLOCKSOURCE2))) {
         sysbus_create_simple("kvmclock", -1, NULL);
     }
 }
@@ -490,13 +490,15 @@ static void vapic_enable_tpr_reporting(bool enable)
     VAPICEnableTPRReporting info = {
         .enable = enable,
     };
+    CPUState *cs;
     X86CPU *cpu;
     CPUX86State *env;
 
-    for (env = first_cpu; env != NULL; env = env->next_cpu) {
-        cpu = x86_env_get_cpu(env);
+    for (cs = first_cpu; cs != NULL; cs = cs->next_cpu) {
+        cpu = X86_CPU(cs);
+        env = &cpu->env;
         info.apic = env->apic_state;
-        run_on_cpu(CPU(cpu), vapic_do_enable_tpr_reporting, &info);
+        run_on_cpu(cs, vapic_do_enable_tpr_reporting, &info);
     }
 }
 
@@ -624,11 +626,13 @@ static int vapic_prepare(VAPICROMState *s)
 static void vapic_write(void *opaque, hwaddr addr, uint64_t data,
                         unsigned int size)
 {
-    CPUX86State *env = cpu_single_env;
+    CPUState *cs = current_cpu;
+    X86CPU *cpu = X86_CPU(cs);
+    CPUX86State *env = &cpu->env;
     hwaddr rom_paddr;
     VAPICROMState *s = opaque;
 
-    cpu_synchronize_state(CPU(x86_env_get_cpu(env)));
+    cpu_synchronize_state(cs);
 
     /*
      * The VAPIC supports two PIO-based hypercalls, both via port 0x7E.
@@ -717,8 +721,9 @@ static int vapic_init(SysBusDevice *dev)
 static void do_vapic_enable(void *data)
 {
     VAPICROMState *s = data;
+    X86CPU *cpu = X86_CPU(first_cpu);
 
-    vapic_enable(s, first_cpu);
+    vapic_enable(s, &cpu->env);
 }
 
 static int vapic_post_load(void *opaque, int version_id)
@@ -741,7 +746,7 @@ static int vapic_post_load(void *opaque, int version_id)
     }
     if (s->state == VAPIC_ACTIVE) {
         if (smp_cpus == 1) {
-            run_on_cpu(ENV_GET_CPU(first_cpu), do_vapic_enable, s);
+            run_on_cpu(first_cpu, do_vapic_enable, s);
         } else {
             zero = g_malloc0(s->rom_state.vapic_size);
             cpu_physical_memory_rw(s->vapic_paddr, zero,
hw/i386/pc.c | 28

@@ -160,8 +160,9 @@ void cpu_smm_register(cpu_set_smm_t callback, void *arg)
 
 void cpu_smm_update(CPUX86State *env)
 {
-    if (smm_set && smm_arg && env == first_cpu)
+    if (smm_set && smm_arg && CPU(x86_env_get_cpu(env)) == first_cpu) {
         smm_set(!!(env->hflags & HF_SMM_MASK), smm_arg);
+    }
 }
 
 
@@ -185,18 +186,21 @@ int cpu_get_pic_interrupt(CPUX86State *env)
 
 static void pic_irq_request(void *opaque, int irq, int level)
 {
-    CPUX86State *env = first_cpu;
+    CPUState *cs = first_cpu;
+    X86CPU *cpu = X86_CPU(cs);
+    CPUX86State *env = &cpu->env;
 
     DPRINTF("pic_irqs: %s irq %d\n", level? "raise" : "lower", irq);
     if (env->apic_state) {
-        while (env) {
+        while (cs) {
+            cpu = X86_CPU(cs);
+            env = &cpu->env;
             if (apic_accept_pic_intr(env->apic_state)) {
                 apic_deliver_pic_intr(env->apic_state, level);
             }
-            env = env->next_cpu;
+            cs = cs->next_cpu;
         }
     } else {
-        CPUState *cs = CPU(x86_env_get_cpu(env));
         if (level) {
             cpu_interrupt(cs, CPU_INTERRUPT_HARD);
         } else {
@@ -886,8 +890,9 @@ void pc_init_ne2k_isa(ISABus *bus, NICInfo *nd)
 
 DeviceState *cpu_get_current_apic(void)
 {
-    if (cpu_single_env) {
-        return cpu_single_env->apic_state;
+    if (current_cpu) {
+        X86CPU *cpu = X86_CPU(current_cpu);
+        return cpu->env.apic_state;
     } else {
         return NULL;
     }
@@ -1176,10 +1181,10 @@ DeviceState *pc_vga_init(ISABus *isa_bus, PCIBus *pci_bus)
 
 static void cpu_request_exit(void *opaque, int irq, int level)
 {
-    CPUX86State *env = cpu_single_env;
+    CPUState *cpu = current_cpu;
 
-    if (env && level) {
-        cpu_exit(CPU(x86_env_get_cpu(env)));
+    if (cpu && level) {
+        cpu_exit(cpu);
     }
 }
 
@@ -1273,8 +1278,7 @@ void pc_basic_device_init(ISABus *isa_bus, qemu_irq *gsi,
         }
     }
 
-    a20_line = qemu_allocate_irqs(handle_a20_line_change,
-                                  x86_env_get_cpu(first_cpu), 2);
+    a20_line = qemu_allocate_irqs(handle_a20_line_change, first_cpu, 2);
     i8042 = isa_create_simple(isa_bus, "i8042");
     i8042_setup_a20_line(i8042, &a20_line[0]);
     if (!no_vmport) {
@@ -229,8 +229,7 @@ static void pc_init1(MemoryRegion *system_memory,
     if (pci_enabled && acpi_enabled) {
         i2c_bus *smbus;
 
-        smi_irq = qemu_allocate_irqs(pc_acpi_smi_interrupt,
-                                     x86_env_get_cpu(first_cpu), 1);
+        smi_irq = qemu_allocate_irqs(pc_acpi_smi_interrupt, first_cpu, 1);
         /* TODO: Populate SPD eeprom data. */
         smbus = piix4_pm_init(pci_bus, piix3_devfn + 3, 0xb100,
                               gsi[9], *smi_irq,
@@ -10,15 +10,15 @@ common-obj-$(CONFIG_REALVIEW) += realview_gic.o
 common-obj-$(CONFIG_SLAVIO) += slavio_intctl.o
 common-obj-$(CONFIG_IOAPIC) += ioapic_common.o
 common-obj-$(CONFIG_ARM_GIC) += arm_gic_common.o
+common-obj-$(CONFIG_ARM_GIC) += arm_gic.o
+common-obj-$(CONFIG_OPENPIC) += openpic.o
 
 obj-$(CONFIG_APIC) += apic.o apic_common.o
-obj-$(CONFIG_ARM_GIC) += arm_gic.o
 obj-$(CONFIG_ARM_GIC_KVM) += arm_gic_kvm.o
 obj-$(CONFIG_STELLARIS) += armv7m_nvic.o
 obj-$(CONFIG_EXYNOS4) += exynos4210_gic.o exynos4210_combiner.o
 obj-$(CONFIG_GRLIB) += grlib_irqmp.o
 obj-$(CONFIG_IOAPIC) += ioapic.o
 obj-$(CONFIG_OMAP) += omap_intc.o
-obj-$(CONFIG_OPENPIC) += openpic.o
 obj-$(CONFIG_OPENPIC_KVM) += openpic_kvm.o
 obj-$(CONFIG_SH4) += sh_intc.o
@@ -20,6 +20,7 @@
 
 #include "hw/sysbus.h"
 #include "gic_internal.h"
+#include "qom/cpu.h"
 
 //#define DEBUG_GIC
 
@@ -39,8 +40,7 @@ static const uint8_t gic_id[] = {
 static inline int gic_get_current_cpu(GICState *s)
 {
     if (s->num_cpu > 1) {
-        CPUState *cpu = ENV_GET_CPU(cpu_single_env);
-        return cpu->cpu_index;
+        return current_cpu->cpu_index;
     }
     return 0;
 }
@@ -140,6 +140,7 @@ void armv7m_nvic_complete_irq(void *opaque, int irq)
 
 static uint32_t nvic_readl(nvic_state *s, uint32_t offset)
 {
+    ARMCPU *cpu;
     uint32_t val;
     int irq;
 
@@ -171,7 +172,8 @@ static uint32_t nvic_readl(nvic_state *s, uint32_t offset)
     case 0x1c: /* SysTick Calibration Value. */
         return 10000;
     case 0xd00: /* CPUID Base. */
-        return cpu_single_env->cp15.c0_cpuid;
+        cpu = ARM_CPU(current_cpu);
+        return cpu->env.cp15.c0_cpuid;
     case 0xd04: /* Interrupt Control State. */
         /* VECTACTIVE */
         val = s->gic.running_irq[0];
@@ -206,7 +208,8 @@ static uint32_t nvic_readl(nvic_state *s, uint32_t offset)
             val |= (1 << 31);
         return val;
     case 0xd08: /* Vector Table Offset. */
-        return cpu_single_env->v7m.vecbase;
+        cpu = ARM_CPU(current_cpu);
+        return cpu->env.v7m.vecbase;
     case 0xd0c: /* Application Interrupt/Reset Control. */
         return 0xfa05000;
     case 0xd10: /* System Control. */
@@ -279,6 +282,7 @@ static uint32_t nvic_readl(nvic_state *s, uint32_t offset)
 
 static void nvic_writel(nvic_state *s, uint32_t offset, uint32_t value)
 {
+    ARMCPU *cpu;
     uint32_t oldval;
     switch (offset) {
     case 0x10: /* SysTick Control and Status. */
@@ -331,7 +335,8 @@ static void nvic_writel(nvic_state *s, uint32_t offset, uint32_t value)
         }
         break;
     case 0xd08: /* Vector Table Offset. */
-        cpu_single_env->v7m.vecbase = value & 0xffffff80;
+        cpu = ARM_CPU(current_cpu);
+        cpu->env.v7m.vecbase = value & 0xffffff80;
         break;
     case 0xd0c: /* Application Interrupt/Reset Control. */
         if ((value >> 16) == 0x05fa) {
@@ -37,10 +37,10 @@
 #include "hw/ppc/mac.h"
 #include "hw/pci/pci.h"
 #include "hw/ppc/openpic.h"
+#include "hw/ppc/ppc_e500.h"
 #include "hw/sysbus.h"
 #include "hw/pci/msi.h"
 #include "qemu/bitops.h"
-#include "hw/ppc/ppc.h"
 
 //#define DEBUG_OPENPIC
 
@@ -180,14 +180,11 @@ static int output_to_inttgt(int output)
 
 static int get_current_cpu(void)
 {
-    CPUState *cpu_single_cpu;
-
-    if (!cpu_single_env) {
+    if (!current_cpu) {
         return -1;
     }
 
-    cpu_single_cpu = ENV_GET_CPU(cpu_single_env);
-    return cpu_single_cpu->cpu_index;
+    return current_cpu->cpu_index;
 }
 
 static uint32_t openpic_cpu_read_internal(void *opaque, hwaddr addr,
@@ -42,16 +42,15 @@ void sh_intc_toggle_source(struct intc_source *source,
         pending_changed = 1;
 
     if (pending_changed) {
-        CPUState *cpu = CPU(sh_env_get_cpu(first_cpu));
         if (source->pending) {
             source->parent->pending++;
             if (source->parent->pending == 1) {
-                cpu_interrupt(cpu, CPU_INTERRUPT_HARD);
+                cpu_interrupt(first_cpu, CPU_INTERRUPT_HARD);
             }
         } else {
             source->parent->pending--;
             if (source->parent->pending == 0) {
-                cpu_reset_interrupt(cpu, CPU_INTERRUPT_HARD);
+                cpu_reset_interrupt(first_cpu, CPU_INTERRUPT_HARD);
             }
         }
     }
@@ -380,7 +380,7 @@ static void ich9_apm_ctrl_changed(uint32_t val, void *arg)
 
     /* SMI_EN = PMBASE + 30. SMI control and enable register */
     if (lpc->pm.smi_en & ICH9_PMIO_SMI_EN_APMC_EN) {
-        cpu_interrupt(CPU(x86_env_get_cpu(first_cpu)), CPU_INTERRUPT_SMI);
+        cpu_interrupt(first_cpu, CPU_INTERRUPT_SMI);
     }
 }
 
@@ -250,10 +250,10 @@ static void network_init (PCIBus *pci_bus)
 
 static void cpu_request_exit(void *opaque, int irq, int level)
 {
-    CPUMIPSState *env = cpu_single_env;
+    CPUState *cpu = current_cpu;
 
-    if (env && level) {
-        cpu_exit(CPU(mips_env_get_cpu(env)));
+    if (cpu && level) {
+        cpu_exit(cpu);
     }
 }
 
@@ -99,10 +99,10 @@ static const MemoryRegionOps dma_dummy_ops = {
 
 static void cpu_request_exit(void *opaque, int irq, int level)
 {
-    CPUMIPSState *env = cpu_single_env;
+    CPUState *cpu = current_cpu;
 
-    if (env && level) {
-        cpu_exit(CPU(mips_env_get_cpu(env)));
+    if (cpu && level) {
+        cpu_exit(cpu);
     }
 }
 
@@ -770,10 +770,10 @@ static void main_cpu_reset(void *opaque)
 
 static void cpu_request_exit(void *opaque, int irq, int level)
 {
-    CPUMIPSState *env = cpu_single_env;
+    CPUState *cpu = current_cpu;
 
-    if (env && level) {
-        cpu_exit(CPU(mips_env_get_cpu(env)));
+    if (cpu && level) {
+        cpu_exit(cpu);
     }
 }
 
@@ -844,7 +844,8 @@ void mips_malta_init(QEMUMachineInitArgs *args)
         cpu_mips_clock_init(env);
         qemu_register_reset(main_cpu_reset, cpu);
     }
-    env = first_cpu;
+    cpu = MIPS_CPU(first_cpu);
+    env = &cpu->env;
 
     /* allocate RAM */
     if (ram_size > (256 << 20)) {
@@ -62,11 +62,13 @@ static uint64_t vmport_ioport_read(void *opaque, hwaddr addr,
                                    unsigned size)
 {
     VMPortState *s = opaque;
-    CPUX86State *env = cpu_single_env;
+    CPUState *cs = current_cpu;
+    X86CPU *cpu = X86_CPU(cs);
+    CPUX86State *env = &cpu->env;
     unsigned char command;
     uint32_t eax;
 
-    cpu_synchronize_state(CPU(x86_env_get_cpu(env)));
+    cpu_synchronize_state(cs);
 
     eax = env->regs[R_EAX];
     if (eax != VMPORT_MAGIC)
@@ -89,29 +91,32 @@ static uint64_t vmport_ioport_read(void *opaque, hwaddr addr,
 static void vmport_ioport_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
 {
-    CPUX86State *env = cpu_single_env;
+    X86CPU *cpu = X86_CPU(current_cpu);
 
-    env->regs[R_EAX] = vmport_ioport_read(opaque, addr, 4);
+    cpu->env.regs[R_EAX] = vmport_ioport_read(opaque, addr, 4);
 }
 
 static uint32_t vmport_cmd_get_version(void *opaque, uint32_t addr)
 {
-    CPUX86State *env = cpu_single_env;
-    env->regs[R_EBX] = VMPORT_MAGIC;
+    X86CPU *cpu = X86_CPU(current_cpu);
+
+    cpu->env.regs[R_EBX] = VMPORT_MAGIC;
     return 6;
 }
 
 static uint32_t vmport_cmd_ram_size(void *opaque, uint32_t addr)
 {
-    CPUX86State *env = cpu_single_env;
-    env->regs[R_EBX] = 0x1177;
+    X86CPU *cpu = X86_CPU(current_cpu);
+
+    cpu->env.regs[R_EBX] = 0x1177;
     return ram_size;
 }
 
 /* vmmouse helpers */
 void vmmouse_get_data(uint32_t *data)
 {
-    CPUX86State *env = cpu_single_env;
+    X86CPU *cpu = X86_CPU(current_cpu);
+    CPUX86State *env = &cpu->env;
 
     data[0] = env->regs[R_EAX]; data[1] = env->regs[R_EBX];
     data[2] = env->regs[R_ECX]; data[3] = env->regs[R_EDX];
@@ -120,7 +125,8 @@ void vmmouse_get_data(uint32_t *data)
 
 void vmmouse_set_data(const uint32_t *data)
 {
-    CPUX86State *env = cpu_single_env;
+    X86CPU *cpu = X86_CPU(current_cpu);
+    CPUX86State *env = &cpu->env;
 
     env->regs[R_EAX] = data[0]; env->regs[R_EBX] = data[1];
     env->regs[R_ECX] = data[2]; env->regs[R_EDX] = data[3];
@@ -494,7 +494,6 @@ static DeviceState *ppce500_init_mpic_kvm(PPCE500Params *params,
                                           qemu_irq **irqs)
 {
     DeviceState *dev;
-    CPUPPCState *env;
     CPUState *cs;
     int r;
 
@@ -506,9 +505,7 @@ static DeviceState *ppce500_init_mpic_kvm(PPCE500Params *params,
         return NULL;
     }
 
-    for (env = first_cpu; env != NULL; env = env->next_cpu) {
-        cs = ENV_GET_CPU(env);
-
+    for (cs = first_cpu; cs != NULL; cs = cs->next_cpu) {
         if (kvm_openpic_connect_vcpu(dev, cs)) {
             fprintf(stderr, "%s: failed to connect vcpu to irqchip\n",
                     __func__);
@@ -68,7 +68,8 @@ static uint64_t mpc8544_guts_read(void *opaque, hwaddr addr,
                                   unsigned size)
 {
     uint32_t value = 0;
-    CPUPPCState *env = cpu_single_env;
+    PowerPCCPU *cpu = POWERPC_CPU(current_cpu);
+    CPUPPCState *env = &cpu->env;
 
     addr &= MPC8544_GUTS_MMIO_SIZE - 1;
     switch (addr) {
hw/ppc/ppc.c
@@ -23,6 +23,7 @@
  */
 #include "hw/hw.h"
 #include "hw/ppc/ppc.h"
+#include "hw/ppc/ppc_e500.h"
 #include "qemu/timer.h"
 #include "sysemu/sysemu.h"
 #include "hw/timer/m48t59.h"
@@ -440,15 +441,14 @@ void ppce500_irq_init(CPUPPCState *env)
 /* Enable or Disable the E500 EPR capability */
 void ppce500_set_mpic_proxy(bool enabled)
 {
-    CPUPPCState *env;
+    CPUState *cs;
 
-    for (env = first_cpu; env != NULL; env = env->next_cpu) {
-        PowerPCCPU *cpu = ppc_env_get_cpu(env);
-        CPUState *cs = CPU(cpu);
+    for (cs = first_cpu; cs != NULL; cs = cs->next_cpu) {
+        PowerPCCPU *cpu = POWERPC_CPU(cs);
 
-        env->mpic_proxy = enabled;
+        cpu->env.mpic_proxy = enabled;
         if (kvm_enabled()) {
-            kvmppc_set_mpic_proxy(POWERPC_CPU(cs), enabled);
+            kvmppc_set_mpic_proxy(cpu, enabled);
         }
     }
 }
@@ -417,10 +417,10 @@ static const MemoryRegionOps PPC_prep_io_ops = {
 
 static void cpu_request_exit(void *opaque, int irq, int level)
 {
-    CPUPPCState *env = cpu_single_env;
+    CPUState *cpu = current_cpu;
 
-    if (env && level) {
-        cpu_exit(CPU(ppc_env_get_cpu(env)));
+    if (cpu && level) {
+        cpu_exit(cpu);
     }
 }
 
@@ -605,8 +605,9 @@ static void ppc_prep_init(QEMUMachineInitArgs *args)
     /* PCI -> ISA bridge */
     pci = pci_create_simple(pci_bus, PCI_DEVFN(1, 0), "i82378");
     cpu_exit_irq = qemu_allocate_irqs(cpu_request_exit, NULL, 1);
+    cpu = POWERPC_CPU(first_cpu);
     qdev_connect_gpio_out(&pci->qdev, 0,
-                          first_cpu->irq_inputs[PPC6xx_INPUT_INT]);
+                          cpu->env.irq_inputs[PPC6xx_INPUT_INT]);
     qdev_connect_gpio_out(&pci->qdev, 1, *cpu_exit_irq);
     sysbus_connect_irq(&pcihost->busdev, 0, qdev_get_gpio_in(&pci->qdev, 9));
     sysbus_connect_irq(&pcihost->busdev, 1, qdev_get_gpio_in(&pci->qdev, 11));
@@ -651,7 +652,8 @@ static void ppc_prep_init(QEMUMachineInitArgs *args)
     }
     isa_create_simple(isa_bus, "i8042");
 
-    sysctrl->reset_irq = first_cpu->irq_inputs[PPC6xx_INPUT_HRESET];
+    cpu = POWERPC_CPU(first_cpu);
+    sysctrl->reset_irq = cpu->env.irq_inputs[PPC6xx_INPUT_HRESET];
 
     portio_list_init(port_list, NULL, prep_portio_list, sysctrl, "prep");
     portio_list_add(port_list, get_system_io(), 0x0);
@ -131,7 +131,6 @@ int spapr_allocate_irq_block(int num, bool lsi)
|
|||
static int spapr_fixup_cpu_dt(void *fdt, sPAPREnvironment *spapr)
|
||||
{
|
||||
int ret = 0, offset;
|
||||
CPUPPCState *env;
|
||||
CPUState *cpu;
|
||||
char cpu_model[32];
|
||||
int smt = kvmppc_smt_threads();
|
||||
|
@ -139,8 +138,7 @@ static int spapr_fixup_cpu_dt(void *fdt, sPAPREnvironment *spapr)
|
|||
|
||||
assert(spapr->cpu_model);
|
||||
|
||||
for (env = first_cpu; env != NULL; env = env->next_cpu) {
|
||||
cpu = CPU(ppc_env_get_cpu(env));
|
||||
for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
|
||||
uint32_t associativity[] = {cpu_to_be32(0x5),
|
||||
cpu_to_be32(0x0),
|
||||
cpu_to_be32(0x0),
|
||||
|
@ -231,7 +229,7 @@ static void *spapr_create_fdt_skel(const char *cpu_model,
|
|||
uint32_t epow_irq)
|
||||
{
|
||||
void *fdt;
|
||||
CPUPPCState *env;
|
||||
CPUState *cs;
|
||||
uint32_t start_prop = cpu_to_be32(initrd_base);
|
||||
uint32_t end_prop = cpu_to_be32(initrd_base + initrd_size);
|
||||
char hypertas_prop[] = "hcall-pft\0hcall-term\0hcall-dabr\0hcall-interrupt"
|
||||
|
@ -304,10 +302,11 @@ static void *spapr_create_fdt_skel(const char *cpu_model,
|
|||
/* This is needed during FDT finalization */
|
||||
spapr->cpu_model = g_strdup(modelname);
|
||||
|
||||
for (env = first_cpu; env != NULL; env = env->next_cpu) {
|
||||
CPUState *cpu = CPU(ppc_env_get_cpu(env));
|
||||
PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
|
||||
int index = cpu->cpu_index;
|
||||
for (cs = first_cpu; cs != NULL; cs = cs->next_cpu) {
|
||||
PowerPCCPU *cpu = POWERPC_CPU(cs);
|
||||
CPUPPCState *env = &cpu->env;
|
||||
PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cs);
|
||||
int index = cs->cpu_index;
|
||||
uint32_t servers_prop[smp_threads];
|
||||
uint32_t gservers_prop[smp_threads * 2];
|
||||
char *nodename;
|
||||
|
@ -632,7 +631,7 @@ static void spapr_reset_htab(sPAPREnvironment *spapr)
|
|||
|
||||
static void ppc_spapr_reset(void)
|
||||
{
|
||||
CPUState *first_cpu_cpu;
|
||||
PowerPCCPU *first_ppc_cpu;
|
||||
|
||||
/* Reset the hash table & recalc the RMA */
|
||||
spapr_reset_htab(spapr);
|
||||
|
@ -644,11 +643,11 @@ static void ppc_spapr_reset(void)
|
|||
spapr->rtas_size);
|
||||
|
||||
/* Set up the entry state */
|
||||
first_cpu_cpu = ENV_GET_CPU(first_cpu);
|
||||
first_cpu->gpr[3] = spapr->fdt_addr;
|
||||
first_cpu->gpr[5] = 0;
|
||||
first_cpu_cpu->halted = 0;
|
||||
first_cpu->nip = spapr->entry_point;
|
||||
first_ppc_cpu = POWERPC_CPU(first_cpu);
|
||||
first_ppc_cpu->env.gpr[3] = spapr->fdt_addr;
|
||||
first_ppc_cpu->env.gpr[5] = 0;
|
||||
first_cpu->halted = 0;
|
||||
first_ppc_cpu->env.nip = spapr->entry_point;
|
||||
|
||||
}
|
||||
|
||||
|
|
|
@@ -264,9 +264,8 @@ static void secondary_cpu_reset(void *opaque)
 
 static void cpu_halt_signal(void *opaque, int irq, int level)
 {
-    if (level && cpu_single_env) {
-        cpu_interrupt(CPU(sparc_env_get_cpu(cpu_single_env)),
-                      CPU_INTERRUPT_HALT);
+    if (level && current_cpu) {
+        cpu_interrupt(current_cpu, CPU_INTERRUPT_HALT);
     }
 }
 
@@ -1,4 +1,5 @@
 common-obj-$(CONFIG_ARM_TIMER) += arm_timer.o
+common-obj-$(CONFIG_ARM_MPTIMER) += arm_mptimer.o
 common-obj-$(CONFIG_CADENCE) += cadence_ttc.o
 common-obj-$(CONFIG_DS1338) += ds1338.o
 common-obj-$(CONFIG_HPET) += hpet.o
@@ -25,5 +26,4 @@ obj-$(CONFIG_PXA2XX) += pxa2xx_timer.o
 obj-$(CONFIG_SH4) += sh_timer.o
 obj-$(CONFIG_TUSB6010) += tusb6010.o
 
-obj-$(CONFIG_ARM_MPTIMER) += arm_mptimer.o
 obj-$(CONFIG_MC146818RTC) += mc146818rtc.o
@@ -21,6 +21,7 @@
 
 #include "hw/sysbus.h"
 #include "qemu/timer.h"
+#include "qom/cpu.h"
 
 /* This device implements the per-cpu private timer and watchdog block
  * which is used in both the ARM11MPCore and Cortex-A9MP.
@@ -49,13 +50,11 @@ typedef struct {
 
 static inline int get_current_cpu(ARMMPTimerState *s)
 {
-    CPUState *cpu_single_cpu = ENV_GET_CPU(cpu_single_env);
-
-    if (cpu_single_cpu->cpu_index >= s->num_cpu) {
+    if (current_cpu->cpu_index >= s->num_cpu) {
         hw_error("arm_mptimer: num-cpu %d but this cpu is %d!\n",
-                 s->num_cpu, cpu_single_cpu->cpu_index);
+                 s->num_cpu, current_cpu->cpu_index);
     }
-    return cpu_single_cpu->cpu_index;
+    return current_cpu->cpu_index;
 }
 
 static inline void timerblock_update_irq(TimerBlock *tb)
@ -20,7 +20,6 @@
|
|||
#define CPU_ALL_H
|
||||
|
||||
#include "qemu-common.h"
|
||||
#include "qemu/tls.h"
|
||||
#include "exec/cpu-common.h"
|
||||
#include "qemu/thread.h"
|
||||
|
||||
|
@ -357,9 +356,6 @@ CPUArchState *cpu_copy(CPUArchState *env);
|
|||
|
||||
void QEMU_NORETURN cpu_abort(CPUArchState *env, const char *fmt, ...)
|
||||
GCC_FMT_ATTR(2, 3);
|
||||
extern CPUArchState *first_cpu;
|
||||
DECLARE_TLS(CPUArchState *,cpu_single_env);
|
||||
#define cpu_single_env tls_var(cpu_single_env)
|
||||
|
||||
/* Flags for use in ENV->INTERRUPT_PENDING.
|
||||
|
||||
|
|
|
@ -181,7 +181,6 @@ typedef struct CPUWatchpoint {
|
|||
sigjmp_buf jmp_env; \
|
||||
int exception_index; \
|
||||
\
|
||||
CPUArchState *next_cpu; /* next CPU sharing TB cache */ \
|
||||
/* user data */ \
|
||||
void *opaque; \
|
||||
\
|
||||
|
|
|
@ -73,8 +73,6 @@ void ppc6xx_irq_init (CPUPPCState *env);
|
|||
void ppc970_irq_init (CPUPPCState *env);
|
||||
void ppcPOWER7_irq_init (CPUPPCState *env);
|
||||
|
||||
void ppce500_set_mpic_proxy(bool enabled);
|
||||
|
||||
/* PPC machines for OpenBIOS */
|
||||
enum {
|
||||
ARCH_PREP = 0,
|
||||
|
|
|
@ -0,0 +1,6 @@
|
|||
#ifndef HW_PPC_E500_H
|
||||
#define HW_PPC_E500_H
|
||||
|
||||
void ppce500_set_mpic_proxy(bool enabled);
|
||||
|
||||
#endif
|
|
@ -5,6 +5,7 @@
|
|||
#include <stdbool.h>
|
||||
#include <stdio.h>
|
||||
#include "qemu/compiler.h"
|
||||
#include "qom/cpu.h"
|
||||
#ifdef NEED_CPU_H
|
||||
#include "disas/disas.h"
|
||||
#endif
|
||||
|
@ -70,22 +71,37 @@ void GCC_FMT_ATTR(2, 3) qemu_log_mask(int mask, const char *fmt, ...);
|
|||
|
||||
/* Special cases: */
|
||||
|
||||
#ifdef NEED_CPU_H
|
||||
/* cpu_dump_state() logging functions: */
|
||||
static inline void log_cpu_state(CPUArchState *env1, int flags)
|
||||
/**
|
||||
* log_cpu_state:
|
||||
* @cpu: The CPU whose state is to be logged.
|
||||
* @flags: Flags what to log.
|
||||
*
|
||||
* Logs the output of cpu_dump_state().
|
||||
*/
|
||||
static inline void log_cpu_state(CPUState *cpu, int flags)
|
||||
{
|
||||
if (qemu_log_enabled()) {
|
||||
cpu_dump_state(ENV_GET_CPU(env1), qemu_logfile, fprintf, flags);
|
||||
cpu_dump_state(cpu, qemu_logfile, fprintf, flags);
|
||||
}
|
||||
}
|
||||
|
||||
static inline void log_cpu_state_mask(int mask, CPUArchState *env1, int flags)
|
||||
/**
|
||||
* log_cpu_state_mask:
|
||||
* @mask: Mask when to log.
|
||||
* @cpu: The CPU whose state is to be logged.
|
||||
* @flags: Flags what to log.
|
||||
*
|
||||
* Logs the output of cpu_dump_state() if loglevel includes @mask.
|
||||
*/
|
||||
static inline void log_cpu_state_mask(int mask, CPUState *cpu, int flags)
|
||||
{
|
||||
if (qemu_loglevel & mask) {
|
||||
log_cpu_state(env1, flags);
|
||||
log_cpu_state(cpu, flags);
|
||||
}
|
||||
}
|
||||
|
||||
#ifdef NEED_CPU_H
|
||||
/* disas() and target_disas() to qemu_logfile: */
|
||||
static inline void log_target_disas(CPUArchState *env, target_ulong start,
|
||||
target_ulong len, int flags)
|
||||
|
|
|
@ -24,6 +24,7 @@
|
|||
#include "hw/qdev-core.h"
|
||||
#include "exec/hwaddr.h"
|
||||
#include "qemu/thread.h"
|
||||
#include "qemu/tls.h"
|
||||
#include "qemu/typedefs.h"
|
||||
|
||||
typedef int (*WriteCoreDumpFunction)(void *buf, size_t size, void *opaque);
|
||||
|
@ -52,6 +53,7 @@ typedef void (*CPUUnassignedAccess)(CPUState *cpu, hwaddr addr,
|
|||
* @class_by_name: Callback to map -cpu command line model name to an
|
||||
* instantiatable CPU type.
|
||||
* @reset: Callback to reset the #CPUState to its initial state.
|
||||
* @reset_dump_flags: #CPUDumpFlags to use for reset logging.
|
||||
* @do_interrupt: Callback for interrupt handling.
|
||||
* @do_unassigned_access: Callback for unassigned access handling.
|
||||
* @dump_state: Callback for dumping state.
|
||||
|
@ -71,6 +73,7 @@ typedef struct CPUClass {
|
|||
ObjectClass *(*class_by_name)(const char *cpu_model);
|
||||
|
||||
void (*reset)(CPUState *cpu);
|
||||
int reset_dump_flags;
|
||||
void (*do_interrupt)(CPUState *cpu);
|
||||
CPUUnassignedAccess do_unassigned_access;
|
||||
void (*dump_state)(CPUState *cpu, FILE *f, fprintf_function cpu_fprintf,
|
||||
|
@ -113,6 +116,7 @@ struct kvm_run;
|
|||
* CPU and return to its top level loop.
|
||||
* @env_ptr: Pointer to subclass-specific CPUArchState field.
|
||||
* @current_tb: Currently executing TB.
|
||||
* @next_cpu: Next CPU sharing TB cache.
|
||||
* @kvm_fd: vCPU file descriptor for KVM.
|
||||
*
|
||||
* State of one CPU core or thread.
|
||||
|
@ -145,6 +149,7 @@ struct CPUState {
|
|||
|
||||
void *env_ptr; /* CPUArchState */
|
||||
struct TranslationBlock *current_tb;
|
||||
CPUState *next_cpu;
|
||||
|
||||
int kvm_fd;
|
||||
bool kvm_vcpu_dirty;
|
||||
|
@ -156,6 +161,11 @@ struct CPUState {
|
|||
uint32_t halted; /* used by alpha, cris, ppc TCG */
|
||||
};
|
||||
|
||||
extern CPUState *first_cpu;
|
||||
|
||||
DECLARE_TLS(CPUState *, current_cpu);
|
||||
#define current_cpu tls_var(current_cpu)
|
||||
|
||||
/**
|
||||
* cpu_paging_enabled:
|
||||
* @cpu: The CPU whose state is to be inspected.
|
||||
|
|
|
@ -169,11 +169,11 @@ void *kvm_arch_ram_alloc(ram_addr_t size);
|
|||
void kvm_setup_guest_memory(void *start, size_t size);
|
||||
void kvm_flush_coalesced_mmio_buffer(void);
|
||||
|
||||
int kvm_insert_breakpoint(CPUArchState *current_env, target_ulong addr,
|
||||
int kvm_insert_breakpoint(CPUArchState *env, target_ulong addr,
|
||||
target_ulong len, int type);
|
||||
int kvm_remove_breakpoint(CPUArchState *current_env, target_ulong addr,
|
||||
int kvm_remove_breakpoint(CPUArchState *env, target_ulong addr,
|
||||
target_ulong len, int type);
|
||||
void kvm_remove_all_breakpoints(CPUArchState *current_env);
|
||||
void kvm_remove_all_breakpoints(CPUState *cpu);
|
||||
int kvm_update_guest_debug(CPUArchState *env, unsigned long reinject_trap);
|
||||
#ifndef _WIN32
|
||||
int kvm_set_signal_mask(CPUState *cpu, const sigset_t *sigset);
|
||||
|
@ -252,9 +252,9 @@ struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUState *cpu,
|
|||
|
||||
int kvm_sw_breakpoints_active(CPUState *cpu);
|
||||
|
||||
int kvm_arch_insert_sw_breakpoint(CPUState *current_cpu,
|
||||
int kvm_arch_insert_sw_breakpoint(CPUState *cpu,
|
||||
struct kvm_sw_breakpoint *bp);
|
||||
int kvm_arch_remove_sw_breakpoint(CPUState *current_cpu,
|
||||
int kvm_arch_remove_sw_breakpoint(CPUState *cpu,
|
||||
struct kvm_sw_breakpoint *bp);
|
||||
int kvm_arch_insert_hw_breakpoint(target_ulong addr,
|
||||
target_ulong len, int type);
|
||||
|
|
kvm-all.c
@ -1900,16 +1900,15 @@ int kvm_update_guest_debug(CPUArchState *env, unsigned long reinject_trap)
|
|||
return data.err;
|
||||
}
|
||||
|
||||
int kvm_insert_breakpoint(CPUArchState *current_env, target_ulong addr,
|
||||
int kvm_insert_breakpoint(CPUArchState *env, target_ulong addr,
|
||||
target_ulong len, int type)
|
||||
{
|
||||
CPUState *current_cpu = ENV_GET_CPU(current_env);
|
||||
CPUState *cpu = ENV_GET_CPU(env);
|
||||
struct kvm_sw_breakpoint *bp;
|
||||
CPUArchState *env;
|
||||
int err;
|
||||
|
||||
if (type == GDB_BREAKPOINT_SW) {
|
||||
bp = kvm_find_sw_breakpoint(current_cpu, addr);
|
||||
bp = kvm_find_sw_breakpoint(cpu, addr);
|
||||
if (bp) {
|
||||
bp->use_count++;
|
||||
return 0;
|
||||
|
@ -1922,14 +1921,13 @@ int kvm_insert_breakpoint(CPUArchState *current_env, target_ulong addr,
|
|||
|
||||
bp->pc = addr;
|
||||
bp->use_count = 1;
|
||||
err = kvm_arch_insert_sw_breakpoint(current_cpu, bp);
|
||||
err = kvm_arch_insert_sw_breakpoint(cpu, bp);
|
||||
if (err) {
|
||||
g_free(bp);
|
||||
return err;
|
||||
}
|
||||
|
||||
QTAILQ_INSERT_HEAD(¤t_cpu->kvm_state->kvm_sw_breakpoints,
|
||||
bp, entry);
|
||||
QTAILQ_INSERT_HEAD(&cpu->kvm_state->kvm_sw_breakpoints, bp, entry);
|
||||
} else {
|
||||
err = kvm_arch_insert_hw_breakpoint(addr, len, type);
|
||||
if (err) {
|
||||
|
@ -1937,7 +1935,9 @@ int kvm_insert_breakpoint(CPUArchState *current_env, target_ulong addr,
|
|||
}
|
||||
}
|
||||
|
||||
for (env = first_cpu; env != NULL; env = env->next_cpu) {
|
||||
for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
|
||||
CPUArchState *env = cpu->env_ptr;
|
||||
|
||||
err = kvm_update_guest_debug(env, 0);
|
||||
if (err) {
|
||||
return err;
|
||||
|
@ -1946,16 +1946,15 @@ int kvm_insert_breakpoint(CPUArchState *current_env, target_ulong addr,
|
|||
return 0;
|
||||
}
|
||||
|
||||
int kvm_remove_breakpoint(CPUArchState *current_env, target_ulong addr,
|
||||
int kvm_remove_breakpoint(CPUArchState *env, target_ulong addr,
|
||||
target_ulong len, int type)
|
||||
{
|
||||
CPUState *current_cpu = ENV_GET_CPU(current_env);
|
||||
CPUState *cpu = ENV_GET_CPU(env);
|
||||
struct kvm_sw_breakpoint *bp;
|
||||
CPUArchState *env;
|
||||
int err;
|
||||
|
||||
if (type == GDB_BREAKPOINT_SW) {
|
||||
bp = kvm_find_sw_breakpoint(current_cpu, addr);
|
||||
bp = kvm_find_sw_breakpoint(cpu, addr);
|
||||
if (!bp) {
|
||||
return -ENOENT;
|
||||
}
|
||||
|
@ -1965,12 +1964,12 @@ int kvm_remove_breakpoint(CPUArchState *current_env, target_ulong addr,
|
|||
return 0;
|
||||
}
|
||||
|
||||
err = kvm_arch_remove_sw_breakpoint(current_cpu, bp);
|
||||
err = kvm_arch_remove_sw_breakpoint(cpu, bp);
|
||||
if (err) {
|
||||
return err;
|
||||
}
|
||||
|
||||
QTAILQ_REMOVE(¤t_cpu->kvm_state->kvm_sw_breakpoints, bp, entry);
|
||||
QTAILQ_REMOVE(&cpu->kvm_state->kvm_sw_breakpoints, bp, entry);
|
||||
g_free(bp);
|
||||
} else {
|
||||
err = kvm_arch_remove_hw_breakpoint(addr, len, type);
|
||||
|
@ -1979,7 +1978,9 @@ int kvm_remove_breakpoint(CPUArchState *current_env, target_ulong addr,
|
|||
}
|
||||
}
|
||||
|
||||
for (env = first_cpu; env != NULL; env = env->next_cpu) {
|
||||
for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
|
||||
CPUArchState *env = cpu->env_ptr;
|
||||
|
||||
err = kvm_update_guest_debug(env, 0);
|
||||
if (err) {
|
||||
return err;
|
||||
|
@ -1988,19 +1989,15 @@ int kvm_remove_breakpoint(CPUArchState *current_env, target_ulong addr,
|
|||
return 0;
|
||||
}
|
||||
|
||||
void kvm_remove_all_breakpoints(CPUArchState *current_env)
|
||||
void kvm_remove_all_breakpoints(CPUState *cpu)
|
||||
{
|
||||
CPUState *current_cpu = ENV_GET_CPU(current_env);
|
||||
struct kvm_sw_breakpoint *bp, *next;
|
||||
KVMState *s = current_cpu->kvm_state;
|
||||
CPUArchState *env;
|
||||
CPUState *cpu;
|
||||
KVMState *s = cpu->kvm_state;
|
||||
|
||||
QTAILQ_FOREACH_SAFE(bp, &s->kvm_sw_breakpoints, entry, next) {
|
||||
if (kvm_arch_remove_sw_breakpoint(current_cpu, bp) != 0) {
|
||||
if (kvm_arch_remove_sw_breakpoint(cpu, bp) != 0) {
|
||||
/* Try harder to find a CPU that currently sees the breakpoint. */
|
||||
for (env = first_cpu; env != NULL; env = env->next_cpu) {
|
||||
cpu = ENV_GET_CPU(env);
|
||||
for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
|
||||
if (kvm_arch_remove_sw_breakpoint(cpu, bp) == 0) {
|
||||
break;
|
||||
}
|
||||
|
@ -2011,7 +2008,9 @@ void kvm_remove_all_breakpoints(CPUArchState *current_env)
|
|||
}
|
||||
kvm_arch_remove_all_hw_breakpoints();
|
||||
|
||||
for (env = first_cpu; env != NULL; env = env->next_cpu) {
|
||||
for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
|
||||
CPUArchState *env = cpu->env_ptr;
|
||||
|
||||
kvm_update_guest_debug(env, 0);
|
||||
}
|
||||
}
|
||||
|
@ -2023,19 +2022,19 @@ int kvm_update_guest_debug(CPUArchState *env, unsigned long reinject_trap)
|
|||
return -EINVAL;
|
||||
}
|
||||
|
||||
int kvm_insert_breakpoint(CPUArchState *current_env, target_ulong addr,
|
||||
int kvm_insert_breakpoint(CPUArchState *env, target_ulong addr,
|
||||
target_ulong len, int type)
|
||||
{
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
int kvm_remove_breakpoint(CPUArchState *current_env, target_ulong addr,
|
||||
int kvm_remove_breakpoint(CPUArchState *env, target_ulong addr,
|
||||
target_ulong len, int type)
|
||||
{
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
void kvm_remove_all_breakpoints(CPUArchState *current_env)
|
||||
void kvm_remove_all_breakpoints(CPUState *cpu)
|
||||
{
|
||||
}
|
||||
#endif /* !KVM_CAP_SET_GUEST_DEBUG */
|
||||
|
|
|
@ -83,19 +83,19 @@ int kvm_update_guest_debug(CPUArchState *env, unsigned long reinject_trap)
|
|||
return -ENOSYS;
|
||||
}
|
||||
|
||||
int kvm_insert_breakpoint(CPUArchState *current_env, target_ulong addr,
|
||||
int kvm_insert_breakpoint(CPUArchState *env, target_ulong addr,
|
||||
target_ulong len, int type)
|
||||
{
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
int kvm_remove_breakpoint(CPUArchState *current_env, target_ulong addr,
|
||||
int kvm_remove_breakpoint(CPUArchState *env, target_ulong addr,
|
||||
target_ulong len, int type)
|
||||
{
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
void kvm_remove_all_breakpoints(CPUArchState *current_env)
|
||||
void kvm_remove_all_breakpoints(CPUState *cpu)
|
||||
{
|
||||
}
|
||||
|
||||
|
|
|
@ -0,0 +1,36 @@
|
|||
/*
|
||||
* Alpha specific CPU ABI and functions for linux-user
|
||||
*
|
||||
* Copyright (c) 2007 Jocelyn Mayer
|
||||
*
|
||||
* This library is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2 of the License, or (at your option) any later version.
|
||||
*
|
||||
* This library is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
#ifndef TARGET_CPU_H
|
||||
#define TARGET_CPU_H
|
||||
|
||||
static inline void cpu_clone_regs(CPUAlphaState *env, target_ulong newsp)
|
||||
{
|
||||
if (newsp) {
|
||||
env->ir[IR_SP] = newsp;
|
||||
}
|
||||
env->ir[IR_V0] = 0;
|
||||
env->ir[IR_A3] = 0;
|
||||
}
|
||||
|
||||
static inline void cpu_set_tls(CPUAlphaState *env, target_ulong newtls)
|
||||
{
|
||||
env->unique = newtls;
|
||||
}
|
||||
|
||||
#endif
|
|
@ -0,0 +1,35 @@
|
|||
/*
|
||||
* ARM specific CPU ABI and functions for linux-user
|
||||
*
|
||||
* Copyright (c) 2003 Fabrice Bellard
|
||||
*
|
||||
* This library is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2 of the License, or (at your option) any later version.
|
||||
*
|
||||
* This library is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
#ifndef TARGET_CPU_H
|
||||
#define TARGET_CPU_H
|
||||
|
||||
static inline void cpu_clone_regs(CPUARMState *env, target_ulong newsp)
|
||||
{
|
||||
if (newsp) {
|
||||
env->regs[13] = newsp;
|
||||
}
|
||||
env->regs[0] = 0;
|
||||
}
|
||||
|
||||
static inline void cpu_set_tls(CPUARMState *env, target_ulong newtls)
|
||||
{
|
||||
env->cp15.c13_tls2 = newtls;
|
||||
}
|
||||
|
||||
#endif
|
|
@ -0,0 +1,36 @@
|
|||
/*
|
||||
* CRIS specific CPU ABI and functions for linux-user
|
||||
*
|
||||
* Copyright (c) 2007 AXIS Communications AB
|
||||
* Written by Edgar E. Iglesias
|
||||
*
|
||||
* This library is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2 of the License, or (at your option) any later version.
|
||||
*
|
||||
* This library is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
#ifndef TARGET_CPU_H
|
||||
#define TARGET_CPU_H
|
||||
|
||||
static inline void cpu_clone_regs(CPUCRISState *env, target_ulong newsp)
|
||||
{
|
||||
if (newsp) {
|
||||
env->regs[14] = newsp;
|
||||
}
|
||||
env->regs[10] = 0;
|
||||
}
|
||||
|
||||
static inline void cpu_set_tls(CPUCRISState *env, target_ulong newtls)
|
||||
{
|
||||
env->pregs[PR_PID] = (env->pregs[PR_PID] & 0xff) | newtls;
|
||||
}
|
||||
|
||||
#endif
|
|
@ -125,7 +125,7 @@ typedef abi_int target_pid_t;
|
|||
static const char *get_elf_platform(void)
|
||||
{
|
||||
static char elf_platform[] = "i386";
|
||||
int family = (thread_env->cpuid_version >> 8) & 0xff;
|
||||
int family = object_property_get_int(OBJECT(thread_cpu), "family", NULL);
|
||||
if (family > 6)
|
||||
family = 6;
|
||||
if (family >= 3)
|
||||
|
@ -137,7 +137,9 @@ static const char *get_elf_platform(void)
|
|||
|
||||
static uint32_t get_elf_hwcap(void)
|
||||
{
|
||||
return thread_env->features[FEAT_1_EDX];
|
||||
X86CPU *cpu = X86_CPU(thread_cpu);
|
||||
|
||||
return cpu->env.features[FEAT_1_EDX];
|
||||
}
|
||||
|
||||
#ifdef TARGET_X86_64
|
||||
|
@ -404,7 +406,7 @@ static int validate_guest_space(unsigned long guest_base,
|
|||
|
||||
static uint32_t get_elf_hwcap(void)
|
||||
{
|
||||
CPUARMState *e = thread_env;
|
||||
ARMCPU *cpu = ARM_CPU(thread_cpu);
|
||||
uint32_t hwcaps = 0;
|
||||
|
||||
hwcaps |= ARM_HWCAP_ARM_SWP;
|
||||
|
@ -415,7 +417,7 @@ static uint32_t get_elf_hwcap(void)
|
|||
|
||||
/* probe for the extra features */
|
||||
#define GET_FEATURE(feat, hwcap) \
|
||||
do {if (arm_feature(e, feat)) { hwcaps |= hwcap; } } while (0)
|
||||
do { if (arm_feature(&cpu->env, feat)) { hwcaps |= hwcap; } } while (0)
|
||||
GET_FEATURE(ARM_FEATURE_VFP, ARM_HWCAP_ARM_VFP);
|
||||
GET_FEATURE(ARM_FEATURE_IWMMXT, ARM_HWCAP_ARM_IWMMXT);
|
||||
GET_FEATURE(ARM_FEATURE_THUMB2EE, ARM_HWCAP_ARM_THUMBEE);
|
||||
|
@ -619,13 +621,13 @@ enum {
|
|||
|
||||
static uint32_t get_elf_hwcap(void)
|
||||
{
|
||||
CPUPPCState *e = thread_env;
|
||||
PowerPCCPU *cpu = POWERPC_CPU(thread_cpu);
|
||||
uint32_t features = 0;
|
||||
|
||||
/* We don't have to be terribly complete here; the high points are
|
||||
Altivec/FP/SPE support. Anything else is just a bonus. */
|
||||
#define GET_FEATURE(flag, feature) \
|
||||
do {if (e->insns_flags & flag) features |= feature; } while(0)
|
||||
do { if (cpu->env.insns_flags & flag) { features |= feature; } } while (0)
|
||||
GET_FEATURE(PPC_64B, QEMU_PPC_FEATURE_64);
|
||||
GET_FEATURE(PPC_FLOAT, QEMU_PPC_FEATURE_HAS_FPU);
|
||||
GET_FEATURE(PPC_ALTIVEC, QEMU_PPC_FEATURE_HAS_ALTIVEC);
|
||||
|
@ -2628,7 +2630,7 @@ static int fill_note_info(struct elf_note_info *info,
|
|||
long signr, const CPUArchState *env)
|
||||
{
|
||||
#define NUMNOTES 3
|
||||
CPUArchState *cpu = NULL;
|
||||
CPUState *cpu = NULL;
|
||||
TaskState *ts = (TaskState *)env->opaque;
|
||||
int i;
|
||||
|
||||
|
@ -2667,9 +2669,10 @@ static int fill_note_info(struct elf_note_info *info,
|
|||
/* read and fill status of all threads */
|
||||
cpu_list_lock();
|
||||
for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
|
||||
if (cpu == thread_env)
|
||||
if (cpu == thread_cpu) {
|
||||
continue;
|
||||
fill_thread_info(info, cpu);
|
||||
}
|
||||
fill_thread_info(info, (CPUArchState *)cpu->env_ptr);
|
||||
}
|
||||
cpu_list_unlock();
|
||||
|
||||
|
|
|
@ -0,0 +1,33 @@
|
|||
/*
|
||||
* i386 specific CPU ABI and functions for linux-user
|
||||
*
|
||||
* Copyright (c) 2003 Fabrice Bellard
|
||||
*
|
||||
* This library is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2 of the License, or (at your option) any later version.
|
||||
*
|
||||
* This library is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#ifndef TARGET_CPU_H
|
||||
#define TARGET_CPU_H
|
||||
|
||||
static inline void cpu_clone_regs(CPUX86State *env, target_ulong newsp)
|
||||
{
|
||||
if (newsp) {
|
||||
env->regs[R_ESP] = newsp;
|
||||
}
|
||||
env->regs[R_EAX] = 0;
|
||||
}
|
||||
|
||||
/* TODO: need to implement cpu_set_tls() */
|
||||
|
||||
#endif
|
|
@ -89,7 +89,8 @@ static int prepare_binprm(struct linux_binprm *bprm)
|
|||
abi_ulong loader_build_argptr(int envc, int argc, abi_ulong sp,
|
||||
abi_ulong stringp, int push_ptr)
|
||||
{
|
||||
TaskState *ts = (TaskState *)thread_env->opaque;
|
||||
CPUArchState *env = thread_cpu->env_ptr;
|
||||
TaskState *ts = (TaskState *)env->opaque;
|
||||
int n = sizeof(abi_ulong);
|
||||
abi_ulong envp;
|
||||
abi_ulong argv;
|
||||
|
|
|
@ -0,0 +1,34 @@
|
|||
/*
|
||||
* m68k specific CPU ABI and functions for linux-user
|
||||
*
|
||||
* Copyright (c) 2005-2007 CodeSourcery
|
||||
* Written by Paul Brook
|
||||
*
|
||||
* This library is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2 of the License, or (at your option) any later version.
|
||||
*
|
||||
* This library is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#ifndef TARGET_CPU_H
|
||||
#define TARGET_CPU_H
|
||||
|
||||
static inline void cpu_clone_regs(CPUM68KState *env, target_ulong newsp)
|
||||
{
|
||||
if (newsp) {
|
||||
env->aregs[7] = newsp;
|
||||
}
|
||||
env->dregs[0] = 0;
|
||||
}
|
||||
|
||||
/* TODO: need to implement cpu_set_tls() */
|
||||
|
||||
#endif
|
|
@ -120,15 +120,15 @@ void fork_end(int child)
|
|||
if (child) {
|
||||
/* Child processes created by fork() only have a single thread.
|
||||
Discard information about the parent threads. */
|
||||
first_cpu = thread_env;
|
||||
thread_env->next_cpu = NULL;
|
||||
first_cpu = thread_cpu;
|
||||
first_cpu->next_cpu = NULL;
|
||||
pending_cpus = 0;
|
||||
pthread_mutex_init(&exclusive_lock, NULL);
|
||||
pthread_mutex_init(&cpu_list_mutex, NULL);
|
||||
pthread_cond_init(&exclusive_cond, NULL);
|
||||
pthread_cond_init(&exclusive_resume, NULL);
|
||||
pthread_mutex_init(&tcg_ctx.tb_ctx.tb_lock, NULL);
|
||||
gdbserver_fork(thread_env);
|
||||
gdbserver_fork((CPUArchState *)thread_cpu->env_ptr);
|
||||
} else {
|
||||
pthread_mutex_unlock(&exclusive_lock);
|
||||
pthread_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
|
||||
|
@ -148,7 +148,6 @@ static inline void exclusive_idle(void)
|
|||
Must only be called from outside cpu_arm_exec. */
|
||||
static inline void start_exclusive(void)
|
||||
{
|
||||
CPUArchState *other;
|
||||
CPUState *other_cpu;
|
||||
|
||||
pthread_mutex_lock(&exclusive_lock);
|
||||
|
@ -156,8 +155,7 @@ static inline void start_exclusive(void)
|
|||
|
||||
pending_cpus = 1;
|
||||
/* Make all other cpus stop executing. */
|
||||
for (other = first_cpu; other; other = other->next_cpu) {
|
||||
other_cpu = ENV_GET_CPU(other);
|
||||
for (other_cpu = first_cpu; other_cpu; other_cpu = other_cpu->next_cpu) {
|
||||
if (other_cpu->running) {
|
||||
pending_cpus++;
|
||||
cpu_exit(other_cpu);
|
||||
|
@ -234,7 +232,7 @@ void fork_start(void)
|
|||
void fork_end(int child)
|
||||
{
|
||||
if (child) {
|
||||
gdbserver_fork(thread_env);
|
||||
gdbserver_fork((CPUArchState *)thread_cpu->env_ptr);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1304,11 +1302,12 @@ int ppc_dcr_write (ppc_dcr_t *dcr_env, int dcrn, uint32_t val)
|
|||
|
||||
#define EXCP_DUMP(env, fmt, ...) \
|
||||
do { \
|
||||
CPUState *cs = ENV_GET_CPU(env); \
|
||||
fprintf(stderr, fmt , ## __VA_ARGS__); \
|
||||
cpu_dump_state(ENV_GET_CPU(env), stderr, fprintf, 0); \
|
||||
cpu_dump_state(cs, stderr, fprintf, 0); \
|
||||
qemu_log(fmt, ## __VA_ARGS__); \
|
||||
if (qemu_log_enabled()) { \
|
||||
log_cpu_state(env, 0); \
|
||||
log_cpu_state(cs, 0); \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
|
@ -3152,7 +3151,7 @@ void cpu_loop(CPUS390XState *env)
|
|||
|
||||
#endif /* TARGET_S390X */
|
||||
|
||||
THREAD CPUArchState *thread_env;
|
||||
THREAD CPUState *thread_cpu;
|
||||
|
||||
void task_settid(TaskState *ts)
|
||||
{
|
||||
|
@ -3642,7 +3641,7 @@ int main(int argc, char **argv, char **envp)
|
|||
cpu_reset(ENV_GET_CPU(env));
|
||||
#endif
|
||||
|
||||
thread_env = env;
|
||||
thread_cpu = ENV_GET_CPU(env);
|
||||
|
||||
if (getenv("QEMU_STRACE")) {
|
||||
do_strace = 1;
|
||||
|
|
|
@ -0,0 +1,35 @@
|
|||
/*
|
||||
* MicroBlaze specific CPU ABI and functions for linux-user
|
||||
*
|
||||
* Copyright (c) 2009 Edgar E. Iglesias
|
||||
*
|
||||
* This library is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2 of the License, or (at your option) any later version.
|
||||
*
|
||||
* This library is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
#ifndef TARGET_CPU_H
|
||||
#define TARGET_CPU_H
|
||||
|
||||
static inline void cpu_clone_regs(CPUMBState *env, target_ulong newsp)
|
||||
{
|
||||
if (newsp) {
|
||||
env->regs[R_SP] = newsp;
|
||||
}
|
||||
env->regs[3] = 0;
|
||||
}
|
||||
|
||||
static inline void cpu_set_tls(CPUMBState *env, target_ulong newtls)
|
||||
{
|
||||
env->regs[21] = newtls;
|
||||
}
|
||||
|
||||
#endif
|
|
@ -0,0 +1,36 @@
|
|||
/*
|
||||
* MIPS specific CPU ABI and functions for linux-user
|
||||
*
|
||||
* Copyright (c) 2004-2005 Jocelyn Mayer
|
||||
*
|
||||
* This library is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2 of the License, or (at your option) any later version.
|
||||
*
|
||||
* This library is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
#ifndef TARGET_CPU_H
|
||||
#define TARGET_CPU_H
|
||||
|
||||
static inline void cpu_clone_regs(CPUMIPSState *env, target_ulong newsp)
|
||||
{
|
||||
if (newsp) {
|
||||
env->active_tc.gpr[29] = newsp;
|
||||
}
|
||||
env->active_tc.gpr[7] = 0;
|
||||
env->active_tc.gpr[2] = 0;
|
||||
}
|
||||
|
||||
static inline void cpu_set_tls(CPUMIPSState *env, target_ulong newtls)
|
||||
{
|
||||
env->tls_value = newtls;
|
||||
}
|
||||
|
||||
#endif
|
|
@ -0,0 +1 @@
|
|||
#include "../mips/target_cpu.h"
|
|
@ -0,0 +1,33 @@
|
|||
/*
|
||||
* OpenRISC specific CPU ABI and functions for linux-user
|
||||
*
|
||||
* Copyright (c) 2011-2012 Jia Liu <proljc@gmail.com>
|
||||
*
|
||||
* This library is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2 of the License, or (at your option) any later version.
|
||||
*
|
||||
* This library is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#ifndef TARGET_CPU_H
|
||||
#define TARGET_CPU_H
|
||||
|
||||
static inline void cpu_clone_regs(CPUOpenRISCState *env, target_ulong newsp)
|
||||
{
|
||||
if (newsp) {
|
||||
env->gpr[1] = newsp;
|
||||
}
|
||||
env->gpr[2] = 0;
|
||||
}
|
||||
|
||||
/* TODO: need to implement cpu_set_tls() */
|
||||
|
||||
#endif
|
|
@ -0,0 +1,41 @@
|
|||
/*
|
||||
* PowerPC specific CPU ABI and functions for linux-user
|
||||
*
|
||||
* Copyright (c) 2003-2007 Jocelyn Mayer
|
||||
*
|
||||
* This library is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2 of the License, or (at your option) any later version.
|
||||
*
|
||||
* This library is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
#ifndef TARGET_CPU_H
|
||||
#define TARGET_CPU_H
|
||||
|
||||
static inline void cpu_clone_regs(CPUPPCState *env, target_ulong newsp)
|
||||
{
|
||||
if (newsp) {
|
||||
env->gpr[1] = newsp;
|
||||
}
|
||||
env->gpr[3] = 0;
|
||||
}
|
||||
|
||||
static inline void cpu_set_tls(CPUPPCState *env, target_ulong newtls)
|
||||
{
|
||||
#if defined(TARGET_PPC64)
|
||||
/* The kernel checks TIF_32BIT here; we don't support loading 32-bit
|
||||
binaries on PPC64 yet. */
|
||||
env->gpr[13] = newtls;
|
||||
#else
|
||||
env->gpr[2] = newtls;
|
||||
#endif
|
||||
}
|
||||
|
||||
#endif
|
|
@ -16,6 +16,7 @@
|
|||
#include "exec/user/thunk.h"
|
||||
#include "syscall_defs.h"
|
||||
#include "syscall.h"
|
||||
#include "target_cpu.h"
|
||||
#include "target_signal.h"
|
||||
#include "exec/gdbstub.h"
|
||||
#include "qemu/queue.h"
|
||||
|
@ -197,7 +198,7 @@ abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
|
|||
abi_long arg5, abi_long arg6, abi_long arg7,
|
||||
abi_long arg8);
|
||||
void gemu_log(const char *fmt, ...) GCC_FMT_ATTR(1, 2);
|
||||
extern THREAD CPUArchState *thread_env;
|
||||
extern THREAD CPUState *thread_cpu;
|
||||
void cpu_loop(CPUArchState *env);
|
||||
char *target_strerror(int err);
|
||||
int get_osversion(void);
|
||||
|
|
|
@ -0,0 +1,39 @@
|
|||
/*
|
||||
* S/390 specific CPU ABI and functions for linux-user
|
||||
*
|
||||
* Copyright (c) 2009 Ulrich Hecht
|
||||
*
|
||||
* This library is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2 of the License, or (at your option) any later version.
|
||||
*
|
||||
* This library is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* Contributions after 2012-10-29 are licensed under the terms of the
|
||||
* GNU GPL, version 2 or (at your option) any later version.
|
||||
*
|
||||
* You should have received a copy of the GNU (Lesser) General Public
|
||||
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
#ifndef TARGET_CPU_H
|
||||
#define TARGET_CPU_H
|
||||
|
||||
static inline void cpu_clone_regs(CPUS390XState *env, target_ulong newsp)
|
||||
{
|
||||
if (newsp) {
|
||||
env->regs[15] = newsp;
|
||||
}
|
||||
env->regs[2] = 0;
|
||||
}
|
||||
|
||||
static inline void cpu_set_tls(CPUS390XState *env, target_ulong newtls)
|
||||
{
|
||||
env->aregs[0] = newtls >> 32;
|
||||
env->aregs[1] = newtls & 0xffffffffULL;
|
||||
}
|
||||
|
||||
#endif
|
|
@ -0,0 +1,35 @@
|
|||
/*
|
||||
* SH4 specific CPU ABI and functions for linux-user
|
||||
*
|
||||
* Copyright (c) 2005 Samuel Tardieu
|
||||
*
|
||||
* This library is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2 of the License, or (at your option) any later version.
|
||||
*
|
||||
* This library is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
#ifndef TARGET_CPU_H
|
||||
#define TARGET_CPU_H
|
||||
|
||||
static inline void cpu_clone_regs(CPUSH4State *env, target_ulong newsp)
|
||||
{
|
||||
if (newsp) {
|
||||
env->gregs[15] = newsp;
|
||||
}
|
||||
env->gregs[0] = 0;
|
||||
}
|
||||
|
||||
static inline void cpu_set_tls(CPUSH4State *env, target_ulong newtls)
|
||||
{
|
||||
env->gbr = newtls;
|
||||
}
|
||||
|
||||
#endif
|
|
@ -388,17 +388,18 @@ static inline void free_sigqueue(CPUArchState *env, struct sigqueue *q)
|
|||
/* abort execution with signal */
|
||||
static void QEMU_NORETURN force_sig(int target_sig)
|
||||
{
|
||||
TaskState *ts = (TaskState *)thread_env->opaque;
|
||||
CPUArchState *env = thread_cpu->env_ptr;
|
||||
TaskState *ts = (TaskState *)env->opaque;
|
||||
int host_sig, core_dumped = 0;
|
||||
struct sigaction act;
|
||||
host_sig = target_to_host_signal(target_sig);
|
||||
gdb_signalled(thread_env, target_sig);
|
||||
gdb_signalled(env, target_sig);
|
||||
|
||||
/* dump core if supported by target binary format */
|
||||
if (core_dump_signal(target_sig) && (ts->bprm->core_dump != NULL)) {
|
||||
stop_all_tasks();
|
||||
core_dumped =
|
||||
((*ts->bprm->core_dump)(target_sig, thread_env) == 0);
|
||||
((*ts->bprm->core_dump)(target_sig, env) == 0);
|
||||
}
|
||||
if (core_dumped) {
|
||||
/* we already dumped the core of target process, we don't want
|
||||
|
@ -503,6 +504,7 @@ int queue_signal(CPUArchState *env, int sig, target_siginfo_t *info)
|
|||
static void host_signal_handler(int host_signum, siginfo_t *info,
|
||||
void *puc)
|
||||
{
|
||||
CPUArchState *env = thread_cpu->env_ptr;
|
||||
int sig;
|
||||
target_siginfo_t tinfo;
|
||||
|
||||
|
@ -522,9 +524,9 @@ static void host_signal_handler(int host_signum, siginfo_t *info,
|
|||
fprintf(stderr, "qemu: got signal %d\n", sig);
|
||||
#endif
|
||||
host_to_target_siginfo_noswap(&tinfo, info);
|
||||
if (queue_signal(thread_env, sig, &tinfo) == 1) {
|
||||
if (queue_signal(env, sig, &tinfo) == 1) {
|
||||
/* interrupt the virtual CPU as soon as possible */
|
||||
cpu_exit(ENV_GET_CPU(thread_env));
|
||||
cpu_exit(thread_cpu);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -0,0 +1,36 @@
|
|||
/*
|
||||
* SPARC specific CPU ABI and functions for linux-user
|
||||
*
|
||||
* Copyright (C) 2003 Thomas M. Ogrisegg <tom@fnord.at>
|
||||
* Copyright (C) 2003-2005 Fabrice Bellard
|
||||
*
|
||||
* This library is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2 of the License, or (at your option) any later version.
|
||||
*
|
||||
* This library is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
#ifndef TARGET_CPU_H
|
||||
#define TARGET_CPU_H
|
||||
|
||||
static inline void cpu_clone_regs(CPUSPARCState *env, target_ulong newsp)
|
||||
{
|
||||
if (newsp) {
|
||||
env->regwptr[22] = newsp;
|
||||
}
|
||||
env->regwptr[0] = 0;
|
||||
/* FIXME: Do we also need to clear CF? */
|
||||
/* XXXXX */
|
||||
printf("HELPME: %s:%d\n", __FILE__, __LINE__);
|
||||
}
|
||||
|
||||
/* TODO: need to implement cpu_set_tls() */
|
||||
|
||||
#endif
|
|
@ -0,0 +1 @@
|
|||
#include "../sparc/target_cpu.h"
|
|
@ -4250,8 +4250,8 @@ static void *clone_func(void *arg)
|
|||
|
||||
env = info->env;
|
||||
cpu = ENV_GET_CPU(env);
|
||||
thread_env = env;
|
||||
ts = (TaskState *)thread_env->opaque;
|
||||
thread_cpu = cpu;
|
||||
ts = (TaskState *)env->opaque;
|
||||
info->tid = gettid();
|
||||
cpu->host_tid = info->tid;
|
||||
task_settid(ts);
|
||||
|
@ -5132,6 +5132,9 @@ abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
|
|||
abi_long arg5, abi_long arg6, abi_long arg7,
|
||||
abi_long arg8)
|
||||
{
|
||||
#ifdef CONFIG_USE_NPTL
|
||||
CPUState *cpu = ENV_GET_CPU(cpu_env);
|
||||
#endif
|
||||
abi_long ret;
|
||||
struct stat st;
|
||||
struct statfs stfs;
|
||||
|
@ -5146,42 +5149,43 @@ abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
|
|||
switch(num) {
|
||||
case TARGET_NR_exit:
|
||||
#ifdef CONFIG_USE_NPTL
|
||||
/* In old applications this may be used to implement _exit(2).
|
||||
However in threaded applictions it is used for thread termination,
|
||||
and _exit_group is used for application termination.
|
||||
Do thread termination if we have more then one thread. */
|
||||
/* FIXME: This probably breaks if a signal arrives. We should probably
|
||||
be disabling signals. */
|
||||
if (first_cpu->next_cpu) {
|
||||
TaskState *ts;
|
||||
CPUArchState **lastp;
|
||||
CPUArchState *p;
|
||||
/* In old applications this may be used to implement _exit(2).
|
||||
However in threaded applictions it is used for thread termination,
|
||||
and _exit_group is used for application termination.
|
||||
Do thread termination if we have more then one thread. */
|
||||
/* FIXME: This probably breaks if a signal arrives. We should probably
|
||||
be disabling signals. */
|
||||
if (first_cpu->next_cpu) {
|
||||
TaskState *ts;
|
||||
CPUState **lastp;
|
||||
CPUState *p;
|
||||
|
||||
cpu_list_lock();
|
||||
lastp = &first_cpu;
|
||||
p = first_cpu;
|
||||
while (p && p != (CPUArchState *)cpu_env) {
|
||||
lastp = &p->next_cpu;
|
||||
p = p->next_cpu;
|
||||
}
|
||||
/* If we didn't find the CPU for this thread then something is
|
||||
horribly wrong. */
|
||||
if (!p)
|
||||
abort();
|
||||
/* Remove the CPU from the list. */
|
||||
*lastp = p->next_cpu;
|
||||
cpu_list_unlock();
|
||||
ts = ((CPUArchState *)cpu_env)->opaque;
|
||||
if (ts->child_tidptr) {
|
||||
put_user_u32(0, ts->child_tidptr);
|
||||
sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
|
||||
NULL, NULL, 0);
|
||||
}
|
||||
thread_env = NULL;
|
||||
object_unref(OBJECT(ENV_GET_CPU(cpu_env)));
|
||||
g_free(ts);
|
||||
pthread_exit(NULL);
|
||||
}
|
||||
cpu_list_lock();
|
||||
lastp = &first_cpu;
|
||||
p = first_cpu;
|
||||
while (p && p != cpu) {
|
||||
lastp = &p->next_cpu;
|
||||
p = p->next_cpu;
|
||||
}
|
||||
/* If we didn't find the CPU for this thread then something is
|
||||
horribly wrong. */
|
||||
if (!p) {
|
||||
abort();
|
||||
}
|
||||
/* Remove the CPU from the list. */
|
||||
*lastp = p->next_cpu;
|
||||
cpu_list_unlock();
|
||||
ts = ((CPUArchState *)cpu_env)->opaque;
|
||||
if (ts->child_tidptr) {
|
||||
put_user_u32(0, ts->child_tidptr);
|
||||
sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
|
||||
NULL, NULL, 0);
|
||||
}
|
||||
thread_cpu = NULL;
|
||||
object_unref(OBJECT(ENV_GET_CPU(cpu_env)));
|
||||
g_free(ts);
|
||||
pthread_exit(NULL);
|
||||
}
|
||||
#endif
|
||||
#ifdef TARGET_GPROF
|
||||
_mcleanup();
|
||||
|
|
|
@ -0,0 +1,27 @@
|
|||
/*
|
||||
* UniCore32 specific CPU ABI and functions for linux-user
|
||||
*
|
||||
* Copyright (C) 2010-2012 Guan Xuetao
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation, or (at your option) any
|
||||
* later version. See the COPYING file in the top-level directory.
|
||||
*/
|
||||
#ifndef TARGET_CPU_H
|
||||
#define TARGET_CPU_H
|
||||
|
||||
static inline void cpu_clone_regs(CPUUniCore32State *env, target_ulong newsp)
|
||||
{
|
||||
if (newsp) {
|
||||
env->regs[29] = newsp;
|
||||
}
|
||||
env->regs[0] = 0;
|
||||
}
|
||||
|
||||
static inline void cpu_set_tls(CPUUniCore32State *env, target_ulong newtls)
|
||||
{
|
||||
env->regs[16] = newtls;
|
||||
}
|
||||
|
||||
#endif
|
|
@ -0,0 +1 @@
|
|||
#include "../i386/target_cpu.h"
|
memory.c
@ -838,9 +838,8 @@ static uint64_t unassigned_mem_read(void *opaque, hwaddr addr,
|
|||
#ifdef DEBUG_UNASSIGNED
|
||||
printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
|
||||
#endif
|
||||
if (cpu_single_env != NULL) {
|
||||
cpu_unassigned_access(ENV_GET_CPU(cpu_single_env),
|
||||
addr, false, false, 0, size);
|
||||
if (current_cpu != NULL) {
|
||||
cpu_unassigned_access(current_cpu, addr, false, false, 0, size);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
@ -851,9 +850,8 @@ static void unassigned_mem_write(void *opaque, hwaddr addr,
|
|||
#ifdef DEBUG_UNASSIGNED
|
||||
printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
|
||||
#endif
|
||||
if (cpu_single_env != NULL) {
|
||||
cpu_unassigned_access(ENV_GET_CPU(cpu_single_env),
|
||||
addr, true, false, 0, size);
|
||||
if (current_cpu != NULL) {
|
||||
cpu_unassigned_access(current_cpu, addr, true, false, 0, size);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -165,13 +165,13 @@ void memory_mapping_list_init(MemoryMappingList *list)
|
|||
QTAILQ_INIT(&list->head);
|
||||
}
|
||||
|
||||
static CPUArchState *find_paging_enabled_cpu(CPUArchState *start_cpu)
|
||||
static CPUState *find_paging_enabled_cpu(CPUState *start_cpu)
|
||||
{
|
||||
CPUArchState *env;
|
||||
CPUState *cpu;
|
||||
|
||||
for (env = start_cpu; env != NULL; env = env->next_cpu) {
|
||||
if (cpu_paging_enabled(ENV_GET_CPU(env))) {
|
||||
return env;
|
||||
for (cpu = start_cpu; cpu != NULL; cpu = cpu->next_cpu) {
|
||||
if (cpu_paging_enabled(cpu)) {
|
||||
return cpu;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -180,15 +180,15 @@ static CPUArchState *find_paging_enabled_cpu(CPUArchState *start_cpu)
|
|||
|
||||
void qemu_get_guest_memory_mapping(MemoryMappingList *list, Error **errp)
|
||||
{
|
||||
CPUArchState *env, *first_paging_enabled_cpu;
|
||||
CPUState *cpu, *first_paging_enabled_cpu;
|
||||
RAMBlock *block;
|
||||
ram_addr_t offset, length;
|
||||
|
||||
first_paging_enabled_cpu = find_paging_enabled_cpu(first_cpu);
|
||||
if (first_paging_enabled_cpu) {
|
||||
for (env = first_paging_enabled_cpu; env != NULL; env = env->next_cpu) {
|
||||
for (cpu = first_paging_enabled_cpu; cpu != NULL; cpu = cpu->next_cpu) {
|
||||
Error *err = NULL;
|
||||
cpu_get_memory_mapping(ENV_GET_CPU(env), list, &err);
|
||||
cpu_get_memory_mapping(cpu, list, &err);
|
||||
if (err) {
|
||||
error_propagate(errp, err);
|
||||
return;
|
||||
|
|
|
@ -1806,14 +1806,12 @@ static void do_info_mtree(Monitor *mon, const QDict *qdict)
|
|||
static void do_info_numa(Monitor *mon, const QDict *qdict)
|
||||
{
|
||||
int i;
|
||||
CPUArchState *env;
|
||||
CPUState *cpu;
|
||||
|
||||
monitor_printf(mon, "%d nodes\n", nb_numa_nodes);
|
||||
for (i = 0; i < nb_numa_nodes; i++) {
|
||||
monitor_printf(mon, "node %d cpus:", i);
|
||||
for (env = first_cpu; env != NULL; env = env->next_cpu) {
|
||||
cpu = ENV_GET_CPU(env);
|
||||
for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
|
||||
if (cpu->numa_node == i) {
|
||||
monitor_printf(mon, " %d", cpu->cpu_index);
|
||||
}
|
||||
|
|
|
@@ -22,6 +22,7 @@
 #include "qom/cpu.h"
 #include "sysemu/kvm.h"
 #include "qemu/notify.h"
+#include "qemu/log.h"
 #include "sysemu/sysemu.h"
 
 typedef struct CPUExistsArgs {
@@ -187,6 +188,13 @@ void cpu_reset(CPUState *cpu)
 
 static void cpu_common_reset(CPUState *cpu)
 {
+    CPUClass *cc = CPU_GET_CLASS(cpu);
+
+    if (qemu_loglevel_mask(CPU_LOG_RESET)) {
+        qemu_log("CPU Reset (CPU %d)\n", cpu->cpu_index);
+        log_cpu_state(cpu, cc->reset_dump_flags);
+    }
+
     cpu->exit_request = 0;
     cpu->interrupt_request = 0;
     cpu->current_tb = NULL;
@@ -67,7 +67,7 @@ typedef struct AlphaCPU {
 
 static inline AlphaCPU *alpha_env_get_cpu(CPUAlphaState *env)
 {
-    return ALPHA_CPU(container_of(env, AlphaCPU, env));
+    return container_of(env, AlphaCPU, env);
 }
 
 #define ENV_GET_CPU(e) CPU(alpha_env_get_cpu(e))
@ -498,22 +498,6 @@ static inline void cpu_get_tb_cpu_state(CPUAlphaState *env, target_ulong *pc,
|
|||
*pflags = flags;
|
||||
}
|
||||
|
||||
#if defined(CONFIG_USER_ONLY)
|
||||
static inline void cpu_clone_regs(CPUAlphaState *env, target_ulong newsp)
|
||||
{
|
||||
if (newsp) {
|
||||
env->ir[IR_SP] = newsp;
|
||||
}
|
||||
env->ir[IR_V0] = 0;
|
||||
env->ir[IR_A3] = 0;
|
||||
}
|
||||
|
||||
static inline void cpu_set_tls(CPUAlphaState *env, target_ulong newtls)
|
||||
{
|
||||
env->unique = newtls;
|
||||
}
|
||||
#endif
|
||||
|
||||
static inline bool cpu_has_work(CPUState *cpu)
|
||||
{
|
||||
/* Here we are checking to see if the CPU should wake up from HALT.
|
||||
|
|
|
@ -3375,10 +3375,11 @@ static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
|
|||
return ret;
|
||||
}
|
||||
|
||||
static inline void gen_intermediate_code_internal(CPUAlphaState *env,
|
||||
static inline void gen_intermediate_code_internal(AlphaCPU *cpu,
|
||||
TranslationBlock *tb,
|
||||
int search_pc)
|
||||
bool search_pc)
|
||||
{
|
||||
CPUAlphaState *env = &cpu->env;
|
||||
DisasContext ctx, *ctxp = &ctx;
|
||||
target_ulong pc_start;
|
||||
uint32_t insn;
|
||||
|
@ -3502,12 +3503,12 @@ static inline void gen_intermediate_code_internal(CPUAlphaState *env,
|
|||
|
||||
void gen_intermediate_code (CPUAlphaState *env, struct TranslationBlock *tb)
|
||||
{
|
||||
gen_intermediate_code_internal(env, tb, 0);
|
||||
gen_intermediate_code_internal(alpha_env_get_cpu(env), tb, false);
|
||||
}
|
||||
|
||||
void gen_intermediate_code_pc (CPUAlphaState *env, struct TranslationBlock *tb)
|
||||
{
|
||||
gen_intermediate_code_internal(env, tb, 1);
|
||||
gen_intermediate_code_internal(alpha_env_get_cpu(env), tb, true);
|
||||
}
|
||||
|
||||
void restore_state_to_opc(CPUAlphaState *env, TranslationBlock *tb, int pc_pos)
|
||||
|
|
|
@ -127,7 +127,7 @@ typedef struct ARMCPU {
|
|||
|
||||
static inline ARMCPU *arm_env_get_cpu(CPUARMState *env)
|
||||
{
|
||||
return ARM_CPU(container_of(env, ARMCPU, env));
|
||||
return container_of(env, ARMCPU, env);
|
||||
}
|
||||
|
||||
#define ENV_GET_CPU(e) CPU(arm_env_get_cpu(e))
|
||||
|
|
|
@ -63,11 +63,6 @@ static void arm_cpu_reset(CPUState *s)
|
|||
ARMCPUClass *acc = ARM_CPU_GET_CLASS(cpu);
|
||||
CPUARMState *env = &cpu->env;
|
||||
|
||||
if (qemu_loglevel_mask(CPU_LOG_RESET)) {
|
||||
qemu_log("CPU Reset (CPU %d)\n", s->cpu_index);
|
||||
log_cpu_state(env, 0);
|
||||
}
|
||||
|
||||
acc->parent_reset(s);
|
||||
|
||||
memset(env, 0, offsetof(CPUARMState, breakpoints));
|
||||
|
|
|
@ -249,11 +249,6 @@ int cpu_arm_handle_mmu_fault (CPUARMState *env, target_ulong address, int rw,
|
|||
int mmu_idx);
|
||||
#define cpu_handle_mmu_fault cpu_arm_handle_mmu_fault
|
||||
|
||||
static inline void cpu_set_tls(CPUARMState *env, target_ulong newtls)
|
||||
{
|
||||
env->cp15.c13_tls2 = newtls;
|
||||
}
|
||||
|
||||
#define CPSR_M (0x1f)
|
||||
#define CPSR_T (1 << 5)
|
||||
#define CPSR_F (1 << 6)
|
||||
|
@ -734,15 +729,6 @@ static inline int cpu_mmu_index (CPUARMState *env)
|
|||
return (env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR ? 1 : 0;
|
||||
}
|
||||
|
||||
#if defined(CONFIG_USER_ONLY)
|
||||
static inline void cpu_clone_regs(CPUARMState *env, target_ulong newsp)
|
||||
{
|
||||
if (newsp)
|
||||
env->regs[13] = newsp;
|
||||
env->regs[0] = 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
#include "exec/cpu-all.h"
|
||||
|
||||
/* Bit usage in the TB flags field: */
|
||||
|
|
|
@@ -9796,10 +9796,11 @@ undef:
 /* generate intermediate code in gen_opc_buf and gen_opparam_buf for
    basic block 'tb'. If search_pc is TRUE, also generate PC
    information for each intermediate instruction. */
-static inline void gen_intermediate_code_internal(CPUARMState *env,
+static inline void gen_intermediate_code_internal(ARMCPU *cpu,
                                                   TranslationBlock *tb,
-                                                  int search_pc)
+                                                  bool search_pc)
 {
+    CPUARMState *env = &cpu->env;
     DisasContext dc1, *dc = &dc1;
     CPUBreakpoint *bp;
     uint16_t *gen_opc_end;

@@ -10072,12 +10073,12 @@ done_generating:
 
 void gen_intermediate_code(CPUARMState *env, TranslationBlock *tb)
 {
-    gen_intermediate_code_internal(env, tb, 0);
+    gen_intermediate_code_internal(arm_env_get_cpu(env), tb, false);
 }
 
 void gen_intermediate_code_pc(CPUARMState *env, TranslationBlock *tb)
 {
-    gen_intermediate_code_internal(env, tb, 1);
+    gen_intermediate_code_internal(arm_env_get_cpu(env), tb, true);
 }
 
 static const char *cpu_mode_names[16] = {

@@ -66,7 +66,7 @@ typedef struct CRISCPU {
 
 static inline CRISCPU *cris_env_get_cpu(CPUCRISState *env)
 {
-    return CRIS_CPU(container_of(env, CRISCPU, env));
+    return container_of(env, CRISCPU, env);
 }
 
 #define ENV_GET_CPU(e) CPU(cris_env_get_cpu(e))

@@ -34,11 +34,6 @@ static void cris_cpu_reset(CPUState *s)
     CPUCRISState *env = &cpu->env;
     uint32_t vr;
 
-    if (qemu_loglevel_mask(CPU_LOG_RESET)) {
-        qemu_log("CPU Reset (CPU %d)\n", s->cpu_index);
-        log_cpu_state(env, 0);
-    }
-
     ccc->parent_reset(s);
 
     vr = env->pregs[PR_VR];

@@ -247,20 +247,6 @@ int cpu_cris_handle_mmu_fault(CPUCRISState *env, target_ulong address, int rw,
                               int mmu_idx);
 #define cpu_handle_mmu_fault cpu_cris_handle_mmu_fault
 
-#if defined(CONFIG_USER_ONLY)
-static inline void cpu_clone_regs(CPUCRISState *env, target_ulong newsp)
-{
-    if (newsp)
-        env->regs[14] = newsp;
-    env->regs[10] = 0;
-}
-#endif
-
-static inline void cpu_set_tls(CPUCRISState *env, target_ulong newtls)
-{
-    env->pregs[PR_PID] = (env->pregs[PR_PID] & 0xff) | newtls;
-}
-
 /* Support function regs. */
 #define SFR_RW_GC_CFG 0][0
 #define SFR_RW_MM_CFG env->pregs[PR_SRS]][0

@@ -3161,10 +3161,11 @@ static void check_breakpoint(CPUCRISState *env, DisasContext *dc)
  */
 
 /* generate intermediate code for basic block 'tb'. */
-static void
-gen_intermediate_code_internal(CPUCRISState *env, TranslationBlock *tb,
-                               int search_pc)
+static inline void
+gen_intermediate_code_internal(CRISCPU *cpu, TranslationBlock *tb,
+                               bool search_pc)
 {
+    CPUCRISState *env = &cpu->env;
     uint16_t *gen_opc_end;
     uint32_t pc_start;
     unsigned int insn_len;

@@ -3419,12 +3420,12 @@ gen_intermediate_code_internal(CPUCRISState *env, TranslationBlock *tb,
 
 void gen_intermediate_code (CPUCRISState *env, struct TranslationBlock *tb)
 {
-    gen_intermediate_code_internal(env, tb, 0);
+    gen_intermediate_code_internal(cris_env_get_cpu(env), tb, false);
 }
 
 void gen_intermediate_code_pc (CPUCRISState *env, struct TranslationBlock *tb)
 {
-    gen_intermediate_code_internal(env, tb, 1);
+    gen_intermediate_code_internal(cris_env_get_cpu(env), tb, true);
 }
 
 void cris_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,

@@ -35,7 +35,7 @@ typedef struct {
 } x86_64_elf_prstatus;
 
 static int x86_64_write_elf64_note(WriteCoreDumpFunction f,
-                                   CPUArchState *env, int id,
+                                   CPUX86State *env, int id,
                                    void *opaque)
 {
     x86_64_user_regs_struct regs;

@@ -119,7 +119,7 @@ typedef struct {
     char pad3[4];
 } x86_elf_prstatus;
 
-static void x86_fill_elf_prstatus(x86_elf_prstatus *prstatus, CPUArchState *env,
+static void x86_fill_elf_prstatus(x86_elf_prstatus *prstatus, CPUX86State *env,
                                   int id)
 {
     memset(prstatus, 0, sizeof(x86_elf_prstatus));

@@ -144,7 +144,7 @@ static void x86_fill_elf_prstatus(x86_elf_prstatus *prstatus, CPUArchState *env,
     prstatus->pid = id;
 }
 
-static int x86_write_elf64_note(WriteCoreDumpFunction f, CPUArchState *env,
+static int x86_write_elf64_note(WriteCoreDumpFunction f, CPUX86State *env,
                                 int id, void *opaque)
 {
     x86_elf_prstatus prstatus;

@@ -185,7 +185,8 @@ int x86_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cs,
     X86CPU *cpu = X86_CPU(cs);
     int ret;
 #ifdef TARGET_X86_64
-    bool lma = !!(first_cpu->hflags & HF_LMA_MASK);
+    X86CPU *first_x86_cpu = X86_CPU(first_cpu);
+    bool lma = !!(first_x86_cpu->env.hflags & HF_LMA_MASK);
 
     if (lma) {
         ret = x86_64_write_elf64_note(f, &cpu->env, cpuid, opaque);

@@ -273,7 +274,7 @@ static void copy_segment(QEMUCPUSegment *d, SegmentCache *s)
     d->base = s->base;
 }
 
-static void qemu_get_cpustate(QEMUCPUState *s, CPUArchState *env)
+static void qemu_get_cpustate(QEMUCPUState *s, CPUX86State *env)
 {
     memset(s, 0, sizeof(QEMUCPUState));
 

@@ -320,7 +321,7 @@ static void qemu_get_cpustate(QEMUCPUState *s, CPUArchState *env)
 }
 
 static inline int cpu_write_qemu_note(WriteCoreDumpFunction f,
-                                      CPUArchState *env,
+                                      CPUX86State *env,
                                       void *opaque,
                                       int type)
 {

@@ -394,7 +395,9 @@ int cpu_get_dump_info(ArchDumpInfo *info)
     RAMBlock *block;
 
 #ifdef TARGET_X86_64
-    lma = !!(first_cpu->hflags & HF_LMA_MASK);
+    X86CPU *first_x86_cpu = X86_CPU(first_cpu);
+
+    lma = !!(first_x86_cpu->env.hflags & HF_LMA_MASK);
 #endif
 
     if (lma) {

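The two first_cpu hunks above show the core of the "first_cpu, next_cpu, cpu_single_env" refactoring: the global CPU list is now linked through the generic CPUState, so architecture-specific fields such as hflags are reached by downcasting with X86_CPU() and going through ->env instead of treating first_cpu as an x86 env directly. A compilable toy model of that access pattern follows; the struct layout and macro are simplified stand-ins for the QOM implementation.

/* Toy model: a generic CPUState base embedded as the first member of an
 * arch-specific CPU object; all names are illustrative. */
#include <stdbool.h>
#include <stdio.h>

typedef struct CPUState { struct CPUState *next_cpu; } CPUState;
typedef struct CPUX86LikeState { unsigned hflags; } CPUX86LikeState;
typedef struct X86LikeCPU { CPUState parent; CPUX86LikeState env; } X86LikeCPU;

#define HF_LMA_MASK (1u << 14)
/* Stand-in for the QOM X86_CPU() downcast: the base object is the first member. */
#define X86_LIKE_CPU(cs) ((X86LikeCPU *)(cs))

CPUState *first_cpu;   /* now a CPUState, no longer an arch env */

static bool first_cpu_is_long_mode(void)
{
    X86LikeCPU *cpu = X86_LIKE_CPU(first_cpu);
    return !!(cpu->env.hflags & HF_LMA_MASK);   /* arch field lives behind ->env */
}

int main(void)
{
    X86LikeCPU cpu = { .env = { .hflags = HF_LMA_MASK } };
    first_cpu = &cpu.parent;
    printf("LMA: %d\n", first_cpu_is_long_mode());
    return 0;
}
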
@@ -72,7 +72,7 @@ typedef struct X86CPU {
 
 static inline X86CPU *x86_env_get_cpu(CPUX86State *env)
 {
-    return X86_CPU(container_of(env, X86CPU, env));
+    return container_of(env, X86CPU, env);
 }
 
 #define ENV_GET_CPU(e) CPU(x86_env_get_cpu(e))

@@ -2175,11 +2175,6 @@ static void x86_cpu_reset(CPUState *s)
     CPUX86State *env = &cpu->env;
     int i;
 
-    if (qemu_loglevel_mask(CPU_LOG_RESET)) {
-        qemu_log("CPU Reset (CPU %d)\n", s->cpu_index);
-        log_cpu_state(env, CPU_DUMP_FPU | CPU_DUMP_CCOP);
-    }
-
     xcc->parent_reset(s);
 

@@ -2523,6 +2518,7 @@ static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
 
     xcc->parent_reset = cc->reset;
     cc->reset = x86_cpu_reset;
+    cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;
 
     cc->do_interrupt = x86_cpu_do_interrupt;
     cc->dump_state = x86_cpu_dump_state;

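Taken together with the ARM, CRIS and LM32 reset hunks above, this is the "centrally log CPU reset" part of the series: every target drops its hand-rolled qemu_log block, and instead registers per-class reset_dump_flags so the common reset path can log once for all targets. A self-contained toy in C sketches that central path; the structure mirrors what the diff shows, but the exact placement and names in QEMU's shared code are assumptions, not part of this diff.

#include <stdio.h>

typedef struct CPUState CPUState;
typedef struct CPUClass {
    void (*reset)(CPUState *cpu);
    unsigned reset_dump_flags;      /* what to dump when logging a reset */
} CPUClass;

struct CPUState {
    const CPUClass *klass;
    int cpu_index;
};

static int reset_logging_enabled = 1;   /* stands in for CPU_LOG_RESET being set */

static void log_cpu_state(CPUState *cpu, unsigned flags)
{
    printf("  (register dump for CPU %d, flags=%#x)\n", cpu->cpu_index, flags);
}

/* One shared reset entry point does the logging; per-target reset handlers
 * no longer open-code the qemu_log block removed in the hunks above. */
static void cpu_reset(CPUState *cpu)
{
    if (reset_logging_enabled) {
        printf("CPU Reset (CPU %d)\n", cpu->cpu_index);
        log_cpu_state(cpu, cpu->klass->reset_dump_flags);
    }
    cpu->klass->reset(cpu);
}

static void x86_like_reset(CPUState *cpu) { (void)cpu; /* clear target state */ }

int main(void)
{
    CPUClass cls = { .reset = x86_like_reset, .reset_dump_flags = 0x3 };
    CPUState cpu = { .klass = &cls, .cpu_index = 0 };
    cpu_reset(&cpu);
    return 0;
}
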
@@ -1125,15 +1125,6 @@ static inline target_long lshift(target_long x, int n)
 /* translate.c */
 void optimize_flags_init(void);
 
-#if defined(CONFIG_USER_ONLY)
-static inline void cpu_clone_regs(CPUX86State *env, target_ulong newsp)
-{
-    if (newsp)
-        env->regs[R_ESP] = newsp;
-    env->regs[R_EAX] = 0;
-}
-#endif
-
 #include "exec/cpu-all.h"
 #include "svm.h"
 

@@ -1229,7 +1220,7 @@ void cpu_vmexit(CPUX86State *nenv, uint32_t exit_code, uint64_t exit_info_1);
 /* seg_helper.c */
 void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw);
 
-void do_smm_enter(CPUX86State *env1);
+void do_smm_enter(X86CPU *cpu);
 
 void cpu_report_tpr_access(CPUX86State *env, TPRAccess access);
 

@@ -1188,6 +1188,7 @@ void cpu_x86_inject_mce(Monitor *mon, X86CPU *cpu, int bank,
                         uint64_t status, uint64_t mcg_status, uint64_t addr,
                         uint64_t misc, int flags)
 {
+    CPUState *cs = CPU(cpu);
     CPUX86State *cenv = &cpu->env;
     MCEInjectionParams params = {
         .mon = mon,

@@ -1200,7 +1201,6 @@ void cpu_x86_inject_mce(Monitor *mon, X86CPU *cpu, int bank,
         .flags = flags,
     };
     unsigned bank_num = cenv->mcg_cap & 0xff;
-    CPUX86State *env;
 
     if (!cenv->mcg_cap) {
         monitor_printf(mon, "MCE injection not supported\n");

@@ -1220,19 +1220,22 @@ void cpu_x86_inject_mce(Monitor *mon, X86CPU *cpu, int bank,
         return;
     }
 
-    run_on_cpu(CPU(cpu), do_inject_x86_mce, &params);
+    run_on_cpu(cs, do_inject_x86_mce, &params);
     if (flags & MCE_INJECT_BROADCAST) {
+        CPUState *other_cs;
+
         params.bank = 1;
         params.status = MCI_STATUS_VAL | MCI_STATUS_UC;
         params.mcg_status = MCG_STATUS_MCIP | MCG_STATUS_RIPV;
         params.addr = 0;
         params.misc = 0;
-        for (env = first_cpu; env != NULL; env = env->next_cpu) {
-            if (cenv == env) {
+        for (other_cs = first_cpu; other_cs != NULL;
+             other_cs = other_cs->next_cpu) {
+            if (other_cs == cs) {
                 continue;
             }
-            params.cpu = x86_env_get_cpu(env);
-            run_on_cpu(CPU(cpu), do_inject_x86_mce, &params);
+            params.cpu = X86_CPU(other_cs);
+            run_on_cpu(other_cs, do_inject_x86_mce, &params);
         }
     }
 }

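The MCE broadcast hunk above is the clearest example of walking the CPU list through CPUState: the loop now iterates a CPUState pointer over first_cpu/next_cpu, skips the originating CPU by comparing CPUState pointers, and hands the correct CPUState to run_on_cpu() (the old code queued every work item on the originating CPU). A small self-contained C model of that loop follows; run_on_cpu() here is a trivial stand-in that just invokes the work item.

#include <stdio.h>

typedef struct CPUState {
    struct CPUState *next_cpu;    /* the CPU list is now linked through CPUState */
    int cpu_index;
} CPUState;

CPUState *first_cpu;

typedef void (*run_on_cpu_fn)(void *data);

/* Stand-in for run_on_cpu(): just run the work item immediately. */
static void run_on_cpu(CPUState *cs, run_on_cpu_fn fn, void *data)
{
    (void)cs;
    fn(data);
}

static void do_work(void *data)
{
    printf("injecting on CPU %d\n", *(int *)data);
}

/* Broadcast to every CPU except the originating one, mirroring the new loop. */
static void broadcast_except(CPUState *self)
{
    CPUState *other_cs;

    for (other_cs = first_cpu; other_cs != NULL; other_cs = other_cs->next_cpu) {
        if (other_cs == self) {
            continue;
        }
        run_on_cpu(other_cs, do_work, &other_cs->cpu_index);
    }
}

int main(void)
{
    CPUState cpus[3] = { { &cpus[1], 0 }, { &cpus[2], 1 }, { NULL, 2 } };
    first_cpu = &cpus[0];
    broadcast_except(&cpus[0]);
    return 0;
}
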
@@ -345,20 +345,22 @@ int kvm_arch_on_sigbus_vcpu(CPUState *c, int code, void *addr)
 
 int kvm_arch_on_sigbus(int code, void *addr)
 {
-    if ((first_cpu->mcg_cap & MCG_SER_P) && addr && code == BUS_MCEERR_AO) {
+    X86CPU *cpu = X86_CPU(first_cpu);
+
+    if ((cpu->env.mcg_cap & MCG_SER_P) && addr && code == BUS_MCEERR_AO) {
         ram_addr_t ram_addr;
         hwaddr paddr;
 
         /* Hope we are lucky for AO MCE */
         if (qemu_ram_addr_from_host(addr, &ram_addr) == NULL ||
-            !kvm_physical_memory_addr_from_host(CPU(first_cpu)->kvm_state,
+            !kvm_physical_memory_addr_from_host(first_cpu->kvm_state,
                                                 addr, &paddr)) {
             fprintf(stderr, "Hardware memory error for memory used by "
                     "QEMU itself instead of guest system!: %p\n", addr);
             return 0;
         }
         kvm_hwpoison_page_add(ram_addr);
-        kvm_mce_inject(x86_env_get_cpu(first_cpu), paddr, code);
+        kvm_mce_inject(X86_CPU(first_cpu), paddr, code);
     } else {
         if (code == BUS_MCEERR_AO) {
             return 0;

@@ -610,7 +610,7 @@ void helper_mwait(CPUX86State *env, int next_eip_addend)
     cpu = x86_env_get_cpu(env);
     cs = CPU(cpu);
     /* XXX: not complete but not completely erroneous */
-    if (cs->cpu_index != 0 || env->next_cpu != NULL) {
+    if (cs->cpu_index != 0 || cs->next_cpu != NULL) {
         /* more than one CPU: do not sleep because another CPU may
            wake this one */
     } else {

@@ -30,11 +30,11 @@
 
 #ifdef DEBUG_PCALL
 # define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
-# define LOG_PCALL_STATE(env) \
-        log_cpu_state_mask(CPU_LOG_PCALL, (env), CPU_DUMP_CCOP)
+# define LOG_PCALL_STATE(cpu) \
+        log_cpu_state_mask(CPU_LOG_PCALL, (cpu), CPU_DUMP_CCOP)
 #else
 # define LOG_PCALL(...) do { } while (0)
-# define LOG_PCALL_STATE(env) do { } while (0)
+# define LOG_PCALL_STATE(cpu) do { } while (0)
 #endif
 
 /* return non zero if error */

@@ -1160,9 +1160,11 @@ static void handle_even_inj(CPUX86State *env, int intno, int is_int,
 * the int instruction. next_eip is the env->eip value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE.
 */
-static void do_interrupt_all(CPUX86State *env, int intno, int is_int,
+static void do_interrupt_all(X86CPU *cpu, int intno, int is_int,
                              int error_code, target_ulong next_eip, int is_hw)
 {
+    CPUX86State *env = &cpu->env;
+
     if (qemu_loglevel_mask(CPU_LOG_INT)) {
         if ((env->cr[0] & CR0_PE_MASK)) {
             static int count;

@@ -1180,7 +1182,7 @@ static void do_interrupt_all(CPUX86State *env, int intno, int is_int,
             qemu_log(" env->regs[R_EAX]=" TARGET_FMT_lx, env->regs[R_EAX]);
         }
         qemu_log("\n");
-        log_cpu_state(env, CPU_DUMP_CCOP);
+        log_cpu_state(CPU(cpu), CPU_DUMP_CCOP);
 #if 0
         {
             int i;

@@ -1252,7 +1254,7 @@ void x86_cpu_do_interrupt(CPUState *cs)
         /* simulate a real cpu exception. On i386, it can
            trigger new exceptions, but we do not handle
            double or triple faults yet. */
-        do_interrupt_all(env, env->exception_index,
+        do_interrupt_all(cpu, env->exception_index,
                          env->exception_is_int,
                          env->error_code,
                          env->exception_next_eip, 0);

@@ -1263,7 +1265,7 @@ void x86_cpu_do_interrupt(CPUState *cs)
 
 void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw)
 {
-    do_interrupt_all(env, intno, 0, 0, 0, is_hw);
+    do_interrupt_all(x86_env_get_cpu(env), intno, 0, 0, 0, is_hw);
 }
 
 void helper_enter_level(CPUX86State *env, int level, int data32,

@@ -1684,7 +1686,7 @@ void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
 
     next_eip = env->eip + next_eip_addend;
     LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs, (uint32_t)new_eip, shift);
-    LOG_PCALL_STATE(env);
+    LOG_PCALL_STATE(CPU(x86_env_get_cpu(env)));
     if ((new_cs & 0xfffc) == 0) {
         raise_exception_err(env, EXCP0D_GPF, 0);
     }

@@ -2018,7 +2020,7 @@ static inline void helper_ret_protected(CPUX86State *env, int shift,
     }
     LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
               new_cs, new_eip, shift, addend);
-    LOG_PCALL_STATE(env);
+    LOG_PCALL_STATE(CPU(x86_env_get_cpu(env)));
     if ((new_cs & 0xfffc) == 0) {
         raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
     }

@@ -24,7 +24,7 @@
 
 #if defined(CONFIG_USER_ONLY)
 
-void do_smm_enter(CPUX86State *env)
+void do_smm_enter(X86CPU *cpu)
 {
 }
 

@@ -40,14 +40,15 @@ void helper_rsm(CPUX86State *env)
 #define SMM_REVISION_ID 0x00020000
 #endif
 
-void do_smm_enter(CPUX86State *env)
+void do_smm_enter(X86CPU *cpu)
 {
+    CPUX86State *env = &cpu->env;
     target_ulong sm_state;
     SegmentCache *dt;
     int i, offset;
 
     qemu_log_mask(CPU_LOG_INT, "SMM: enter\n");
-    log_cpu_state_mask(CPU_LOG_INT, env, CPU_DUMP_CCOP);
+    log_cpu_state_mask(CPU_LOG_INT, CPU(cpu), CPU_DUMP_CCOP);
 
     env->hflags |= HF_SMM_MASK;
     cpu_smm_update(env);

@@ -179,6 +180,7 @@ void do_smm_enter(CPUX86State *env)
 
 void helper_rsm(CPUX86State *env)
 {
+    X86CPU *cpu = x86_env_get_cpu(env);
     target_ulong sm_state;
     int i, offset;
     uint32_t val;

@@ -295,7 +297,7 @@ void helper_rsm(CPUX86State *env)
     cpu_smm_update(env);
 
     qemu_log_mask(CPU_LOG_INT, "SMM: after RSM\n");
-    log_cpu_state_mask(CPU_LOG_INT, env, CPU_DUMP_CCOP);
+    log_cpu_state_mask(CPU_LOG_INT, CPU(cpu), CPU_DUMP_CCOP);
 }
 
 #endif /* !CONFIG_USER_ONLY */

@@ -8251,10 +8251,11 @@ void optimize_flags_init(void)
 /* generate intermediate code in gen_opc_buf and gen_opparam_buf for
    basic block 'tb'. If search_pc is TRUE, also generate PC
    information for each intermediate instruction. */
-static inline void gen_intermediate_code_internal(CPUX86State *env,
+static inline void gen_intermediate_code_internal(X86CPU *cpu,
                                                   TranslationBlock *tb,
-                                                  int search_pc)
+                                                  bool search_pc)
 {
+    CPUX86State *env = &cpu->env;
     DisasContext dc1, *dc = &dc1;
     target_ulong pc_ptr;
     uint16_t *gen_opc_end;

@@ -8428,12 +8429,12 @@ static inline void gen_intermediate_code_internal(CPUX86State *env,
 
 void gen_intermediate_code(CPUX86State *env, TranslationBlock *tb)
 {
-    gen_intermediate_code_internal(env, tb, 0);
+    gen_intermediate_code_internal(x86_env_get_cpu(env), tb, false);
 }
 
 void gen_intermediate_code_pc(CPUX86State *env, TranslationBlock *tb)
 {
-    gen_intermediate_code_internal(env, tb, 1);
+    gen_intermediate_code_internal(x86_env_get_cpu(env), tb, true);
 }
 
 void restore_state_to_opc(CPUX86State *env, TranslationBlock *tb, int pc_pos)

@@ -64,7 +64,7 @@ typedef struct LM32CPU {
 
 static inline LM32CPU *lm32_env_get_cpu(CPULM32State *env)
 {
-    return LM32_CPU(container_of(env, LM32CPU, env));
+    return container_of(env, LM32CPU, env);
 }
 
 #define ENV_GET_CPU(e) CPU(lm32_env_get_cpu(e))

@@ -29,11 +29,6 @@ static void lm32_cpu_reset(CPUState *s)
     LM32CPUClass *lcc = LM32_CPU_GET_CLASS(cpu);
     CPULM32State *env = &cpu->env;
 
-    if (qemu_loglevel_mask(CPU_LOG_RESET)) {
-        qemu_log("CPU Reset (CPU %d)\n", s->cpu_index);
-        log_cpu_state(env, 0);
-    }
-
     lcc->parent_reset(s);
 
     /* reset cpu state */

@@ -215,20 +215,6 @@ int cpu_lm32_handle_mmu_fault(CPULM32State *env, target_ulong address, int rw,
                               int mmu_idx);
 #define cpu_handle_mmu_fault cpu_lm32_handle_mmu_fault
 
-#if defined(CONFIG_USER_ONLY)
-static inline void cpu_clone_regs(CPULM32State *env, target_ulong newsp)
-{
-    if (newsp) {
-        env->regs[R_SP] = newsp;
-    }
-    env->regs[R_R1] = 0;
-}
-#endif
-
-static inline void cpu_set_tls(CPULM32State *env, target_ulong newtls)
-{
-}
-
 #include "exec/cpu-all.h"
 
 static inline void cpu_get_tb_cpu_state(CPULM32State *env, target_ulong *pc,

@@ -70,7 +70,7 @@ void lm32_cpu_do_interrupt(CPUState *cs)
         } else {
             env->pc = env->eba + (env->exception_index * 32);
         }
-        log_cpu_state_mask(CPU_LOG_INT, env, 0);
+        log_cpu_state_mask(CPU_LOG_INT, cs, 0);
         break;
     case EXCP_BREAKPOINT:
     case EXCP_WATCHPOINT:

@@ -79,7 +79,7 @@ void lm32_cpu_do_interrupt(CPUState *cs)
         env->ie |= (env->ie & IE_IE) ? IE_BIE : 0;
         env->ie &= ~IE_IE;
         env->pc = env->deba + (env->exception_index * 32);
-        log_cpu_state_mask(CPU_LOG_INT, env, 0);
+        log_cpu_state_mask(CPU_LOG_INT, cs, 0);
         break;
     default:
         cpu_abort(env, "unhandled exception type=%d\n",

@@ -1011,9 +1011,11 @@ static void check_breakpoint(CPULM32State *env, DisasContext *dc)
 }
 
 /* generate intermediate code for basic block 'tb'. */
-static void gen_intermediate_code_internal(CPULM32State *env,
-        TranslationBlock *tb, int search_pc)
+static inline
+void gen_intermediate_code_internal(LM32CPU *cpu,
+        TranslationBlock *tb, bool search_pc)
 {
+    CPULM32State *env = &cpu->env;
     struct DisasContext ctx, *dc = &ctx;
     uint16_t *gen_opc_end;
     uint32_t pc_start;

@@ -1133,12 +1135,12 @@ static void gen_intermediate_code_internal(CPULM32State *env,
 
 void gen_intermediate_code(CPULM32State *env, struct TranslationBlock *tb)
 {
-    gen_intermediate_code_internal(env, tb, 0);
+    gen_intermediate_code_internal(lm32_env_get_cpu(env), tb, false);
 }
 
 void gen_intermediate_code_pc(CPULM32State *env, struct TranslationBlock *tb)
 {
-    gen_intermediate_code_internal(env, tb, 1);
+    gen_intermediate_code_internal(lm32_env_get_cpu(env), tb, true);
 }
 
 void lm32_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,

Some files were not shown because too many files have changed in this diff.