mirror of https://gitee.com/openkylin/qemu.git
sparc: embed sparc_def_t into CPUSPARCState
Make CPUSPARCState::def an embedded member so that it is allocated as part of the CPU instance and the def pointer no longer has to be cleaned up manually on CPU destruction.

Signed-off-by: Igor Mammedov <imammedo@redhat.com>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Tested-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Message-Id: <1503592308-93913-4-git-send-email-imammedo@redhat.com>
Signed-off-by: Eduardo Habkost <ehabkost@redhat.com>
This commit is contained in:
parent 12a6c15ef3
commit 576e1c4c23
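For readers skimming the diff below: the change trades a per-CPU heap copy of the CPU definition (duplicated in instance_init, freed in instance_finalize) for a definition embedded directly in the state structure. A minimal sketch of the two patterns, using hypothetical FooDef/FooState types and plain GLib calls rather than the real QEMU/QOM code:

    /* Sketch only: hypothetical FooDef/FooState types, not the QEMU structs. */
    #include <glib.h>

    typedef struct FooDef {
        guint32 features;
        guint32 nwindows;
    } FooDef;

    /* Old pattern: the state holds a pointer, so init has to g_memdup()
     * the class template (g_memdup2() in newer GLib) and finalize has to
     * g_free() it again; forgetting either leaks or double-frees. */
    typedef struct FooStateOld {
        FooDef *def;
    } FooStateOld;

    static void foo_old_init(FooStateOld *s, const FooDef *tmpl)
    {
        s->def = g_memdup(tmpl, sizeof(*tmpl));
    }

    static void foo_old_finalize(FooStateOld *s)
    {
        g_free(s->def);
    }

    /* New pattern: the definition is embedded in the state, so one struct
     * assignment copies it and it is released together with the object. */
    typedef struct FooStateNew {
        FooDef def;
    } FooStateNew;

    static void foo_new_init(FooStateNew *s, const FooDef *tmpl)
    {
        if (tmpl) {
            s->def = *tmpl;   /* by-value copy, nothing to free later */
        }
    }

    int main(void)
    {
        const FooDef tmpl = { .features = 0x1, .nwindows = 8 };

        FooStateOld o = { 0 };
        foo_old_init(&o, &tmpl);
        foo_old_finalize(&o);

        FooStateNew n = { 0 };
        foo_new_init(&n, &tmpl);
        g_assert(n.def.nwindows == 8);
        return 0;
    }

With the embedded form there is no separate allocation to track, which is why the sparc_cpu_uninitfn finalizer disappears in the hunks below.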
@@ -31,7 +31,7 @@ struct target_pt_regs {
 
 static inline abi_ulong target_shmlba(CPUSPARCState *env)
 {
-    if (!(env->def->features & CPU_FEATURE_FLUSH)) {
+    if (!(env->def.features & CPU_FEATURE_FLUSH)) {
         return 64 * 1024;
     } else {
         return 256 * 1024;
@@ -66,7 +66,7 @@ static void sparc_cpu_reset(CPUState *s)
     env->lsu = 0;
 #else
     env->mmuregs[0] &= ~(MMU_E | MMU_NF);
-    env->mmuregs[0] |= env->def->mmu_bm;
+    env->mmuregs[0] |= env->def.mmu_bm;
 #endif
     env->pc = 0;
     env->npc = env->pc + 4;
@@ -120,18 +120,18 @@ static int cpu_sparc_register(SPARCCPU *cpu, const char *cpu_model)
         return -1;
     }
 
-    env->version = env->def->iu_version;
-    env->fsr = env->def->fpu_version;
-    env->nwindows = env->def->nwindows;
+    env->version = env->def.iu_version;
+    env->fsr = env->def.fpu_version;
+    env->nwindows = env->def.nwindows;
 #if !defined(TARGET_SPARC64)
-    env->mmuregs[0] |= env->def->mmu_version;
+    env->mmuregs[0] |= env->def.mmu_version;
     cpu_sparc_set_id(env, 0);
-    env->mxccregs[7] |= env->def->mxcc_version;
+    env->mxccregs[7] |= env->def.mxcc_version;
 #else
-    env->mmu_version = env->def->mmu_version;
-    env->maxtl = env->def->maxtl;
-    env->version |= env->def->maxtl << 8;
-    env->version |= env->def->nwindows - 1;
+    env->mmu_version = env->def.mmu_version;
+    env->maxtl = env->def.maxtl;
+    env->version |= env->def.maxtl << 8;
+    env->version |= env->def.nwindows - 1;
 #endif
     return 0;
 }
@@ -557,7 +557,7 @@ static void sparc_cpu_parse_features(CPUState *cs, char *features,
                                      Error **errp)
 {
     SPARCCPU *cpu = SPARC_CPU(cs);
-    sparc_def_t *cpu_def = cpu->env.def;
+    sparc_def_t *cpu_def = &cpu->env.def;
     char *featurestr;
     uint32_t plus_features = 0;
     uint32_t minus_features = 0;
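Code that still prefers a pointer, like the cpu_def local above or dc->def in the gen_intermediate_code hunk further down, now just takes the address of the embedded member; the pointee lives exactly as long as the CPU object, so nothing needs to be freed. A tiny continuation of the hypothetical sketch above:

    /* Continues the hypothetical FooStateNew sketch from above. */
    guint32 foo_features(FooStateNew *s)
    {
        FooDef *def = &s->def;   /* pointer into the embedded member */
        return def->features;    /* valid for as long as *s is alive */
    }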
@@ -818,8 +818,8 @@ static void sparc_cpu_realizefn(DeviceState *dev, Error **errp)
     SPARCCPU *cpu = SPARC_CPU(dev);
     CPUSPARCState *env = &cpu->env;
 
-    if ((env->def->features & CPU_FEATURE_FLOAT)) {
-        env->def->features |= CPU_FEATURE_FLOAT128;
+    if ((env->def.features & CPU_FEATURE_FLOAT)) {
+        env->def.features |= CPU_FEATURE_FLOAT128;
     }
 #endif
 
@@ -847,15 +847,9 @@ static void sparc_cpu_initfn(Object *obj)
         gen_intermediate_code_init(env);
     }
 
-    env->def = g_memdup(scc->cpu_def, sizeof(*scc->cpu_def));
-}
-
-static void sparc_cpu_uninitfn(Object *obj)
-{
-    SPARCCPU *cpu = SPARC_CPU(obj);
-    CPUSPARCState *env = &cpu->env;
-
-    g_free(env->def);
+    if (scc->cpu_def) {
+        env->def = *scc->cpu_def;
+    }
 }
 
 static void sparc_cpu_class_init(ObjectClass *oc, void *data)
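The new sparc_cpu_initfn relies on plain C struct assignment, which copies every member: each CPU gets its own copy of the class-level cpu_def, so per-instance tweaks such as the CPU_FEATURE_FLOAT128 bit set in realizefn above never reach the shared template. Continuing the hypothetical sketch:

    /* Continues the hypothetical sketch: per-instance copies stay independent. */
    void foo_copy_is_independent(void)
    {
        const FooDef tmpl = { .features = 0x1, .nwindows = 8 };
        FooStateNew a = { 0 }, b = { 0 };

        foo_new_init(&a, &tmpl);
        foo_new_init(&b, &tmpl);

        a.def.features |= 0x80;            /* tweak one instance... */
        g_assert(b.def.features == 0x1);   /* ...the other is untouched */
        g_assert(tmpl.features == 0x1);    /* ...and so is the template */
    }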
@@ -904,7 +898,6 @@ static const TypeInfo sparc_cpu_type_info = {
     .parent = TYPE_CPU,
     .instance_size = sizeof(SPARCCPU),
     .instance_init = sparc_cpu_initfn,
-    .instance_finalize = sparc_cpu_uninitfn,
     .abstract = true,
     .class_size = sizeof(SPARCCPUClass),
     .class_init = sparc_cpu_class_init,
@@ -529,7 +529,7 @@ struct CPUSPARCState {
 #define SOFTINT_INTRMASK (0xFFFE)
 #define SOFTINT_REG_MASK (SOFTINT_STIMER|SOFTINT_INTRMASK|SOFTINT_TIMER)
 #endif
-    sparc_def_t *def;
+    sparc_def_t def;
 
     void *irq_manager;
     void (*qemu_irq_ack)(CPUSPARCState *env, void *irq_manager, int intno);
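Note that CPUSPARCState is itself embedded in SPARCCPU, so the new def member is already covered by .instance_size = sizeof(SPARCCPU) in the TypeInfo above; no allocation sizes need adjusting. A one-line illustration with the hypothetical types (a sketch, not the real layout):

    /* Hypothetical nesting mirroring SPARCCPU -> CPUSPARCState -> def:
     * the outer object's size already accounts for the embedded member. */
    typedef struct FooCPU {
        FooStateNew env;               /* embeds FooDef def */
    } FooCPU;

    G_STATIC_ASSERT(sizeof(FooCPU) >= sizeof(FooDef));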
@@ -679,7 +679,7 @@ int cpu_sparc_signal_handler(int host_signum, void *pinfo, void *puc);
 #if defined (TARGET_SPARC64)
 static inline int cpu_has_hypervisor(CPUSPARCState *env1)
 {
-    return env1->def->features & CPU_FEATURE_HYPV;
+    return env1->def.features & CPU_FEATURE_HYPV;
 }
 
 static inline int cpu_hypervisor_mode(CPUSPARCState *env1)
@@ -788,14 +788,14 @@ static inline void cpu_get_tb_cpu_state(CPUSPARCState *env, target_ulong *pc,
     if (env->pstate & PS_AM) {
         flags |= TB_FLAG_AM_ENABLED;
     }
-    if ((env->def->features & CPU_FEATURE_FLOAT)
+    if ((env->def.features & CPU_FEATURE_FLOAT)
         && (env->pstate & PS_PEF)
         && (env->fprs & FPRS_FEF)) {
         flags |= TB_FLAG_FPU_ENABLED;
     }
     flags |= env->asi << TB_FLAG_ASI_SHIFT;
 #else
-    if ((env->def->features & CPU_FEATURE_FLOAT) && env->psref) {
+    if ((env->def.features & CPU_FEATURE_FLOAT) && env->psref) {
         flags |= TB_FLAG_FPU_ENABLED;
     }
 #endif
@@ -108,7 +108,7 @@ void sparc_cpu_do_interrupt(CPUState *cs)
 #if !defined(CONFIG_USER_ONLY)
     if (env->psret == 0) {
         if (cs->exception_index == 0x80 &&
-            env->def->features & CPU_FEATURE_TA0_SHUTDOWN) {
+            env->def.features & CPU_FEATURE_TA0_SHUTDOWN) {
             qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN);
         } else {
             cpu_abort(cs, "Trap 0x%02x while interrupts disabled, Error state",
@@ -147,7 +147,7 @@ void sparc_cpu_do_interrupt(CPUState *cs)
         }
     }
 
-    if (env->def->features & CPU_FEATURE_GL) {
+    if (env->def.features & CPU_FEATURE_GL) {
         tsptr->tstate |= (env->gl & 7ULL) << 40;
         cpu_gl_switch_gregs(env, env->gl + 1);
         env->gl++;
@@ -513,7 +513,7 @@ uint64_t helper_ld_asi(CPUSPARCState *env, target_ulong addr,
         case 0x00: /* Leon3 Cache Control */
         case 0x08: /* Leon3 Instruction Cache config */
         case 0x0C: /* Leon3 Date Cache config */
-            if (env->def->features & CPU_FEATURE_CACHE_CTRL) {
+            if (env->def.features & CPU_FEATURE_CACHE_CTRL) {
                 ret = leon3_cache_control_ld(env, addr, size);
             }
             break;
@@ -736,7 +736,7 @@ void helper_st_asi(CPUSPARCState *env, target_ulong addr, uint64_t val,
         case 0x00: /* Leon3 Cache Control */
         case 0x08: /* Leon3 Instruction Cache config */
        case 0x0C: /* Leon3 Date Cache config */
-            if (env->def->features & CPU_FEATURE_CACHE_CTRL) {
+            if (env->def.features & CPU_FEATURE_CACHE_CTRL) {
                 leon3_cache_control_st(env, addr, val, size);
             }
             break;
@@ -904,15 +904,15 @@ void helper_st_asi(CPUSPARCState *env, target_ulong addr, uint64_t val,
                 /* Mappings generated during no-fault mode
                    are invalid in normal mode. */
                 if ((oldreg ^ env->mmuregs[reg])
-                    & (MMU_NF | env->def->mmu_bm)) {
+                    & (MMU_NF | env->def.mmu_bm)) {
                     tlb_flush(CPU(cpu));
                 }
                 break;
             case 1: /* Context Table Pointer Register */
-                env->mmuregs[reg] = val & env->def->mmu_ctpr_mask;
+                env->mmuregs[reg] = val & env->def.mmu_ctpr_mask;
                 break;
             case 2: /* Context Register */
-                env->mmuregs[reg] = val & env->def->mmu_cxr_mask;
+                env->mmuregs[reg] = val & env->def.mmu_cxr_mask;
                 if (oldreg != env->mmuregs[reg]) {
                     /* we flush when the MMU context changes because
                        QEMU has no MMU context support */
@@ -923,11 +923,11 @@ void helper_st_asi(CPUSPARCState *env, target_ulong addr, uint64_t val,
             case 4: /* Synchronous Fault Address Register */
                 break;
             case 0x10: /* TLB Replacement Control Register */
-                env->mmuregs[reg] = val & env->def->mmu_trcr_mask;
+                env->mmuregs[reg] = val & env->def.mmu_trcr_mask;
                 break;
             case 0x13: /* Synchronous Fault Status Register with Read
                           and Clear */
-                env->mmuregs[3] = val & env->def->mmu_sfsr_mask;
+                env->mmuregs[3] = val & env->def.mmu_sfsr_mask;
                 break;
             case 0x14: /* Synchronous Fault Address Register */
                 env->mmuregs[4] = val;
@@ -95,7 +95,7 @@ static int get_physical_address(CPUSPARCState *env, hwaddr *physical,
     if (mmu_idx == MMU_PHYS_IDX) {
         *page_size = TARGET_PAGE_SIZE;
         /* Boot mode: instruction fetches are taken from PROM */
-        if (rw == 2 && (env->mmuregs[0] & env->def->mmu_bm)) {
+        if (rw == 2 && (env->mmuregs[0] & env->def.mmu_bm)) {
             *physical = env->prom_addr | (address & 0x7ffffULL);
             *prot = PAGE_READ | PAGE_EXEC;
             return 0;
@@ -5756,7 +5756,7 @@ void gen_intermediate_code(CPUState *cs, TranslationBlock * tb)
     dc->npc = (target_ulong) tb->cs_base;
     dc->cc_op = CC_OP_DYNAMIC;
     dc->mem_idx = tb->flags & TB_FLAG_MMU_MASK;
-    dc->def = env->def;
+    dc->def = &env->def;
     dc->fpu_enabled = tb_fpu_enabled(tb->flags);
     dc->address_mask_32bit = tb_am_enabled(tb->flags);
     dc->singlestep = (cs->singlestep_enabled || singlestep);
@@ -295,7 +295,7 @@ void helper_wrcwp(CPUSPARCState *env, target_ulong new_cwp)
 
 static inline uint64_t *get_gregset(CPUSPARCState *env, uint32_t pstate)
 {
-    if (env->def->features & CPU_FEATURE_GL) {
+    if (env->def.features & CPU_FEATURE_GL) {
         return env->glregs + (env->gl & 7) * 8;
     }
 
@@ -343,7 +343,7 @@ void cpu_change_pstate(CPUSPARCState *env, uint32_t new_pstate)
     uint32_t pstate_regs, new_pstate_regs;
    uint64_t *src, *dst;
 
-    if (env->def->features & CPU_FEATURE_GL) {
+    if (env->def.features & CPU_FEATURE_GL) {
         /* PS_AG, IG and MG are not implemented in this case */
         new_pstate &= ~(PS_AG | PS_IG | PS_MG);
         env->pstate = new_pstate;