powerpc: create giveup_all()
Create a single function that gives everything up (FP, VMX, VSX, SPE).
Doing this all at once means we only do one MSR write.

A context switch microbenchmark using yield():

  http://ozlabs.org/~anton/junkcode/context_switch2.c

  ./context_switch2 --test=yield --fp --altivec --vector 0 0

shows an improvement of 3% on POWER8.

Signed-off-by: Anton Blanchard <anton@samba.org>
[mpe: giveup_all() needs to be EXPORT_SYMBOL'ed]
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
commit c208505900
parent 1f2e25b2d5
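The win comes from batching the MSR updates: each facility used to be given up behind its own expensive MSR write, while giveup_all() folds all of the facility-enable bits into one write. As a rough illustration only, here is a standalone userspace model of that effect (not kernel code; the MODEL_MSR_* values, the write counter, and the model_msr_set() helper are invented for this sketch):

#include <stdio.h>

/* Hypothetical MSR facility bits, made up for this model. */
#define MODEL_MSR_FP	0x1UL
#define MODEL_MSR_VEC	0x2UL
#define MODEL_MSR_VSX	0x4UL

static unsigned long model_msr;	/* stands in for the real MSR */
static int msr_writes;		/* counts expensive mtmsr-style writes */

/* Set bits in the model MSR, paying for a write only if anything changes. */
static void model_msr_set(unsigned long bits)
{
	unsigned long newmsr = model_msr | bits;

	if (newmsr != model_msr) {
		model_msr = newmsr;
		msr_writes++;
	}
}

int main(void)
{
	/* Old scheme: one MSR write per facility given up. */
	model_msr = 0;
	msr_writes = 0;
	model_msr_set(MODEL_MSR_FP);
	model_msr_set(MODEL_MSR_VEC);
	model_msr_set(MODEL_MSR_VSX);
	printf("per-facility writes: %d\n", msr_writes);	/* 3 */

	/* giveup_all() scheme: enable every facility in one go. */
	model_msr = 0;
	msr_writes = 0;
	model_msr_set(MODEL_MSR_FP | MODEL_MSR_VEC | MODEL_MSR_VSX);
	printf("batched writes:      %d\n", msr_writes);	/* 1 */

	return 0;
}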
--- a/arch/powerpc/include/asm/switch_to.h
+++ b/arch/powerpc/include/asm/switch_to.h
@@ -26,6 +26,7 @@ extern void __giveup_vsx(struct task_struct *);
 extern void giveup_vsx(struct task_struct *);
 extern void enable_kernel_spe(void);
 extern void load_up_spe(struct task_struct *);
+extern void giveup_all(struct task_struct *);
 extern void switch_booke_debug_regs(struct debug_reg *new_debug);
 
 #ifdef CONFIG_PPC_FPU
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -308,6 +308,65 @@ void flush_spe_to_thread(struct task_struct *tsk)
 }
 #endif /* CONFIG_SPE */
 
+static unsigned long msr_all_available;
+
+static int __init init_msr_all_available(void)
+{
+#ifdef CONFIG_PPC_FPU
+	msr_all_available |= MSR_FP;
+#endif
+#ifdef CONFIG_ALTIVEC
+	if (cpu_has_feature(CPU_FTR_ALTIVEC))
+		msr_all_available |= MSR_VEC;
+#endif
+#ifdef CONFIG_VSX
+	if (cpu_has_feature(CPU_FTR_VSX))
+		msr_all_available |= MSR_VSX;
+#endif
+#ifdef CONFIG_SPE
+	if (cpu_has_feature(CPU_FTR_SPE))
+		msr_all_available |= MSR_SPE;
+#endif
+
+	return 0;
+}
+early_initcall(init_msr_all_available);
+
+void giveup_all(struct task_struct *tsk)
+{
+	unsigned long usermsr;
+
+	if (!tsk->thread.regs)
+		return;
+
+	usermsr = tsk->thread.regs->msr;
+
+	if ((usermsr & msr_all_available) == 0)
+		return;
+
+	msr_check_and_set(msr_all_available);
+
+#ifdef CONFIG_PPC_FPU
+	if (usermsr & MSR_FP)
+		__giveup_fpu(tsk);
+#endif
+#ifdef CONFIG_ALTIVEC
+	if (usermsr & MSR_VEC)
+		__giveup_altivec(tsk);
+#endif
+#ifdef CONFIG_VSX
+	if (usermsr & MSR_VSX)
+		__giveup_vsx(tsk);
+#endif
+#ifdef CONFIG_SPE
+	if (usermsr & MSR_SPE)
+		__giveup_spe(tsk);
+#endif
+
+	msr_check_and_clear(msr_all_available);
+}
+EXPORT_SYMBOL(giveup_all);
+
 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
 void do_send_trap(struct pt_regs *regs, unsigned long address,
 		  unsigned long error_code, int signal_code, int breakpt)
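giveup_all() computes the union of available facility bits once at boot (msr_all_available), bails out early when the task uses none of them, enables every available facility with a single msr_check_and_set() call, saves whichever register sets the task actually touched, then drops the bits again with one msr_check_and_clear(). Both helpers come from earlier patches in this series; the following is only a sketch of the contract they provide, an approximation rather than the exact kernel source:

/*
 * Approximate semantics of the MSR helpers giveup_all() relies on.
 * The real kernel code uses the proper MSR-update sequence for the
 * platform (mtmsr/mtmsrd plus isync); plain mtmsr() stands in here.
 */
unsigned long msr_check_and_set(unsigned long bits)
{
	unsigned long oldmsr = mfmsr();
	unsigned long newmsr = oldmsr | bits;

	if (oldmsr != newmsr)	/* pay for the slow MSR write only if needed */
		mtmsr(newmsr);

	return newmsr;
}

void msr_check_and_clear(unsigned long bits)
{
	unsigned long oldmsr = mfmsr();
	unsigned long newmsr = oldmsr & ~bits;

	if (oldmsr != newmsr)
		mtmsr(newmsr);
}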
@@ -839,21 +898,8 @@ struct task_struct *__switch_to(struct task_struct *prev,
 
 	__switch_to_tm(prev);
 
-	if (prev->thread.regs && (prev->thread.regs->msr & MSR_FP))
-		giveup_fpu(prev);
-#ifdef CONFIG_ALTIVEC
-	if (prev->thread.regs && (prev->thread.regs->msr & MSR_VEC))
-		giveup_altivec(prev);
-#endif /* CONFIG_ALTIVEC */
-#ifdef CONFIG_VSX
-	if (prev->thread.regs && (prev->thread.regs->msr & MSR_VSX))
-		/* VMX and FPU registers are already save here */
-		__giveup_vsx(prev);
-#endif /* CONFIG_VSX */
-#ifdef CONFIG_SPE
-	if ((prev->thread.regs && (prev->thread.regs->msr & MSR_SPE)))
-		giveup_spe(prev);
-#endif /* CONFIG_SPE */
+	/* Save FPU, Altivec, VSX and SPE state */
+	giveup_all(prev);
 
 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
 	switch_booke_debug_regs(&new->thread.debug);
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -1490,21 +1490,8 @@ static int kvmppc_vcpu_run_pr(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 		goto out;
 	/* interrupts now hard-disabled */
 
-	/* Save FPU state in thread_struct */
-	if (current->thread.regs->msr & MSR_FP)
-		giveup_fpu(current);
-
-#ifdef CONFIG_ALTIVEC
-	/* Save Altivec state in thread_struct */
-	if (current->thread.regs->msr & MSR_VEC)
-		giveup_altivec(current);
-#endif
-
-#ifdef CONFIG_VSX
-	/* Save VSX state in thread_struct */
-	if (current->thread.regs->msr & MSR_VSX)
-		__giveup_vsx(current);
-#endif
+	/* Save FPU, Altivec and VSX state */
+	giveup_all(current);
 
 	/* Preload FPU if it's enabled */
 	if (kvmppc_get_msr(vcpu) & MSR_FP)