Merge branch 'tip-x86-fpu' of git://git.kernel.org/pub/scm/linux/kernel/git/bp/bp into x86/fpu

Pull FPU updates from Borislav Petkov:

 "A round of updates to the FPU maze from Oleg and Rik. It should make
  the code a bit more understandable/readable/streamlined and a preparation
  for more cleanups and improvements in that area."

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Committed by Ingo Molnar on 2015-02-19 11:17:42 +01:00
commit f353e61230
4 changed files with 49 additions and 50 deletions

arch/x86/include/asm/fpu-internal.h

@@ -67,6 +67,34 @@ extern void finit_soft_fpu(struct i387_soft_struct *soft);
 static inline void finit_soft_fpu(struct i387_soft_struct *soft) {}
 #endif
 
+/*
+ * Must be run with preemption disabled: this clears the fpu_owner_task,
+ * on this CPU.
+ *
+ * This will disable any lazy FPU state restore of the current FPU state,
+ * but if the current thread owns the FPU, it will still be saved by.
+ */
+static inline void __cpu_disable_lazy_restore(unsigned int cpu)
+{
+	per_cpu(fpu_owner_task, cpu) = NULL;
+}
+
+/*
+ * Used to indicate that the FPU state in memory is newer than the FPU
+ * state in registers, and the FPU state should be reloaded next time the
+ * task is run. Only safe on the current task, or non-running tasks.
+ */
+static inline void task_disable_lazy_fpu_restore(struct task_struct *tsk)
+{
+	tsk->thread.fpu.last_cpu = ~0;
+}
+
+static inline int fpu_lazy_restore(struct task_struct *new, unsigned int cpu)
+{
+	return new == this_cpu_read_stable(fpu_owner_task) &&
+		cpu == new->thread.fpu.last_cpu;
+}
+
 static inline int is_ia32_compat_frame(void)
 {
 	return config_enabled(CONFIG_IA32_EMULATION) &&
@@ -400,24 +428,6 @@ static inline void drop_init_fpu(struct task_struct *tsk)
  */
 typedef struct { int preload; } fpu_switch_t;
 
-/*
- * Must be run with preemption disabled: this clears the fpu_owner_task,
- * on this CPU.
- *
- * This will disable any lazy FPU state restore of the current FPU state,
- * but if the current thread owns the FPU, it will still be saved by.
- */
-static inline void __cpu_disable_lazy_restore(unsigned int cpu)
-{
-	per_cpu(fpu_owner_task, cpu) = NULL;
-}
-
-static inline int fpu_lazy_restore(struct task_struct *new, unsigned int cpu)
-{
-	return new == this_cpu_read_stable(fpu_owner_task) &&
-		cpu == new->thread.fpu.last_cpu;
-}
-
 static inline fpu_switch_t switch_fpu_prepare(struct task_struct *old, struct task_struct *new, int cpu)
 {
 	fpu_switch_t fpu;
@@ -426,13 +436,17 @@ static inline fpu_switch_t switch_fpu_prepare(struct task_struct *old, struct task_struct *new, int cpu)
 	 * If the task has used the math, pre-load the FPU on xsave processors
 	 * or if the past 5 consecutive context-switches used math.
 	 */
-	fpu.preload = tsk_used_math(new) && (use_eager_fpu() ||
-					     new->thread.fpu_counter > 5);
+	fpu.preload = tsk_used_math(new) &&
+		      (use_eager_fpu() || new->thread.fpu_counter > 5);
+
 	if (__thread_has_fpu(old)) {
 		if (!__save_init_fpu(old))
-			cpu = ~0;
-		old->thread.fpu.last_cpu = cpu;
-		old->thread.fpu.has_fpu = 0;	/* But leave fpu_owner_task! */
+			task_disable_lazy_fpu_restore(old);
+		else
+			old->thread.fpu.last_cpu = cpu;
+
+		/* But leave fpu_owner_task! */
+		old->thread.fpu.has_fpu = 0;
 
 		/* Don't change CR0.TS if we just switch! */
 		if (fpu.preload) {
@@ -443,10 +457,10 @@ static inline fpu_switch_t switch_fpu_prepare(struct task_struct *old, struct task_struct *new, int cpu)
 			stts();
 	} else {
 		old->thread.fpu_counter = 0;
-		old->thread.fpu.last_cpu = ~0;
+		task_disable_lazy_fpu_restore(old);
 		if (fpu.preload) {
 			new->thread.fpu_counter++;
-			if (!use_eager_fpu() && fpu_lazy_restore(new, cpu))
+			if (fpu_lazy_restore(new, cpu))
 				fpu.preload = 0;
 			else
 				prefetch(new->thread.fpu.state);
@@ -519,24 +533,6 @@ static inline void __save_fpu(struct task_struct *tsk)
 		fpu_fxsave(&tsk->thread.fpu);
 }
 
-/*
- * These disable preemption on their own and are safe
- */
-static inline void save_init_fpu(struct task_struct *tsk)
-{
-	WARN_ON_ONCE(!__thread_has_fpu(tsk));
-
-	if (use_eager_fpu()) {
-		__save_fpu(tsk);
-		return;
-	}
-
-	preempt_disable();
-	__save_init_fpu(tsk);
-	__thread_fpu_end(tsk);
-	preempt_enable();
-}
-
 /*
  * i387 state interaction
  */
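As an aside for readers new to this area: the helpers introduced in the first hunk manage two pieces of bookkeeping, a per-CPU pointer to the task whose FPU contents currently sit in that CPU's registers (fpu_owner_task) and a per-task record of which CPU last held them (last_cpu). The stand-alone C sketch below models only that idea; it is illustrative userspace code with mock types and a plain array instead of the per_cpu machinery, not kernel code.

/* Simplified userspace model of the lazy FPU-restore bookkeeping.
 * Illustrative only: no per_cpu machinery, no preemption, mock types. */
#include <stdio.h>

#define NR_CPUS 2

struct task {
	const char *name;
	int last_cpu;	/* CPU whose registers last held this task's FPU state, or -1 */
};

/* Which task's FPU state is currently loaded in each CPU's registers. */
static struct task *fpu_owner_task[NR_CPUS];

/* Forget the owner: nothing may be lazily restored on this CPU anymore. */
static void cpu_disable_lazy_restore(int cpu)
{
	fpu_owner_task[cpu] = NULL;
}

/* The in-memory FPU image is now newer than any registers: force a reload. */
static void task_disable_lazy_fpu_restore(struct task *tsk)
{
	tsk->last_cpu = -1;
}

/* Registers may be reused only if this CPU still holds this task's state
 * and the task last ran on this very CPU. */
static int fpu_lazy_restore(struct task *new, int cpu)
{
	return new == fpu_owner_task[cpu] && cpu == new->last_cpu;
}

int main(void)
{
	struct task a = { "A", -1 };

	/* Task A's FPU state gets loaded into CPU 0's registers. */
	fpu_owner_task[0] = &a;
	a.last_cpu = 0;
	printf("reuse registers on cpu0? %d\n", fpu_lazy_restore(&a, 0));	/* 1 */

	/* Its in-memory image is changed behind its back (think ptrace): must reload. */
	task_disable_lazy_fpu_restore(&a);
	printf("reuse registers on cpu0? %d\n", fpu_lazy_restore(&a, 0));	/* 0 */

	/* Another user clobbers CPU 0's registers: the owner is simply cleared. */
	cpu_disable_lazy_restore(0);
	return 0;
}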

arch/x86/kernel/i387.c

@@ -119,10 +119,13 @@ void unlazy_fpu(struct task_struct *tsk)
 {
 	preempt_disable();
 	if (__thread_has_fpu(tsk)) {
-		__save_init_fpu(tsk);
-		__thread_fpu_end(tsk);
-	} else
-		tsk->thread.fpu_counter = 0;
+		if (use_eager_fpu()) {
+			__save_fpu(tsk);
+		} else {
+			__save_init_fpu(tsk);
+			__thread_fpu_end(tsk);
+		}
+	}
 	preempt_enable();
 }
 EXPORT_SYMBOL(unlazy_fpu);
@@ -246,7 +249,7 @@ int init_fpu(struct task_struct *tsk)
 	if (tsk_used_math(tsk)) {
 		if (cpu_has_fpu && tsk == current)
 			unlazy_fpu(tsk);
-		tsk->thread.fpu.last_cpu = ~0;
+		task_disable_lazy_fpu_restore(tsk);
 		return 0;
 	}

arch/x86/kernel/process.c

@@ -68,8 +68,8 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
 	dst->thread.fpu_counter = 0;
 	dst->thread.fpu.has_fpu = 0;
-	dst->thread.fpu.last_cpu = ~0;
 	dst->thread.fpu.state = NULL;
+	task_disable_lazy_fpu_restore(dst);
 
 	if (tsk_used_math(src)) {
 		int err = fpu_alloc(&dst->thread.fpu);
 		if (err)

arch/x86/kernel/traps.c

@@ -734,7 +734,7 @@ static void math_error(struct pt_regs *regs, int error_code, int trapnr)
 	/*
 	 * Save the info for the exception handler and clear the error.
 	 */
-	save_init_fpu(task);
+	unlazy_fpu(task);
 	task->thread.trap_nr = trapnr;
 	task->thread.error_code = error_code;
 	info.si_signo = SIGFPE;
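
Finally, a rough guide to the preload heuristic that switch_fpu_prepare() applies in the fpu-internal.h hunks above: the threshold of five consecutive FPU-using context switches and the eager/lazy distinction come straight from the diff, while the mock types and helper names in this stand-alone sketch are purely illustrative.

/* Stand-alone model of the fpu.preload decision in switch_fpu_prepare().
 * Illustrative only: mock types, no register saving, no CR0.TS handling. */
#include <stdbool.h>
#include <stdio.h>

struct mock_task {
	bool used_math;			/* has this task ever used the FPU? */
	unsigned int fpu_counter;	/* recent consecutive context switches that used it */
};

static bool eager_fpu;			/* eagerfpu mode: FPU state is always kept loaded */

static bool should_preload(const struct mock_task *next)
{
	/* Mirrors: fpu.preload = tsk_used_math(new) &&
	 *		(use_eager_fpu() || new->thread.fpu_counter > 5); */
	return next->used_math && (eager_fpu || next->fpu_counter > 5);
}

int main(void)
{
	struct mock_task light = { .used_math = true, .fpu_counter = 2 };
	struct mock_task heavy = { .used_math = true, .fpu_counter = 9 };

	eager_fpu = false;
	printf("lazy  mode: light=%d heavy=%d\n",
	       should_preload(&light), should_preload(&heavy));	/* 0 1 */

	eager_fpu = true;
	printf("eager mode: light=%d heavy=%d\n",
	       should_preload(&light), should_preload(&heavy));	/* 1 1 */
	return 0;
}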