x86/fpu: Pass 'struct fpu' to fpu__restore()
This cleans up the call sites and the function a bit, and also makes it
more symmetric with the other high level FPU state handling functions.

It's still only valid for the current task, as we copy to the FPU
registers of the current CPU.

No change in functionality.

Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
This commit is contained in:
parent 32231879f6
commit e1884d69f6
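As a quick illustration of the calling-convention change described above, here is a minimal standalone sketch. The struct layouts, the 'current' macro and the fpu__restore() body below are simplified placeholders invented for this example, not the real kernel definitions touched by the diff:

/*
 * Illustrative sketch only: simplified stand-ins for the kernel's
 * struct fpu, struct task_struct and 'current'.
 */
#include <stdio.h>

struct fpu {
	unsigned int counter;	/* placeholder for the real FPU state */
};

struct thread_struct {
	struct fpu fpu;
};

struct task_struct {
	struct thread_struct thread;
};

static struct task_struct demo_task;
#define current (&demo_task)	/* the real 'current' is the running task */

/*
 * After this commit the FPU state to restore is passed in explicitly,
 * matching fpu__save()/fpu__drop(); before, fpu__restore(void) looked up
 * current->thread.fpu itself.  Either way it is only valid for the
 * current task, since the registers of the current CPU are loaded.
 */
static void fpu__restore(struct fpu *fpu)
{
	fpu->counter++;
	printf("restored FPU state, counter = %u\n", fpu->counter);
}

int main(void)
{
	fpu__restore(&current->thread.fpu);	/* new-style call site */
	return 0;
}

The real call sites in the diff follow the same shape: callers that already hold a struct fpu pointer pass it directly (fpu__restore(fpu)), while traps.c and lguest pass &current->thread.fpu.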
arch/x86/include/asm/fpu/internal.h

@@ -50,7 +50,7 @@ extern int fpu__exception_code(struct fpu *fpu, int trap_nr);
 extern void fpu__activate_curr(struct fpu *fpu);
 extern void fpu__activate_stopped(struct fpu *fpu);
 extern void fpu__save(struct fpu *fpu);
-extern void fpu__restore(void);
+extern void fpu__restore(struct fpu *fpu);
 extern int fpu__restore_sig(void __user *buf, int ia32_frame);
 extern void fpu__drop(struct fpu *fpu);
 extern int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu);
arch/x86/kernel/fpu/core.c

@@ -343,11 +343,8 @@ void fpu__activate_stopped(struct fpu *child_fpu)
  * with local interrupts disabled, as it is in the case of
  * do_device_not_available()).
  */
-void fpu__restore(void)
+void fpu__restore(struct fpu *fpu)
 {
-	struct task_struct *tsk = current;
-	struct fpu *fpu = &tsk->thread.fpu;
-
 	fpu__activate_curr(fpu);
 
 	/* Avoid __kernel_fpu_begin() right after fpregs_activate() */
@@ -355,9 +352,9 @@ void fpu__restore(void)
 	fpregs_activate(fpu);
 	if (unlikely(copy_fpstate_to_fpregs(fpu))) {
 		fpu__clear(fpu);
-		force_sig_info(SIGSEGV, SEND_SIG_PRIV, tsk);
+		force_sig_info(SIGSEGV, SEND_SIG_PRIV, current);
 	} else {
-		tsk->thread.fpu.counter++;
+		fpu->counter++;
 	}
 	kernel_fpu_enable();
 }
arch/x86/kernel/fpu/signal.c

@@ -319,7 +319,7 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
 		fpu->fpstate_active = 1;
 		if (use_eager_fpu()) {
 			preempt_disable();
-			fpu__restore();
+			fpu__restore(fpu);
 			preempt_enable();
 		}
 
arch/x86/kernel/traps.c

@@ -803,7 +803,7 @@ do_device_not_available(struct pt_regs *regs, long error_code)
 		return;
 	}
 #endif
-	fpu__restore(); /* interrupts still off */
+	fpu__restore(&current->thread.fpu); /* interrupts still off */
 #ifdef CONFIG_X86_32
 	conditional_sti(regs);
 #endif
drivers/lguest/x86/core.c

@@ -302,7 +302,7 @@ void lguest_arch_run_guest(struct lg_cpu *cpu)
 	 * before this.
 	 */
 	else if (cpu->regs->trapnum == 7 && !fpregs_active())
-		fpu__restore();
+		fpu__restore(&current->thread.fpu);
 }
 
 /*H:130