Merge tag 'tip_x86_fpu' of git://git.kernel.org/pub/scm/linux/kernel/git/bp/bp into x86/fpu

Pull x86/fpu updates from Borislav Petkov:

 "Three more cleanups/improvements to the FPU handling code. (Oleg Nesterov)"

Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit ae486033b9
@@ -41,8 +41,8 @@ void kernel_fpu_enable(void)
  * be set (so that the clts/stts pair does nothing that is
  * visible in the interrupted kernel thread).
  *
- * Except for the eagerfpu case when we return 1 unless we've already
- * been eager and saved the state in kernel_fpu_begin().
+ * Except for the eagerfpu case when we return true; in the likely case
+ * the thread has FPU but we are not going to set/clear TS.
  */
 static inline bool interrupted_kernel_fpu_idle(void)
 {
@@ -50,7 +50,7 @@ static inline bool interrupted_kernel_fpu_idle(void)
                return false;
 
        if (use_eager_fpu())
-               return __thread_has_fpu(current);
+               return true;
 
        return !__thread_has_fpu(current) &&
                (read_cr0() & X86_CR0_TS);
@@ -93,9 +93,10 @@ void __kernel_fpu_begin(void)
 
        if (__thread_has_fpu(me)) {
                __save_init_fpu(me);
-       } else if (!use_eager_fpu()) {
+       } else {
                this_cpu_write(fpu_owner_task, NULL);
-               clts();
+               if (!use_eager_fpu())
+                       clts();
        }
 }
 EXPORT_SYMBOL(__kernel_fpu_begin);
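The hunks above touch interrupted_kernel_fpu_idle() and __kernel_fpu_begin() (in that era these lived in arch/x86/kernel/i387.c; the diff's file header is not preserved on this page, so the path is an inference). The change makes the eager-FPU case unconditional: a kernel_fpu_begin()/end() section is always allowed when use_eager_fpu() is true, and clts() is issued only on the lazy-FPU path. Below is a minimal standalone userspace model of that decision, not kernel code: use_eager_fpu(), thread_has_fpu() and cr0_ts_set() are stub assumptions standing in for the real kernel predicates, and the leading "disabled" check, whose tail shows up above as the bare "return false;" context line, is omitted.

/* Standalone userspace model of the logic changed above -- NOT kernel code.
 * use_eager_fpu(), thread_has_fpu() and cr0_ts_set() are stand-in stubs for
 * the real kernel predicates; flip their return values to explore the cases.
 */
#include <stdbool.h>
#include <stdio.h>

static bool use_eager_fpu(void)  { return true;  } /* eagerfpu enabled       */
static bool thread_has_fpu(void) { return true;  } /* task owns the FPU      */
static bool cr0_ts_set(void)     { return false; } /* CR0.TS currently set?  */

/*
 * Mirrors interrupted_kernel_fpu_idle() after the patch: with eager FPU a
 * kernel_fpu_begin()/end() section is always allowed, because no clts()/
 * stts() pair is needed; the lazy-FPU condition is unchanged.
 */
static bool interrupted_kernel_fpu_idle_model(void)
{
        if (use_eager_fpu())
                return true;

        return !thread_has_fpu() && cr0_ts_set();
}

int main(void)
{
        printf("kernel FPU section allowed: %s\n",
               interrupted_kernel_fpu_idle_model() ? "yes" : "no");
        return 0;
}

Compile with any C99 compiler and toggle the three stubs to see which combinations still permit a kernel FPU section on the lazy path.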
@@ -130,6 +130,7 @@ void flush_thread(void)
 
        flush_ptrace_hw_breakpoint(tsk);
        memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
+
        drop_init_fpu(tsk);
        /*
         * Free the FPU state for non xsave platforms. They get reallocated
@@ -137,6 +138,12 @@ void flush_thread(void)
         */
        if (!use_eager_fpu())
                free_thread_xstate(tsk);
+       else if (!used_math()) {
+               /* kthread execs. TODO: cleanup this horror. */
+               if (WARN_ON(init_fpu(current)))
+                       force_sig(SIGKILL, current);
+               math_state_restore();
+       }
 }
 
 static void hard_disable_TSC(void)
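The flush_thread() hunks above (flush_thread() sits in arch/x86/kernel/process.c in this era, again an inference from the function name) add an eager-FPU branch for the kthread-exec case: with lazy FPU the extended state is freed and reallocated on first use, while with eager FPU a task that has never used math, i.e. a kernel thread exec'ing a user program, gets its FPU state allocated and loaded with the init state right away. The following is a small standalone sketch of that branch structure only, not kernel code; the booleans and printf() calls stand in for use_eager_fpu(), used_math(), free_thread_xstate(), init_fpu() and math_state_restore().

/* Minimal sketch of the branch added to flush_thread() above -- not kernel
 * code.  eager_fpu / has_math stand in for use_eager_fpu() / used_math();
 * the printf()s stand in for free_thread_xstate(), init_fpu() and
 * math_state_restore().
 */
#include <stdbool.h>
#include <stdio.h>

static void flush_thread_fpu_model(bool eager_fpu, bool has_math)
{
        if (!eager_fpu) {
                /* Lazy FPU: free the state, it is reallocated lazily. */
                printf("free xstate\n");
        } else if (!has_math) {
                /* Eager FPU, kthread exec: no state exists yet, so
                 * allocate it and load the init state immediately. */
                printf("init_fpu + restore init state\n");
        } else {
                /* Eager FPU, task already has state: nothing to do here. */
                printf("keep existing state\n");
        }
}

int main(void)
{
        flush_thread_fpu_model(false, true);  /* lazy FPU            */
        flush_thread_fpu_model(true,  false); /* eager, kthread exec */
        flush_thread_fpu_model(true,  true);  /* eager, normal exec  */
        return 0;
}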
@@ -688,7 +688,7 @@ void eager_fpu_init(void)
 {
        static __refdata void (*boot_func)(void) = eager_fpu_init_bp;
 
-       clear_used_math();
+       WARN_ON(used_math());
        current_thread_info()->status = 0;
 
        if (eagerfpu == ENABLE)
@@ -703,17 +703,6 @@ void eager_fpu_init(void)
                boot_func();
                boot_func = NULL;
        }
-
-       /*
-        * This is same as math_state_restore(). But use_xsave() is
-        * not yet patched to use math_state_restore().
-        */
-       init_fpu(current);
-       __thread_fpu_begin(current);
-       if (cpu_has_xsave)
-               xrstor_state(init_xstate_buf, -1);
-       else
-               fxrstor_checking(&init_xstate_buf->i387);
 }
 
 /*
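The eager_fpu_init() hunks above (eager_fpu_init() lived in arch/x86/kernel/xsave.c at the time, inferred from the function name) make two related cleanups: clear_used_math() becomes WARN_ON(used_math()), so the invariant is asserted rather than silently enforced, and the hand-rolled restore of the init xstate at the end of the function is removed; given the flush_thread() change earlier in this merge, the kthread-exec path appears to take over that initialization, though this page does not show the connecting commits. The snippet below is only a plain-C illustration of the assert-the-invariant pattern; the names are illustrative, not kernel APIs.

/* Plain-C illustration of the invariant change above: instead of silently
 * clearing a flag that must already be clear, warn if it is set.  Names
 * are illustrative, not kernel APIs.
 */
#include <stdbool.h>
#include <stdio.h>

static bool boot_task_used_math; /* models used_math() for the boot task */

/* Old style: force the expected state, hiding any bug that set the flag. */
static void init_old_style(void)
{
        boot_task_used_math = false;
}

/* New style: assume the state is already correct and complain if it is not,
 * the way WARN_ON(used_math()) does in the hunk above. */
static void init_new_style(void)
{
        if (boot_task_used_math)
                fprintf(stderr, "WARN: boot task unexpectedly has FPU state\n");
}

int main(void)
{
        init_old_style();
        init_new_style(); /* silent: the invariant holds */
        return 0;
}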