x86/fpu: Eliminate the __thread_has_fpu() wrapper
Start migrating FPU methods towards using 'struct fpu *fpu' directly. __thread_has_fpu() is just a trivial wrapper around fpu->has_fpu, so eliminate it.

Reviewed-by: Borislav Petkov <bp@alien8.de>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
This commit is contained in:
parent: 9a89b02918
commit: 276983f808
@ -323,16 +323,6 @@ static inline int restore_fpu_checking(struct task_struct *tsk)
|
|||
return fpu_restore_checking(&tsk->thread.fpu);
|
||||
}
|
||||
|
||||
/*
|
||||
* Software FPU state helpers. Careful: these need to
|
||||
* be preemption protection *and* they need to be
|
||||
* properly paired with the CR0.TS changes!
|
||||
*/
|
||||
static inline int __thread_has_fpu(struct task_struct *tsk)
|
||||
{
|
||||
return tsk->thread.fpu.has_fpu;
|
||||
}
|
||||
|
||||
/* Must be paired with an 'stts' after! */
|
||||
static inline void __thread_clear_has_fpu(struct task_struct *tsk)
|
||||
{
|
||||
|
@ -370,13 +360,14 @@ static inline void __thread_fpu_begin(struct task_struct *tsk)
|
|||
|
||||
static inline void drop_fpu(struct task_struct *tsk)
|
||||
{
|
||||
struct fpu *fpu = &tsk->thread.fpu;
|
||||
/*
|
||||
* Forget coprocessor state..
|
||||
*/
|
||||
preempt_disable();
|
||||
tsk->thread.fpu.counter = 0;
|
||||
|
||||
if (__thread_has_fpu(tsk)) {
|
||||
if (fpu->has_fpu) {
|
||||
/* Ignore delayed exceptions from user space */
|
||||
asm volatile("1: fwait\n"
|
||||
"2:\n"
|
||||
|
@ -424,6 +415,7 @@ typedef struct { int preload; } fpu_switch_t;
|
|||
|
||||
static inline fpu_switch_t switch_fpu_prepare(struct task_struct *old, struct task_struct *new, int cpu)
|
||||
{
|
||||
struct fpu *old_fpu = &old->thread.fpu;
|
||||
fpu_switch_t fpu;
|
||||
|
||||
/*
|
||||
|
@ -433,7 +425,7 @@ static inline fpu_switch_t switch_fpu_prepare(struct task_struct *old, struct ta
|
|||
fpu.preload = tsk_used_math(new) &&
|
||||
(use_eager_fpu() || new->thread.fpu.counter > 5);
|
||||
|
||||
if (__thread_has_fpu(old)) {
|
||||
if (old_fpu->has_fpu) {
|
||||
if (!fpu_save_init(&old->thread.fpu))
|
||||
task_disable_lazy_fpu_restore(old);
|
||||
else
|
||||
|
|
|
@ -57,8 +57,7 @@ static bool interrupted_kernel_fpu_idle(void)
|
|||
if (use_eager_fpu())
|
||||
return true;
|
||||
|
||||
return !__thread_has_fpu(current) &&
|
||||
(read_cr0() & X86_CR0_TS);
|
||||
return !current->thread.fpu.has_fpu && (read_cr0() & X86_CR0_TS);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -93,11 +92,12 @@ EXPORT_SYMBOL(irq_fpu_usable);
|
|||
void __kernel_fpu_begin(void)
|
||||
{
|
||||
struct task_struct *me = current;
|
||||
struct fpu *fpu = &me->thread.fpu;
|
||||
|
||||
kernel_fpu_disable();
|
||||
|
||||
if (__thread_has_fpu(me)) {
|
||||
fpu_save_init(&me->thread.fpu);
|
||||
if (fpu->has_fpu) {
|
||||
fpu_save_init(fpu);
|
||||
} else {
|
||||
this_cpu_write(fpu_owner_task, NULL);
|
||||
if (!use_eager_fpu())
|
||||
|
@ -109,8 +109,9 @@ EXPORT_SYMBOL(__kernel_fpu_begin);
|
|||
void __kernel_fpu_end(void)
|
||||
{
|
||||
struct task_struct *me = current;
|
||||
struct fpu *fpu = &me->thread.fpu;
|
||||
|
||||
if (__thread_has_fpu(me)) {
|
||||
if (fpu->has_fpu) {
|
||||
if (WARN_ON(restore_fpu_checking(me)))
|
||||
fpu_reset_state(me);
|
||||
} else if (!use_eager_fpu()) {
|
||||
|
@ -128,14 +129,16 @@ EXPORT_SYMBOL(__kernel_fpu_end);
|
|||
*/
|
||||
void fpu__save(struct task_struct *tsk)
|
||||
{
|
||||
struct fpu *fpu = &tsk->thread.fpu;
|
||||
|
||||
WARN_ON(tsk != current);
|
||||
|
||||
preempt_disable();
|
||||
if (__thread_has_fpu(tsk)) {
|
||||
if (fpu->has_fpu) {
|
||||
if (use_eager_fpu()) {
|
||||
__save_fpu(tsk);
|
||||
} else {
|
||||
fpu_save_init(&tsk->thread.fpu);
|
||||
fpu_save_init(fpu);
|
||||
__thread_fpu_end(tsk);
|
||||
}
|
||||
}
|
||||
|
|
Loading…
Reference in New Issue