x86/entry/64: Open-code switch_to_thread_stack()
Open-code the two instances which called switch_to_thread_stack(). This
allows us to remove the wrapper around DO_SWITCH_TO_THREAD_STACK.

While at it, update the UNWIND hint to reflect where the IRET frame is,
and update the commentary to reflect what we are actually doing here.

Signed-off-by: Dominik Brodowski <linux@dominikbrodowski.net>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: David Woodhouse <dwmw2@infradead.org>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: dan.j.williams@intel.com
Link: http://lkml.kernel.org/r/20180220210113.6725-7-linux@dominikbrodowski.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit f3d415ea46
parent b2855d8d2d
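As an orientation aid for the diff below (an editor's sketch, not part of the patch): the two open-coded call sites are interrupt_entry in arch/x86/entry/entry_64.S, which previously expanded the DO_SWITCH_TO_THREAD_STACK macro directly, and entry_INT80_compat in arch/x86/entry/entry_64_compat.S, which previously reached the same macro through the switch_to_thread_stack assembly wrapper:

	/*
	 * Before:  interrupt_entry     -> DO_SWITCH_TO_THREAD_STACK (macro)
	 *          entry_INT80_compat  -> call switch_to_thread_stack
	 *                                   -> DO_SWITCH_TO_THREAD_STACK (macro)
	 *
	 * After:   interrupt_entry     -> stack switch open-coded inline
	 *          entry_INT80_compat  -> stack switch open-coded inline
	 *          (macro and wrapper both removed)
	 */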
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -538,17 +538,48 @@ END(irq_entries_start)
 .endm
 
 /*
- * Switch to the thread stack. This is called with the IRET frame and
- * orig_ax on the stack. (That is, RDI..R12 are not on the stack and
- * space has not been allocated for them.)
+ * Interrupt entry helper function.
+ *
+ * Entry runs with interrupts off. Stack layout at entry:
+ * +----------------------------------------------------+
+ * | regs->ss                                            |
+ * | regs->rsp                                           |
+ * | regs->eflags                                        |
+ * | regs->cs                                            |
+ * | regs->ip                                            |
+ * +----------------------------------------------------+
+ * | regs->orig_ax = ~(interrupt number)                 |
+ * +----------------------------------------------------+
+ * | return address                                      |
+ * +----------------------------------------------------+
  */
-.macro DO_SWITCH_TO_THREAD_STACK
+ENTRY(interrupt_entry)
+	UNWIND_HINT_FUNC
+	ASM_CLAC
+	cld
+
+	testb	$3, CS-ORIG_RAX+8(%rsp)
+	jz	1f
+	SWAPGS
+
+	/*
+	 * Switch to the thread stack. The IRET frame and orig_ax are
+	 * on the stack, as well as the return address. RDI..R12 are
+	 * not (yet) on the stack and space has not (yet) been
+	 * allocated for them.
+	 */
 	pushq	%rdi
+
 	/* Need to switch before accessing the thread stack. */
 	SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi
 	movq	%rsp, %rdi
 	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rsp
-	UNWIND_HINT sp_offset=16 sp_reg=ORC_REG_DI
+
+	/*
+	 * We have RDI, return address, and orig_ax on the stack on
+	 * top of the IRET frame. That means offset=24
+	 */
+	UNWIND_HINT_IRET_REGS base=%rdi offset=24
 
 	pushq	7*8(%rdi)		/* regs->ss */
 	pushq	6*8(%rdi)		/* regs->rsp */
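The updated UNWIND hint in the hunk above is easiest to check against the old stack as seen through %rdi. The layout below is an editor's sketch derived from the diff, not part of the patch; the slot names follow the pushq 7*8(%rdi)/6*8(%rdi) copies:

	/*
	 * Sketch: old (entry) stack addressed via %rdi in interrupt_entry,
	 * right after "movq %rsp, %rdi".
	 *
	 *   0(%rdi)  saved RDI        (pushed just before the CR3/stack switch)
	 *   8(%rdi)  return address   (interrupt_entry is reached via a call)
	 *  16(%rdi)  regs->orig_ax
	 *  24(%rdi)  regs->ip         <- the IRET frame starts here
	 *  32(%rdi)  regs->cs
	 *  40(%rdi)  regs->eflags
	 *  48(%rdi)  regs->rsp
	 *  56(%rdi)  regs->ss         <- copied first by "pushq 7*8(%rdi)"
	 */

With RDI, the return address and orig_ax sitting on top of the IRET frame, the frame starts 24 bytes above %rdi, which is exactly what UNWIND_HINT_IRET_REGS base=%rdi offset=24 tells the unwinder.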
@@ -560,25 +591,6 @@ END(irq_entries_start)
 	UNWIND_HINT_FUNC
 
 	movq	(%rdi), %rdi
-.endm
-
-/*
- * Interrupt entry/exit.
- *
- * Interrupt entry points save only callee clobbered registers in fast path.
- *
- * Entry runs with interrupts off.
- */
-/* 8(%rsp): ~(interrupt number) */
-ENTRY(interrupt_entry)
-	UNWIND_HINT_FUNC
-	ASM_CLAC
-	cld
-
-	testb	$3, CS-ORIG_RAX+8(%rsp)
-	jz	1f
-	SWAPGS
-	DO_SWITCH_TO_THREAD_STACK
 1:
 
 	PUSH_AND_CLEAR_REGS save_ret=1
@@ -592,7 +604,7 @@ ENTRY(interrupt_entry)
 	 *
 	 * We need to tell lockdep that IRQs are off. We can't do this until
 	 * we fix gsbase, and we should do it before enter_from_user_mode
-	 * (which can take locks). Since TRACE_IRQS_OFF idempotent,
+	 * (which can take locks). Since TRACE_IRQS_OFF is idempotent,
	 * the simplest way to handle it is to just call it twice if
 	 * we enter from user mode. There's no reason to optimize this since
 	 * TRACE_IRQS_OFF is a no-op if lockdep is off.
@@ -609,6 +621,9 @@ ENTRY(interrupt_entry)
 	ret
 END(interrupt_entry)
 
+
+/* Interrupt entry/exit. */
+
 /*
  * The interrupt stubs push (~vector+0x80) onto the stack and
  * then jump to common_interrupt.
@@ -878,17 +893,6 @@ apicinterrupt IRQ_WORK_VECTOR irq_work_interrupt smp_irq_work_interrupt
  */
 #define CPU_TSS_IST(x) PER_CPU_VAR(cpu_tss_rw) + (TSS_ist + ((x) - 1) * 8)
 
-#if defined(CONFIG_IA32_EMULATION)
-/* entry_64_compat.S::entry_INT80_compat expects this to be an ASM function */
-ENTRY(switch_to_thread_stack)
-	UNWIND_HINT_FUNC
-
-	DO_SWITCH_TO_THREAD_STACK
-
-	ret
-END(switch_to_thread_stack)
-#endif
-
 .macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1
 ENTRY(\sym)
 	UNWIND_HINT_IRET_REGS offset=\has_error_code*8
--- a/arch/x86/entry/entry_64_compat.S
+++ b/arch/x86/entry/entry_64_compat.S
@@ -347,10 +347,23 @@ ENTRY(entry_INT80_compat)
 	 */
 	movl	%eax, %eax
 
 	/* switch to thread stack expects orig_ax and rdi to be pushed */
 	pushq	%rax			/* pt_regs->orig_ax */
 	pushq	%rdi			/* pt_regs->di */
 
-	/* switch to thread stack expects orig_ax to be pushed */
-	call	switch_to_thread_stack
+	/* Need to switch before accessing the thread stack. */
+	SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi
+	movq	%rsp, %rdi
+	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rsp
+
+	pushq	6*8(%rdi)		/* regs->ss */
+	pushq	5*8(%rdi)		/* regs->rsp */
+	pushq	4*8(%rdi)		/* regs->eflags */
+	pushq	3*8(%rdi)		/* regs->cs */
+	pushq	2*8(%rdi)		/* regs->ip */
+	pushq	1*8(%rdi)		/* regs->orig_ax */
+
+	movq	(%rdi), %rdi		/* restore %rdi */
+
+	pushq	%rdi			/* pt_regs->di */
 	pushq	%rsi			/* pt_regs->si */
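In the open-coded copy above there is no return address on the old stack, since entry_INT80_compat no longer goes through "call switch_to_thread_stack", so every slot sits one position lower than in interrupt_entry. Again an editor's sketch derived from the hunk, not part of the patch:

	/*
	 * Sketch: old (entry) stack addressed via %rdi in entry_INT80_compat,
	 * right after "movq %rsp, %rdi".
	 *
	 *   0(%rdi)  saved RDI (the earlier "pushq %rdi")
	 *   8(%rdi)  regs->orig_ax  = 1*8(%rdi)
	 *  16(%rdi)  regs->ip       = 2*8(%rdi)  <- the IRET frame starts here
	 *  24(%rdi)  regs->cs       = 3*8(%rdi)
	 *  32(%rdi)  regs->eflags   = 4*8(%rdi)
	 *  40(%rdi)  regs->rsp      = 5*8(%rdi)
	 *  48(%rdi)  regs->ss       = 6*8(%rdi)  <- copied first
	 */

That is why this copy uses pushq 6*8(%rdi)..1*8(%rdi), while interrupt_entry copies from one slot higher (7*8(%rdi)..2*8(%rdi)) to account for the extra return-address slot.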