x86/asm/entry/64: Save user RSP in pt_regs->sp on SYSCALL64 fastpath
Prepare for the removal of 'usersp' by simplifying PER_CPU(old_rsp) usage:

  - use it only as temp storage;

  - store the userspace stack pointer immediately in pt_regs->sp on syscall
    entry, instead of using it later, on syscall exit;

  - change C code to use pt_regs->sp only, instead of PER_CPU(old_rsp) and
    task->thread.usersp.

FIXUP/RESTORE_TOP_OF_STACK are simplified as well.

Signed-off-by: Denys Vlasenko <dvlasenk@redhat.com>
Cc: Alexei Starovoitov <ast@plumgrid.com>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Kees Cook <keescook@chromium.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Will Drewry <wad@chromium.org>
Link: http://lkml.kernel.org/r/1425926364-9526-4-git-send-email-dvlasenk@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 616ab249f1
commit 263042e463
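Before the diff itself, a minimal user-space C sketch of the idea (illustration only, not kernel code; entry_old(), entry_new() and the two globals are hypothetical stand-ins for PER_CPU(old_rsp) and task_pt_regs(current)): with this patch the user stack pointer is written into pt_regs->sp right at syscall entry, and old_rsp is demoted to temporary storage.

#include <stdio.h>

struct pt_regs { unsigned long sp; };

static unsigned long old_rsp;           /* stand-in for PER_CPU(old_rsp) */
static struct pt_regs task_regs;        /* stand-in for *task_pt_regs(current) */

/* Old scheme: entry only stashes the user RSP; pt_regs->sp is filled in later. */
static void entry_old(unsigned long user_rsp)
{
	old_rsp = user_rsp;
}

/* New scheme: entry stores the user RSP into pt_regs->sp immediately;
 * old_rsp is only used as temporary storage. */
static void entry_new(unsigned long user_rsp)
{
	old_rsp = user_rsp;
	task_regs.sp = old_rsp;
}

int main(void)
{
	unsigned long user_rsp = 0x7ffffffff000UL;

	entry_old(user_rsp);
	printf("old scheme: pt_regs->sp = %#lx (not valid until syscall exit)\n",
	       task_regs.sp);

	entry_new(user_rsp);
	printf("new scheme: pt_regs->sp = %#lx (valid right after entry)\n",
	       task_regs.sp);
	return 0;
}

With the stack pointer always present in pt_regs, the C consumers changed below (compat, perf, KSTK_ESP) no longer need to special-case where the value lives.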
@@ -301,7 +301,7 @@ static inline void __user *arch_compat_alloc_user_space(long len)
 		sp = task_pt_regs(current)->sp;
 	} else {
 		/* -128 for the x32 ABI redzone */
-		sp = this_cpu_read(old_rsp) - 128;
+		sp = task_pt_regs(current)->sp - 128;
 	}
 
 	return (void __user *)round_down(sp - len, 16);

@@ -145,12 +145,8 @@ static inline bool user_64bit_mode(struct pt_regs *regs)
 #endif
 }
 
-#define current_user_stack_pointer()	this_cpu_read(old_rsp)
-/* ia32 vs. x32 difference */
-#define compat_user_stack_pointer()	\
-	(test_thread_flag(TIF_IA32)	\
-	 ? current_pt_regs()->sp	\
-	 : this_cpu_read(old_rsp))
+#define current_user_stack_pointer()	current_pt_regs()->sp
+#define compat_user_stack_pointer()	current_pt_regs()->sp
 #endif
 
 #ifdef CONFIG_X86_32

@@ -128,8 +128,6 @@ ENDPROC(native_usergs_sysret64)
  * manipulation.
  */
 	.macro FIXUP_TOP_OF_STACK tmp offset=0
-	movq PER_CPU_VAR(old_rsp),\tmp
-	movq \tmp,RSP+\offset(%rsp)
 	movq $__USER_DS,SS+\offset(%rsp)
 	movq $__USER_CS,CS+\offset(%rsp)
 	movq RIP+\offset(%rsp),\tmp	/* get rip */

@@ -139,8 +137,7 @@ ENDPROC(native_usergs_sysret64)
 	.endm
 
 	.macro RESTORE_TOP_OF_STACK tmp offset=0
-	movq RSP+\offset(%rsp),\tmp
-	movq \tmp,PER_CPU_VAR(old_rsp)
+	/* nothing to do */
 	.endm
 
 /*

@@ -222,9 +219,6 @@ ENDPROC(native_usergs_sysret64)
  * Interrupts are off on entry.
  * Only called from user space.
  *
- * XXX	if we had a free scratch register we could save the RSP into the stack frame
- *      and report it properly in ps. Unfortunately we haven't.
- *
 * When user can change the frames always force IRET. That is because
 * it deals with uncanonical addresses better. SYSRET has trouble
 * with them due to bugs in both AMD and Intel CPUs.

@@ -253,11 +247,13 @@ GLOBAL(system_call_after_swapgs)
 	 */
 	ENABLE_INTERRUPTS(CLBR_NONE)
 	ALLOC_PT_GPREGS_ON_STACK 8	/* +8: space for orig_ax */
+	movq	%rcx,RIP(%rsp)
+	movq	PER_CPU_VAR(old_rsp),%rcx
+	movq	%r11,EFLAGS(%rsp)
+	movq	%rcx,RSP(%rsp)
+	movq_cfi rax,ORIG_RAX
 	SAVE_C_REGS_EXCEPT_RAX_RCX_R11
 	movq	$-ENOSYS,RAX(%rsp)
-	movq_cfi rax,ORIG_RAX
-	movq	%r11,EFLAGS(%rsp)
-	movq	%rcx,RIP(%rsp)
 	CFI_REL_OFFSET rip,RIP
 	testl	$_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP)
 	jnz tracesys

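As a reading aid only (not part of the patch), a small user-space C model of what the five added instructions on the fast path store into the freshly allocated pt_regs frame; struct pt_regs_model and fill_frame() are hypothetical names used purely for illustration.

#include <stdio.h>

/* Hypothetical model of the pt_regs slots touched on the SYSCALL64 fast path. */
struct pt_regs_model {
	unsigned long ip, flags, sp, orig_ax;
};

/* SYSCALL leaves the return RIP in %rcx and RFLAGS in %r11; the prologue
 * parked the user RSP in PER_CPU(old_rsp).  The added instructions amount
 * to the following stores into the frame: */
static void fill_frame(struct pt_regs_model *regs,
		       unsigned long rcx, unsigned long r11,
		       unsigned long old_rsp, unsigned long rax)
{
	regs->ip      = rcx;       /* movq %rcx,RIP(%rsp) */
	regs->flags   = r11;       /* movq %r11,EFLAGS(%rsp) */
	regs->sp      = old_rsp;   /* movq PER_CPU_VAR(old_rsp),%rcx; movq %rcx,RSP(%rsp) */
	regs->orig_ax = rax;       /* movq_cfi rax,ORIG_RAX */
}

int main(void)
{
	struct pt_regs_model regs;

	/* Example values: return address, RFLAGS, user stack pointer, syscall nr. */
	fill_frame(&regs, 0x400123, 0x246, 0x7ffffffff000UL, 1);
	printf("rip=%#lx rsp=%#lx orig_ax=%lu\n", regs.ip, regs.sp, regs.orig_ax);
	return 0;
}

Because pt_regs->sp is now populated at entry, the SYSRET exit path below can restore the user stack pointer straight from the frame instead of from PER_CPU(old_rsp).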
@@ -293,7 +289,7 @@ ret_from_sys_call:
 	CFI_REGISTER	rip,rcx
 	movq	EFLAGS(%rsp),%r11
 	/*CFI_REGISTER	rflags,r11*/
-	movq	PER_CPU_VAR(old_rsp), %rsp
+	movq	RSP(%rsp),%rsp
 	/*
 	 * 64bit SYSRET restores rip from rcx,
 	 * rflags from r11 (but RF and VM bits are forced to 0),

@@ -177,7 +177,7 @@ void perf_get_regs_user(struct perf_regs *regs_user,
 	 * than just blindly copying user_regs.
 	 */
 	regs_user->abi = PERF_SAMPLE_REGS_ABI_64;
-	regs_user_copy->sp = this_cpu_read(old_rsp);
+	regs_user_copy->sp = user_regs->sp;
 	regs_user_copy->cs = __USER_CS;
 	regs_user_copy->ss = __USER_DS;
 	regs_user_copy->cx = -1; /* usually contains garbage */

@@ -602,6 +602,5 @@ long sys_arch_prctl(int code, unsigned long addr)
 
 unsigned long KSTK_ESP(struct task_struct *task)
 {
-	return (test_tsk_thread_flag(task, TIF_IA32)) ?
-		(task_pt_regs(task)->sp) : ((task)->thread.usersp);
+	return task_pt_regs(task)->sp;
 }