/*
 * Copyright (C) 2002 David Howells (dhowells@redhat.com)
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 */

#ifndef _ASM_TILE_THREAD_INFO_H
#define _ASM_TILE_THREAD_INFO_H

#include <asm/processor.h>
#include <asm/page.h>
#ifndef __ASSEMBLY__

/*
 * Low level task data that assembly code needs immediate access to.
 * The structure is placed at the bottom of the supervisor stack.
 */
struct thread_info {
	struct task_struct	*task;		/* main task structure */
	unsigned long		flags;		/* low level flags */
	unsigned long		status;		/* thread-synchronous flags */
	__u32			homecache_cpu;	/* CPU we are homecached on */
	__u32			cpu;		/* current CPU */
	int			preempt_count;	/* 0 => preemptable,
						   <0 => BUG */
	mm_segment_t		addr_limit;	/* thread address space
						   (KERNEL_DS or USER_DS) */
	struct single_step_state *step_state;	/* single step state
						   (if non-zero) */
	int			align_ctl;	/* controls unaligned access */
#ifdef __tilegx__
	unsigned long		unalign_jit_tmp[4]; /* temp r0..r3 storage */
	void __user		*unalign_jit_base;  /* unalign fixup JIT base */
#endif
	bool			in_backtrace;	/* currently doing backtrace? */
};
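
/*
 * Layout sketch (illustrative, per the comment above): the structure
 * occupies the lowest addresses of the THREAD_SIZE-aligned supervisor
 * stack, with the stack itself growing down toward it:
 *
 *	+----------------------+  <- stack base + THREAD_SIZE
 *	|     kernel stack     |  (grows downward)
 *	|         ...          |
 *	+----------------------+
 *	|  struct thread_info  |  <- stack base (THREAD_SIZE aligned)
 *	+----------------------+
 */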

/*
 * macros/functions for gaining access to the thread information structure.
 */
#define INIT_THREAD_INFO(tsk)			\
{						\
	.task		= &tsk,			\
	.flags		= 0,			\
	.cpu		= 0,			\
	.preempt_count	= INIT_PREEMPT_COUNT,	\
	.addr_limit	= KERNEL_DS,		\
	.step_state	= NULL,			\
	.align_ctl	= 0,			\
}

#define init_thread_info	(init_thread_union.thread_info)
#define init_stack		(init_thread_union.stack)
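
/*
 * Usage sketch (illustrative; the real instance lives in the generic
 * init-task code, not in this header): the boot task's combined
 * thread_info and stack is typically declared as
 *
 *	union thread_union init_thread_union __init_task_data =
 *		{ INIT_THREAD_INFO(init_task) };
 *
 * which is what the init_thread_info/init_stack aliases above name.
 */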

#endif /* !__ASSEMBLY__ */

#if PAGE_SIZE < 8192
#define THREAD_SIZE_ORDER (13 - PAGE_SHIFT)
#else
#define THREAD_SIZE_ORDER (0)
#endif
#define THREAD_SIZE_PAGES (1 << THREAD_SIZE_ORDER)

#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
#define LOG2_THREAD_SIZE (PAGE_SHIFT + THREAD_SIZE_ORDER)

#define STACK_WARN		(THREAD_SIZE/8)
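
/*
 * Worked example (illustrative): with 4 KB pages (PAGE_SHIFT = 12),
 * THREAD_SIZE_ORDER is 13 - 12 = 1, so THREAD_SIZE is 8 KB spanning
 * THREAD_SIZE_PAGES = 2 pages, and LOG2_THREAD_SIZE is 13.  With
 * 64 KB pages the order is 0 and the stack is a single page.  Either
 * way the stack is at least 8 KB, and STACK_WARN is the low-water
 * mark (one eighth of it) below which stack usage draws a warning.
 */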

#ifndef __ASSEMBLY__

void arch_release_thread_stack(unsigned long *stack);

/* How to get the thread information struct from C. */
register unsigned long stack_pointer __asm__("sp");

#define current_thread_info() \
  ((struct thread_info *)(stack_pointer & -THREAD_SIZE))
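
/*
 * Example (illustrative sketch): since kernel stacks are THREAD_SIZE
 * aligned, masking the live stack pointer with -THREAD_SIZE rounds it
 * down to the stack base, where thread_info lives.  So from C:
 *
 *	struct task_struct *tsk = current_thread_info()->task;
 */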

/* Sit on a nap instruction until interrupted. */
extern void smp_nap(void);

/* Enable interrupts racelessly and nap forever: helper for arch_cpu_idle(). */
extern void _cpu_idle(void);

#else /* __ASSEMBLY__ */

/*
 * How to get the thread information struct from assembly.
 * Note that we use different macros since different architectures
 * have different semantics in their "mm" instruction and we would
 * like to guarantee that the macro expands to exactly one instruction.
 */
#ifdef __tilegx__
#define EXTRACT_THREAD_INFO(reg) mm reg, zero, LOG2_THREAD_SIZE, 63
#else
#define GET_THREAD_INFO(reg) mm reg, sp, zero, LOG2_THREAD_SIZE, 31
#endif
#endif /* !__ASSEMBLY__ */
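
/*
 * Usage sketch (illustrative; operand encodings are taken on trust
 * from the definitions above): both macros clear the low
 * LOG2_THREAD_SIZE bits of a stack address with a single "mm"
 * (masked-merge) instruction, mirroring the C macro's mask while
 * staying bundleable.  On tilegx the register must already hold the
 * stack pointer, e.g.:
 *
 *	move	r32, sp
 *	EXTRACT_THREAD_INFO(r32)	// r32 = thread_info address
 */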

/*
 * Thread information flags that various assembly files may need to access.
 * Keep flags accessed frequently in low bits, particularly since it makes
 * it easier to build constants in assembly.
 */
#define TIF_SIGPENDING		0	/* signal pending */
#define TIF_NEED_RESCHED	1	/* rescheduling necessary */
#define TIF_SINGLESTEP		2	/* restore singlestep on return to
					   user mode */
#define TIF_ASYNC_TLB		3	/* got an async TLB fault in kernel */
#define TIF_SYSCALL_TRACE	4	/* syscall trace active */
#define TIF_SYSCALL_AUDIT	5	/* syscall auditing active */
#define TIF_SECCOMP		6	/* secure computing */
#define TIF_MEMDIE		7	/* OOM killer at work */
#define TIF_NOTIFY_RESUME	8	/* callback before returning to user */
#define TIF_SYSCALL_TRACEPOINT	9	/* syscall tracepoint instrumentation */
#define TIF_POLLING_NRFLAG	10	/* idle is polling for TIF_NEED_RESCHED */
#define TIF_NOHZ		11	/* in adaptive nohz mode */

#define _TIF_SIGPENDING		(1<<TIF_SIGPENDING)
#define _TIF_NEED_RESCHED	(1<<TIF_NEED_RESCHED)
#define _TIF_SINGLESTEP		(1<<TIF_SINGLESTEP)
#define _TIF_ASYNC_TLB		(1<<TIF_ASYNC_TLB)
#define _TIF_SYSCALL_TRACE	(1<<TIF_SYSCALL_TRACE)
#define _TIF_SYSCALL_AUDIT	(1<<TIF_SYSCALL_AUDIT)
#define _TIF_SECCOMP		(1<<TIF_SECCOMP)
#define _TIF_MEMDIE		(1<<TIF_MEMDIE)
#define _TIF_NOTIFY_RESUME	(1<<TIF_NOTIFY_RESUME)
#define _TIF_SYSCALL_TRACEPOINT	(1<<TIF_SYSCALL_TRACEPOINT)
#define _TIF_POLLING_NRFLAG	(1<<TIF_POLLING_NRFLAG)
#define _TIF_NOHZ		(1<<TIF_NOHZ)

/* Work to do as we loop to exit to user space. */
#define _TIF_WORK_MASK \
	(_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
	 _TIF_ASYNC_TLB | _TIF_NOTIFY_RESUME)

/* Work to do on any return to user space. */
#define _TIF_ALLWORK_MASK \
	(_TIF_WORK_MASK | _TIF_SINGLESTEP | _TIF_NOHZ)

/* Work to do at syscall entry. */
#define _TIF_SYSCALL_ENTRY_WORK \
	(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ)

/* Work to do at syscall exit. */
#define _TIF_SYSCALL_EXIT_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_TRACEPOINT)
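
/*
 * Usage sketch (illustrative): the generic helpers in
 * <linux/thread_info.h> operate on the TIF_* bit numbers, while the
 * return-to-user path tests the composite _TIF_* masks, e.g.:
 *
 *	set_thread_flag(TIF_NEED_RESCHED);
 *	if (current_thread_info()->flags & _TIF_WORK_MASK)
 *		;	// take the slow path back to user space
 */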

/*
 * Thread-synchronous status.
 *
 * This is different from the flags in that nobody else
 * ever touches our thread-synchronous status, so we don't
 * have to worry about atomic accesses.
 */
#ifdef __tilegx__
#define TS_COMPAT		0x0001	/* 32-bit compatibility mode */
#endif
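
/*
 * Usage sketch (illustrative; the real predicate is expected to live
 * in asm/compat.h): because only the current thread ever touches its
 * own ->status, a plain non-atomic read suffices:
 *
 *	if (current_thread_info()->status & TS_COMPAT)
 *		;	// task is running 32-bit compat code
 */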

#endif /* _ASM_TILE_THREAD_INFO_H */