/*
 * arch/sh/kernel/process.c
 *
 * This file handles the architecture-dependent parts of process handling..
 *
 * Copyright (C) 1995 Linus Torvalds
 *
 * SuperH version: Copyright (C) 1999, 2000 Niibe Yutaka & Kaz Kojima
 * Copyright (C) 2006 Lineo Solutions Inc. support SH4A UBC
 * Copyright (C) 2002 - 2008 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/elfcore.h>
#include <linux/kallsyms.h>
#include <linux/fs.h>
#include <linux/ftrace.h>
#include <linux/hw_breakpoint.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/system.h>
#include <asm/fpu.h>
#include <asm/syscalls.h>

void show_regs(struct pt_regs * regs)
{
        printk("\n");
        printk("Pid : %d, Comm: \t\t%s\n", task_pid_nr(current), current->comm);
        printk("CPU : %d \t\t%s (%s %.*s)\n\n",
               smp_processor_id(), print_tainted(), init_utsname()->release,
               (int)strcspn(init_utsname()->version, " "),
               init_utsname()->version);

        print_symbol("PC is at %s\n", instruction_pointer(regs));
        print_symbol("PR is at %s\n", regs->pr);

        printk("PC : %08lx SP : %08lx SR : %08lx ",
               regs->pc, regs->regs[15], regs->sr);
#ifdef CONFIG_MMU
        printk("TEA : %08x\n", __raw_readl(MMU_TEA));
#else
        printk("\n");
#endif

        printk("R0 : %08lx R1 : %08lx R2 : %08lx R3 : %08lx\n",
               regs->regs[0],regs->regs[1],
               regs->regs[2],regs->regs[3]);
        printk("R4 : %08lx R5 : %08lx R6 : %08lx R7 : %08lx\n",
               regs->regs[4],regs->regs[5],
               regs->regs[6],regs->regs[7]);
        printk("R8 : %08lx R9 : %08lx R10 : %08lx R11 : %08lx\n",
               regs->regs[8],regs->regs[9],
               regs->regs[10],regs->regs[11]);
        printk("R12 : %08lx R13 : %08lx R14 : %08lx\n",
               regs->regs[12],regs->regs[13],
               regs->regs[14]);
        printk("MACH: %08lx MACL: %08lx GBR : %08lx PR : %08lx\n",
               regs->mach, regs->macl, regs->gbr, regs->pr);

        show_trace(NULL, (unsigned long *)regs->regs[15], regs);
        show_code(regs);
}

/*
 * Create a kernel thread
 */
ATTRIB_NORET void kernel_thread_helper(void *arg, int (*fn)(void *))
{
        do_exit(fn(arg));
}

/* Don't use this in BL=1(cli). Or else, CPU resets! */
int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
{
        struct pt_regs regs;
        int pid;

        memset(&regs, 0, sizeof(regs));
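        /*
         * The first two function arguments travel in r4 and r5 on SH, so
         * kernel_thread_helper() picks up arg and fn straight from the
         * register image built here when the child starts running.
         */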
        regs.regs[4] = (unsigned long)arg;
        regs.regs[5] = (unsigned long)fn;

        regs.pc = (unsigned long)kernel_thread_helper;
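        /*
         * SR.MD keeps the new thread in privileged (kernel) mode; when the
         * FPU is configured, SR.FD starts it with the FPU disabled, which
         * matches the rule that kernel threads never own FPU state.
         */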
        regs.sr = SR_MD;
#if defined(CONFIG_SH_FPU)
        regs.sr |= SR_FD;
#endif

        /* Ok, create the new process.. */
        pid = do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0,
                      &regs, 0, NULL, NULL);

        return pid;
}
EXPORT_SYMBOL(kernel_thread);

void start_thread(struct pt_regs *regs, unsigned long new_pc,
                  unsigned long new_sp)
{
        set_fs(USER_DS);

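        /*
         * Fresh user context: no return address in PR yet, and SR.FD set
         * so the FPU starts disabled; the first FP instruction traps and
         * the math state is set up lazily at that point.
         */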
        regs->pr = 0;
        regs->sr = SR_FD;
        regs->pc = new_pc;
        regs->regs[15] = new_sp;

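        /* Drop any math state left over from the previous image. */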
        free_thread_xstate(current);
}
EXPORT_SYMBOL(start_thread);

/*
 * Free current thread data structures etc..
 */
void exit_thread(void)
{
}

void flush_thread(void)
{
        struct task_struct *tsk = current;

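        /* Release any hardware breakpoints the old image had set via ptrace. */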
        flush_ptrace_hw_breakpoint(tsk);

#if defined(CONFIG_SH_FPU)
        /* Forget lazy FPU state */
        clear_fpu(tsk, task_pt_regs(tsk));
        clear_used_math();
#endif
}

void release_thread(struct task_struct *dead_task)
{
        /* do nothing */
}

/* Fill in the fpu structure for a core dump.. */
int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
{
        int fpvalid = 0;

#if defined(CONFIG_SH_FPU)
        struct task_struct *tsk = current;

        fpvalid = !!tsk_used_math(tsk);
        if (fpvalid)
                fpvalid = !fpregs_get(tsk, NULL, 0,
                                      sizeof(struct user_fpu_struct),
                                      fpu, NULL);
#endif

        return fpvalid;
}
EXPORT_SYMBOL(dump_fpu);

/*
 * This gets called before we allocate a new thread and copy
 * the current task into it.
 */
void prepare_to_copy(struct task_struct *tsk)
{
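        /*
         * Flush any live FPU context back into the task structure so the
         * copy made for the child sees up-to-date register contents.
         */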
        unlazy_fpu(tsk, task_pt_regs(tsk));
}

asmlinkage void ret_from_fork(void);

int copy_thread(unsigned long clone_flags, unsigned long usp,
                unsigned long unused,
                struct task_struct *p, struct pt_regs *regs)
{
        struct thread_info *ti = task_thread_info(p);
        struct pt_regs *childregs;

#if defined(CONFIG_SH_DSP)
        struct task_struct *tsk = current;

        if (is_dsp_enabled(tsk)) {
                /* We can use the __save_dsp or just copy the struct:
                 * __save_dsp(p);
                 * p->thread.dsp_status.status |= SR_DSP
                 */
                p->thread.dsp_status = tsk->thread.dsp_status;
        }
#endif

        childregs = task_pt_regs(p);
        *childregs = *regs;

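        /*
         * A fork from user space gets the new user stack pointer and a
         * USER_DS limit; a kernel thread (built by kernel_thread() above)
         * stays in KERNEL_DS and uses the register frame itself as its
         * starting stack pointer.
         */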
        if (user_mode(regs)) {
                childregs->regs[15] = usp;
                ti->addr_limit = USER_DS;
        } else {
                childregs->regs[15] = (unsigned long)childregs;
                ti->addr_limit = KERNEL_DS;
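                /* Kernel threads never own lazy FPU state. */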
                ti->status &= ~TS_USEDFPU;
                p->fpu_counter = 0;
        }

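        /*
         * On SH the thread pointer used for TLS lives in GBR; the clone
         * caller supplies the new TLS value in what becomes the child's
         * r0 image.
         */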
        if (clone_flags & CLONE_SETTLS)
                childregs->gbr = childregs->regs[0];

        childregs->regs[0] = 0; /* Set return value for child */

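        /*
         * switch_to() will start the child at ret_from_fork, with its
         * kernel stack pointer parked on this register frame.
         */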
        p->thread.sp = (unsigned long) childregs;
        p->thread.pc = (unsigned long) ret_from_fork;

        memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));

        return 0;
}

/*
 *      switch_to(x,y) should switch tasks from x to y.
 *
 */
__notrace_funcgraph struct task_struct *
__switch_to(struct task_struct *prev, struct task_struct *next)
{
        struct thread_struct *next_t = &next->thread;

        unlazy_fpu(prev, task_pt_regs(prev));

        /* we're going to use this soon, after a few expensive things */
        if (next->fpu_counter > 5)
                prefetch(next_t->xstate);

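        /*
         * k7 (r7 of the shadow register bank) is expected to hold the
         * current task's thread_info; the exception entry code reads it,
         * so it has to be updated on every switch.
         */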
#ifdef CONFIG_MMU
        /*
         * Restore the kernel mode register
         *      k7 (r7_bank1)
         */
        asm volatile("ldc %0, r7_bank"
                     : /* no output */
                     : "r" (task_thread_info(next)));
#endif

        /*
         * If the task has used fpu the last 5 timeslices, just do a full
         * restore of the math state immediately to avoid the trap; the
         * chances of needing FPU soon are obviously high now
         */
        if (next->fpu_counter > 5)
                __fpu_state_restore();

        return prev;
}

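/*
 * The syscall entry code leaves the complete register frame on the kernel
 * stack, where these wrappers see it as the by-value __regs argument;
 * RELOC_HIDE(&__regs, 0) takes its address without letting the compiler
 * treat it as an ordinary local copy.
 */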
asmlinkage int sys_fork(unsigned long r4, unsigned long r5,
                        unsigned long r6, unsigned long r7,
                        struct pt_regs __regs)
{
#ifdef CONFIG_MMU
        struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
        return do_fork(SIGCHLD, regs->regs[15], regs, 0, NULL, NULL);
#else
        /* fork almost works, enough to trick you into looking elsewhere :-( */
        return -EINVAL;
#endif
}

asmlinkage int sys_clone(unsigned long clone_flags, unsigned long newsp,
                         unsigned long parent_tidptr,
                         unsigned long child_tidptr,
                         struct pt_regs __regs)
{
        struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
        if (!newsp)
                newsp = regs->regs[15];
        return do_fork(clone_flags, newsp, regs, 0,
                       (int __user *)parent_tidptr,
                       (int __user *)child_tidptr);
}

/*
 * This is trivial, and on the face of it looks like it
 * could equally well be done in user mode.
 *
 * Not so, for quite unobvious reasons - register pressure.
 * In user mode vfork() cannot have a stack frame, and if
 * done by calling the "clone()" system call directly, you
 * do not have enough call-clobbered registers to hold all
 * the information you need.
 */
asmlinkage int sys_vfork(unsigned long r4, unsigned long r5,
                         unsigned long r6, unsigned long r7,
                         struct pt_regs __regs)
{
        struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
        return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->regs[15], regs,
                       0, NULL, NULL);
}

/*
 * sys_execve() executes a new program.
 */
asmlinkage int sys_execve(char __user *ufilename, char __user * __user *uargv,
                          char __user * __user *uenvp, unsigned long r7,
                          struct pt_regs __regs)
{
        struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
        int error;
        char *filename;

        filename = getname(ufilename);
        error = PTR_ERR(filename);
        if (IS_ERR(filename))
                goto out;

        error = do_execve(filename, uargv, uenvp, regs);
        putname(filename);
out:
        return error;
}

unsigned long get_wchan(struct task_struct *p)
{
        unsigned long pc;

        if (!p || p == current || p->state == TASK_RUNNING)
                return 0;

        /*
         * The same comment as on the Alpha applies here, too ...
         */
        pc = thread_saved_pc(p);

#ifdef CONFIG_FRAME_POINTER
        if (in_sched_functions(pc)) {
                unsigned long schedule_frame = (unsigned long)p->thread.sp;
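                /*
                 * Magic offset: with frame pointers enabled this slot in the
                 * saved switch frame holds the return address into whatever
                 * called schedule(); it has to track the frame layout the
                 * compiler generates for the scheduler.
                 */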
                return ((unsigned long *)schedule_frame)[21];
        }
#endif

        return pc;
}