mirror of https://gitee.com/openkylin/linux.git
Merge branch 'ptrace' of git://git.kernel.org/pub/scm/linux/kernel/git/oleg/misc
* 'ptrace' of git://git.kernel.org/pub/scm/linux/kernel/git/oleg/misc: (39 commits)
  ptrace: do_wait(traced_leader_killed_by_mt_exec) can block forever
  ptrace: fix ptrace_signal() && STOP_DEQUEUED interaction
  connector: add an event for monitoring process tracers
  ptrace: dont send SIGSTOP on auto-attach if PT_SEIZED
  ptrace: mv send-SIGSTOP from do_fork() to ptrace_init_task()
  ptrace_init_task: initialize child->jobctl explicitly
  has_stopped_jobs: s/task_is_stopped/SIGNAL_STOP_STOPPED/
  ptrace: make former thread ID available via PTRACE_GETEVENTMSG after PTRACE_EVENT_EXEC stop
  ptrace: wait_consider_task: s/same_thread_group/ptrace_reparented/
  ptrace: kill real_parent_is_ptracer() in favor of ptrace_reparented()
  ptrace: ptrace_reparented() should check same_thread_group()
  redefine thread_group_leader() as exit_signal >= 0
  do not change dead_task->exit_signal
  kill task_detached()
  reparent_leader: check EXIT_DEAD instead of task_detached()
  make do_notify_parent() __must_check, update the callers
  __ptrace_detach: avoid task_detached(), check do_notify_parent()
  kill tracehook_notify_death()
  make do_notify_parent() return bool
  ptrace: s/tracehook_tracer_task()/ptrace_parent()/
  ...
commit 8209f53d79
@@ -331,7 +331,7 @@ void __kprobes do_per_trap(struct pt_regs *regs)
 {
 	if (notify_die(DIE_SSTEP, "sstep", regs, 0, 0, SIGTRAP) == NOTIFY_STOP)
 		return;
-	if (tracehook_consider_fatal_signal(current, SIGTRAP))
+	if (current->ptrace)
 		force_sig(SIGTRAP, current);
 }
@@ -425,7 +425,7 @@ static void __kprobes illegal_op(struct pt_regs *regs, long pgm_int_code,
 	if (get_user(*((__u16 *) opcode), (__u16 __user *) location))
 		return;
 	if (*((__u16 *) opcode) == S390_BREAKPOINT_U16) {
-		if (tracehook_consider_fatal_signal(current, SIGTRAP))
+		if (current->ptrace)
 			force_sig(SIGTRAP, current);
 		else
 			signal = SIGILL;
@@ -28,6 +28,7 @@
 #include <linux/init.h>
 #include <linux/connector.h>
 #include <linux/gfp.h>
+#include <linux/ptrace.h>
 #include <asm/atomic.h>
 #include <asm/unaligned.h>
@@ -166,6 +167,40 @@ void proc_sid_connector(struct task_struct *task)
 	cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
 }
 
+void proc_ptrace_connector(struct task_struct *task, int ptrace_id)
+{
+	struct cn_msg *msg;
+	struct proc_event *ev;
+	struct timespec ts;
+	__u8 buffer[CN_PROC_MSG_SIZE];
+	struct task_struct *tracer;
+
+	if (atomic_read(&proc_event_num_listeners) < 1)
+		return;
+
+	msg = (struct cn_msg *)buffer;
+	ev = (struct proc_event *)msg->data;
+	get_seq(&msg->seq, &ev->cpu);
+	ktime_get_ts(&ts); /* get high res monotonic timestamp */
+	put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
+	ev->what = PROC_EVENT_PTRACE;
+	ev->event_data.ptrace.process_pid = task->pid;
+	ev->event_data.ptrace.process_tgid = task->tgid;
+	if (ptrace_id == PTRACE_ATTACH) {
+		ev->event_data.ptrace.tracer_pid = current->pid;
+		ev->event_data.ptrace.tracer_tgid = current->tgid;
+	} else if (ptrace_id == PTRACE_DETACH) {
+		ev->event_data.ptrace.tracer_pid = 0;
+		ev->event_data.ptrace.tracer_tgid = 0;
+	} else
+		return;
+
+	memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
+	msg->ack = 0; /* not used */
+	msg->len = sizeof(*ev);
+	cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
+}
+
 void proc_exit_connector(struct task_struct *task)
 {
 	struct cn_msg *msg;
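With this in place, a supervisor can watch attach/detach activity system-wide without being a tracer itself. Below is a minimal sketch of a userspace proc-connector listener, assuming the cn_proc.h additions that appear later in this diff; it is illustrative only, with error handling trimmed.

/* Sketch: listen for PROC_EVENT_PTRACE over the proc connector.
 * Illustrative only; assumes the patched <linux/cn_proc.h>. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/connector.h>
#include <linux/cn_proc.h>

int main(void)
{
	int sk = socket(PF_NETLINK, SOCK_DGRAM, NETLINK_CONNECTOR);
	struct sockaddr_nl sa = {
		.nl_family = AF_NETLINK,
		.nl_groups = CN_IDX_PROC,
		.nl_pid    = getpid(),
	};
	char buf[NLMSG_SPACE(sizeof(struct cn_msg) + sizeof(struct proc_event))];
	struct nlmsghdr *nlh = (struct nlmsghdr *)buf;
	struct cn_msg *cn;
	enum proc_cn_mcast_op op = PROC_CN_MCAST_LISTEN;

	bind(sk, (struct sockaddr *)&sa, sizeof(sa));

	/* subscribe: a cn_msg carrying PROC_CN_MCAST_LISTEN */
	memset(buf, 0, sizeof(buf));
	nlh->nlmsg_len = NLMSG_LENGTH(sizeof(*cn) + sizeof(op));
	nlh->nlmsg_type = NLMSG_DONE;
	cn = (struct cn_msg *)NLMSG_DATA(nlh);
	cn->id.idx = CN_IDX_PROC;
	cn->id.val = CN_VAL_PROC;
	cn->len = sizeof(op);
	memcpy(cn->data, &op, sizeof(op));
	send(sk, nlh, nlh->nlmsg_len, 0);

	for (;;) {
		struct proc_event *ev;

		if (recv(sk, buf, sizeof(buf), 0) <= 0)
			break;
		cn = (struct cn_msg *)NLMSG_DATA(nlh);
		ev = (struct proc_event *)cn->data;
		if (ev->what == PROC_EVENT_PTRACE)
			printf("pid %d %s (tracer tgid %d)\n",
			       ev->event_data.ptrace.process_pid,
			       ev->event_data.ptrace.tracer_pid ?
					"attached" : "detached",
			       ev->event_data.ptrace.tracer_tgid);
	}
	return 0;
}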
fs/exec.c (27 lines changed)
@@ -963,9 +963,18 @@ static int de_thread(struct task_struct *tsk)
 		leader->group_leader = tsk;
 
 		tsk->exit_signal = SIGCHLD;
+		leader->exit_signal = -1;
 
 		BUG_ON(leader->exit_state != EXIT_ZOMBIE);
 		leader->exit_state = EXIT_DEAD;
+
+		/*
+		 * We are going to release_task()->ptrace_unlink() silently,
+		 * the tracer can sleep in do_wait().  EXIT_DEAD guarantees
+		 * the tracer won't block again waiting for this thread.
+		 */
+		if (unlikely(leader->ptrace))
+			__wake_up_parent(leader, leader->parent);
 		write_unlock_irq(&tasklist_lock);
 
 		release_task(leader);
@@ -1225,7 +1234,12 @@ int check_unsafe_exec(struct linux_binprm *bprm)
 	unsigned n_fs;
 	int res = 0;
 
-	bprm->unsafe = tracehook_unsafe_exec(p);
+	if (p->ptrace) {
+		if (p->ptrace & PT_PTRACE_CAP)
+			bprm->unsafe |= LSM_UNSAFE_PTRACE_CAP;
+		else
+			bprm->unsafe |= LSM_UNSAFE_PTRACE;
+	}
 
 	n_fs = 1;
 	spin_lock(&p->fs->lock);
@@ -1353,6 +1367,7 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs)
 	unsigned int depth = bprm->recursion_depth;
 	int try,retval;
 	struct linux_binfmt *fmt;
+	pid_t old_pid;
 
 	retval = security_bprm_check(bprm);
 	if (retval)
@@ -1362,6 +1377,11 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs)
 	if (retval)
 		return retval;
 
+	/* Need to fetch pid before load_binary changes it */
+	rcu_read_lock();
+	old_pid = task_pid_nr_ns(current, task_active_pid_ns(current->parent));
+	rcu_read_unlock();
+
 	retval = -ENOENT;
 	for (try=0; try<2; try++) {
 		read_lock(&binfmt_lock);
@@ -1381,7 +1401,8 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs)
 			bprm->recursion_depth = depth;
 			if (retval >= 0) {
 				if (depth == 0)
-					tracehook_report_exec(fmt, bprm, regs);
+					ptrace_event(PTRACE_EVENT_EXEC,
+							old_pid);
 				put_binfmt(fmt);
 				allow_write_access(bprm->file);
 				if (bprm->file)
@@ -1769,7 +1790,7 @@ static int zap_process(struct task_struct *start, int exit_code)
 
 	t = start;
 	do {
-		task_clear_group_stop_pending(t);
+		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
 		if (t != current && t->mm) {
 			sigaddset(&t->pending.signal, SIGKILL);
 			signal_wake_up(t, 1);
@@ -172,7 +172,7 @@ static inline void task_state(struct seq_file *m, struct pid_namespace *ns,
 		task_tgid_nr_ns(rcu_dereference(p->real_parent), ns) : 0;
 	tpid = 0;
 	if (pid_alive(p)) {
-		struct task_struct *tracer = tracehook_tracer_task(p);
+		struct task_struct *tracer = ptrace_parent(p);
 		if (tracer)
 			tpid = task_pid_nr_ns(tracer, ns);
 	}
@@ -216,7 +216,7 @@ static struct mm_struct *__check_mem_permission(struct task_struct *task)
 	if (task_is_stopped_or_traced(task)) {
 		int match;
 		rcu_read_lock();
-		match = (tracehook_tracer_task(task) == current);
+		match = (ptrace_parent(task) == current);
 		rcu_read_unlock();
 		if (match && ptrace_may_access(task, PTRACE_MODE_ATTACH))
 			return mm;
@@ -53,6 +53,7 @@ struct proc_event {
 	PROC_EVENT_UID  = 0x00000004,
 	PROC_EVENT_GID  = 0x00000040,
 	PROC_EVENT_SID  = 0x00000080,
+	PROC_EVENT_PTRACE = 0x00000100,
 	/* "next" should be 0x00000400 */
 	/* "last" is the last process event: exit */
 	PROC_EVENT_EXIT = 0x80000000
@@ -95,6 +96,13 @@ struct proc_event {
 			__kernel_pid_t process_tgid;
 		} sid;
 
+		struct ptrace_proc_event {
+			__kernel_pid_t process_pid;
+			__kernel_pid_t process_tgid;
+			__kernel_pid_t tracer_pid;
+			__kernel_pid_t tracer_tgid;
+		} ptrace;
+
 		struct exit_proc_event {
 			__kernel_pid_t process_pid;
 			__kernel_pid_t process_tgid;
@@ -109,6 +117,7 @@ void proc_fork_connector(struct task_struct *task);
 void proc_exec_connector(struct task_struct *task);
 void proc_id_connector(struct task_struct *task, int which_id);
 void proc_sid_connector(struct task_struct *task);
+void proc_ptrace_connector(struct task_struct *task, int which_id);
 void proc_exit_connector(struct task_struct *task);
 #else
 static inline void proc_fork_connector(struct task_struct *task)
@@ -124,6 +133,10 @@ static inline void proc_id_connector(struct task_struct *task,
 static inline void proc_sid_connector(struct task_struct *task)
 {}
 
+static inline void proc_ptrace_connector(struct task_struct *task,
+					 int ptrace_id)
+{}
+
 static inline void proc_exit_connector(struct task_struct *task)
 {}
 #endif /* CONFIG_PROC_EVENTS */
@@ -47,6 +47,13 @@
 #define PTRACE_GETREGSET	0x4204
 #define PTRACE_SETREGSET	0x4205
 
+#define PTRACE_SEIZE		0x4206
+#define PTRACE_INTERRUPT	0x4207
+#define PTRACE_LISTEN		0x4208
+
+/* flags in @data for PTRACE_SEIZE */
+#define PTRACE_SEIZE_DEVEL	0x80000000 /* temp flag for development */
+
 /* options set using PTRACE_SETOPTIONS */
 #define PTRACE_O_TRACESYSGOOD	0x00000001
 #define PTRACE_O_TRACEFORK	0x00000002
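For illustration, this is roughly what an attach with the new request looks like from userspace — a minimal sketch assuming the constants above (no libc exposes them yet) and that the development flag is still required while SEIZE semantics settle:

/* Minimal sketch: attach with PTRACE_SEIZE instead of PTRACE_ATTACH.
 * Constants mirror the hunk above; glibc does not define them yet. */
#include <sys/ptrace.h>
#include <sys/types.h>

#define PTRACE_SEIZE		0x4206
#define PTRACE_SEIZE_DEVEL	0x80000000 /* required while SEIZE is in development */

static long seize(pid_t pid)
{
	/* unlike PTRACE_ATTACH, this does not send SIGSTOP to the tracee */
	return ptrace(PTRACE_SEIZE, pid, NULL, (void *)PTRACE_SEIZE_DEVEL);
}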
@@ -65,6 +72,7 @@
 #define PTRACE_EVENT_EXEC	4
 #define PTRACE_EVENT_VFORK_DONE	5
 #define PTRACE_EVENT_EXIT	6
+#define PTRACE_EVENT_STOP	7
 
 #include <asm/ptrace.h>
 
@@ -77,16 +85,22 @@
  * flags.  When a task is stopped the ptracer owns task->ptrace.
  */
 
+#define PT_SEIZED	0x00010000	/* SEIZE used, enable new behavior */
 #define PT_PTRACED	0x00000001
 #define PT_DTRACE	0x00000002	/* delayed trace (used on m68k, i386) */
 #define PT_TRACESYSGOOD	0x00000004
 #define PT_PTRACE_CAP	0x00000008	/* ptracer can follow suid-exec */
-#define PT_TRACE_FORK	0x00000010
-#define PT_TRACE_VFORK	0x00000020
-#define PT_TRACE_CLONE	0x00000040
-#define PT_TRACE_EXEC	0x00000080
-#define PT_TRACE_VFORK_DONE	0x00000100
-#define PT_TRACE_EXIT	0x00000200
+
+/* PT_TRACE_* event enable flags */
+#define PT_EVENT_FLAG_SHIFT	4
+#define PT_EVENT_FLAG(event)	(1 << (PT_EVENT_FLAG_SHIFT + (event) - 1))
+
+#define PT_TRACE_FORK	PT_EVENT_FLAG(PTRACE_EVENT_FORK)
+#define PT_TRACE_VFORK	PT_EVENT_FLAG(PTRACE_EVENT_VFORK)
+#define PT_TRACE_CLONE	PT_EVENT_FLAG(PTRACE_EVENT_CLONE)
+#define PT_TRACE_EXEC	PT_EVENT_FLAG(PTRACE_EVENT_EXEC)
+#define PT_TRACE_VFORK_DONE	PT_EVENT_FLAG(PTRACE_EVENT_VFORK_DONE)
+#define PT_TRACE_EXIT	PT_EVENT_FLAG(PTRACE_EVENT_EXIT)
 
 #define PT_TRACE_MASK	0x000003f4
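The event-flag encoding is backward compatible: because PTRACE_EVENT_FORK through PTRACE_EVENT_EXIT are numbered 1 through 6, PT_EVENT_FLAG() reproduces the old literal values (PT_TRACE_FORK is still 0x10, PT_TRACE_EXEC still 0x80, PT_TRACE_EXIT still 0x200), so PT_TRACE_MASK is unchanged. A quick compile-time sanity check, illustrative only:

/* Illustrative check that PT_EVENT_FLAG() reproduces the old values;
 * a negative array size fails the build if an equality does not hold. */
#define PT_EVENT_FLAG_SHIFT	4
#define PT_EVENT_FLAG(event)	(1 << (PT_EVENT_FLAG_SHIFT + (event) - 1))

typedef char assert_fork[PT_EVENT_FLAG(1) == 0x010 ? 1 : -1]; /* PTRACE_EVENT_FORK */
typedef char assert_exec[PT_EVENT_FLAG(4) == 0x080 ? 1 : -1]; /* PTRACE_EVENT_EXEC */
typedef char assert_exit[PT_EVENT_FLAG(6) == 0x200 ? 1 : -1]; /* PTRACE_EVENT_EXIT */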
@@ -105,7 +119,7 @@ extern long arch_ptrace(struct task_struct *child, long request,
 extern int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len);
 extern int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len);
 extern void ptrace_disable(struct task_struct *);
-extern int ptrace_check_attach(struct task_struct *task, int kill);
+extern int ptrace_check_attach(struct task_struct *task, bool ignore_state);
 extern int ptrace_request(struct task_struct *child, long request,
 			  unsigned long addr, unsigned long data);
 extern void ptrace_notify(int exit_code);
@@ -122,7 +136,7 @@ extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
 
 static inline int ptrace_reparented(struct task_struct *child)
 {
-	return child->real_parent != child->parent;
+	return !same_thread_group(child->real_parent, child->parent);
 }
 
 static inline void ptrace_unlink(struct task_struct *child)
@@ -137,36 +151,56 @@ int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
 			    unsigned long data);
 
 /**
- * task_ptrace - return %PT_* flags that apply to a task
- * @task: pointer to &task_struct in question
+ * ptrace_parent - return the task that is tracing the given task
+ * @task: task to consider
  *
- * Returns the %PT_* flags that apply to @task.
+ * Returns %NULL if no one is tracing @task, or the &struct task_struct
+ * pointer to its tracer.
+ *
+ * Must be called under rcu_read_lock().  The pointer returned might be kept
+ * live only by RCU.  During exec, this may be called with task_lock() held
+ * on @task, still held from when check_unsafe_exec() was called.
  */
-static inline int task_ptrace(struct task_struct *task)
+static inline struct task_struct *ptrace_parent(struct task_struct *task)
 {
-	return task->ptrace;
+	if (unlikely(task->ptrace))
+		return rcu_dereference(task->parent);
+	return NULL;
 }
 
+/**
+ * ptrace_event_enabled - test whether a ptrace event is enabled
+ * @task: ptracee of interest
+ * @event: %PTRACE_EVENT_* to test
+ *
+ * Test whether @event is enabled for ptracee @task.
+ *
+ * Returns %true if @event is enabled, %false otherwise.
+ */
+static inline bool ptrace_event_enabled(struct task_struct *task, int event)
+{
+	return task->ptrace & PT_EVENT_FLAG(event);
+}
+
 /**
  * ptrace_event - possibly stop for a ptrace event notification
- * @mask: %PT_* bit to check in @current->ptrace
- * @event: %PTRACE_EVENT_* value to report if @mask is set
+ * @event: %PTRACE_EVENT_* value to report
  * @message: value for %PTRACE_GETEVENTMSG to return
 *
- * This checks the @mask bit to see if ptrace wants stops for this event.
- * If so we stop, reporting @event and @message to the ptrace parent.
- *
- * Returns nonzero if we did a ptrace notification, zero if not.
+ * Check whether @event is enabled and, if so, report @event and @message
+ * to the ptrace parent.
 *
 * Called without locks.
 */
-static inline int ptrace_event(int mask, int event, unsigned long message)
+static inline void ptrace_event(int event, unsigned long message)
 {
-	if (mask && likely(!(current->ptrace & mask)))
-		return 0;
-	current->ptrace_message = message;
-	ptrace_notify((event << 8) | SIGTRAP);
-	return 1;
+	if (unlikely(ptrace_event_enabled(current, event))) {
+		current->ptrace_message = message;
+		ptrace_notify((event << 8) | SIGTRAP);
+	} else if (event == PTRACE_EVENT_EXEC && unlikely(current->ptrace)) {
+		/* legacy EXEC report via SIGTRAP */
+		send_sig(SIGTRAP, current, 0);
+	}
 }
 
 /**
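The legacy-SIGTRAP branch above preserves the old behavior for tracers that never set PTRACE_O_TRACEEXEC, while tracers that do opt in now also get the former thread ID of the execing thread through PTRACE_GETEVENTMSG (per the "make former thread ID available via PTRACE_GETEVENTMSG" commit in this series). A hypothetical tracer-side sketch; the fallback defines carry the values from the hunks above in case the libc headers lack them:

/* Sketch: catch PTRACE_EVENT_EXEC and read the pre-exec thread ID.
 * Hypothetical demo; error handling omitted. */
#include <stdio.h>
#include <signal.h>
#include <unistd.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>

#ifndef PTRACE_O_TRACEEXEC
#define PTRACE_O_TRACEEXEC	0x00000010
#endif
#ifndef PTRACE_EVENT_EXEC
#define PTRACE_EVENT_EXEC	4
#endif

int main(void)
{
	pid_t pid = fork();
	int status;

	if (pid == 0) {
		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
		raise(SIGSTOP);		/* let the parent set options first */
		execlp("true", "true", (char *)NULL);
		_exit(1);
	}

	waitpid(pid, &status, 0);	/* SIGSTOP stop */
	ptrace(PTRACE_SETOPTIONS, pid, NULL, (void *)PTRACE_O_TRACEEXEC);
	ptrace(PTRACE_CONT, pid, NULL, NULL);

	waitpid(pid, &status, 0);	/* PTRACE_EVENT_EXEC stop */
	if (status >> 8 == (SIGTRAP | (PTRACE_EVENT_EXEC << 8))) {
		unsigned long former_tid;

		ptrace(PTRACE_GETEVENTMSG, pid, NULL, &former_tid);
		printf("exec performed by former tid %lu\n", former_tid);
	}
	ptrace(PTRACE_CONT, pid, NULL, NULL);
	waitpid(pid, &status, 0);	/* child exit */
	return 0;
}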
@@ -183,16 +217,24 @@ static inline void ptrace_init_task(struct task_struct *child, bool ptrace)
 {
 	INIT_LIST_HEAD(&child->ptrace_entry);
 	INIT_LIST_HEAD(&child->ptraced);
-	child->parent = child->real_parent;
-	child->ptrace = 0;
-	if (unlikely(ptrace) && (current->ptrace & PT_PTRACED)) {
-		child->ptrace = current->ptrace;
-		__ptrace_link(child, current->parent);
-	}
-
 #ifdef CONFIG_HAVE_HW_BREAKPOINT
 	atomic_set(&child->ptrace_bp_refcnt, 1);
 #endif
+	child->jobctl = 0;
+	child->ptrace = 0;
+	child->parent = child->real_parent;
+
+	if (unlikely(ptrace) && current->ptrace) {
+		child->ptrace = current->ptrace;
+		__ptrace_link(child, current->parent);
+
+		if (child->ptrace & PT_SEIZED)
+			task_set_jobctl_pending(child, JOBCTL_TRAP_STOP);
+		else
+			sigaddset(&child->pending.signal, SIGSTOP);
+
+		set_tsk_thread_flag(child, TIF_SIGPENDING);
+	}
 }
 
 /**
@@ -1292,7 +1292,7 @@ struct task_struct {
 	int exit_state;
 	int exit_code, exit_signal;
 	int pdeath_signal;  /* The signal sent when the parent dies */
-	unsigned int group_stop;	/* GROUP_STOP_*, siglock protected */
+	unsigned int jobctl;	/* JOBCTL_*, siglock protected */
 	/* ??? */
 	unsigned int personality;
 	unsigned did_exec:1;
@@ -1813,15 +1813,34 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *
 #define used_math() tsk_used_math(current)
 
 /*
- * task->group_stop flags
+ * task->jobctl flags
 */
-#define GROUP_STOP_SIGMASK	0xffff    /* signr of the last group stop */
-#define GROUP_STOP_PENDING	(1 << 16) /* task should stop for group stop */
-#define GROUP_STOP_CONSUME	(1 << 17) /* consume group stop count */
-#define GROUP_STOP_TRAPPING	(1 << 18) /* switching from STOPPED to TRACED */
-#define GROUP_STOP_DEQUEUED	(1 << 19) /* stop signal dequeued */
+#define JOBCTL_STOP_SIGMASK	0xffff	/* signr of the last group stop */
 
-extern void task_clear_group_stop_pending(struct task_struct *task);
+#define JOBCTL_STOP_DEQUEUED_BIT 16	/* stop signal dequeued */
+#define JOBCTL_STOP_PENDING_BIT	17	/* task should stop for group stop */
+#define JOBCTL_STOP_CONSUME_BIT	18	/* consume group stop count */
+#define JOBCTL_TRAP_STOP_BIT	19	/* trap for STOP */
+#define JOBCTL_TRAP_NOTIFY_BIT	20	/* trap for NOTIFY */
+#define JOBCTL_TRAPPING_BIT	21	/* switching to TRACED */
+#define JOBCTL_LISTENING_BIT	22	/* ptracer is listening for events */
+
+#define JOBCTL_STOP_DEQUEUED	(1 << JOBCTL_STOP_DEQUEUED_BIT)
+#define JOBCTL_STOP_PENDING	(1 << JOBCTL_STOP_PENDING_BIT)
+#define JOBCTL_STOP_CONSUME	(1 << JOBCTL_STOP_CONSUME_BIT)
+#define JOBCTL_TRAP_STOP	(1 << JOBCTL_TRAP_STOP_BIT)
+#define JOBCTL_TRAP_NOTIFY	(1 << JOBCTL_TRAP_NOTIFY_BIT)
+#define JOBCTL_TRAPPING		(1 << JOBCTL_TRAPPING_BIT)
+#define JOBCTL_LISTENING	(1 << JOBCTL_LISTENING_BIT)
+
+#define JOBCTL_TRAP_MASK	(JOBCTL_TRAP_STOP | JOBCTL_TRAP_NOTIFY)
+#define JOBCTL_PENDING_MASK	(JOBCTL_STOP_PENDING | JOBCTL_TRAP_MASK)
+
+extern bool task_set_jobctl_pending(struct task_struct *task,
+				    unsigned int mask);
+extern void task_clear_jobctl_trapping(struct task_struct *task);
+extern void task_clear_jobctl_pending(struct task_struct *task,
+				      unsigned int mask);
 
 #ifdef CONFIG_PREEMPT_RCU
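As before, the low 16 bits of the word still carry the signal number of the last group stop; only the flag bits moved up and gained symbolic *_BIT names. A tiny illustrative sketch (not kernel code) of unpacking the two halves:

/* Illustrative decoding of a jobctl word; values mirror the hunk above. */
#define JOBCTL_STOP_SIGMASK	0xffff
#define JOBCTL_STOP_PENDING	(1 << 17)

static inline int jobctl_stop_signr(unsigned int jobctl)
{
	/* low 16 bits: signr of the last group stop, e.g. SIGTSTP */
	return jobctl & JOBCTL_STOP_SIGMASK;
}

static inline int jobctl_stop_pending(unsigned int jobctl)
{
	return !!(jobctl & JOBCTL_STOP_PENDING);
}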
@@ -2136,7 +2155,7 @@ static inline int dequeue_signal_lock(struct task_struct *tsk, sigset_t *mask, s
 		spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
 
 	return ret;
 }
 
 extern void block_all_signals(int (*notifier)(void *priv), void *priv,
 			      sigset_t *mask);
@@ -2151,7 +2170,7 @@ extern int kill_pid_info_as_uid(int, struct siginfo *, struct pid *, uid_t, uid_
 extern int kill_pgrp(struct pid *pid, int sig, int priv);
 extern int kill_pid(struct pid *pid, int sig, int priv);
 extern int kill_proc_info(int, struct siginfo *, pid_t);
-extern int do_notify_parent(struct task_struct *, int);
+extern __must_check bool do_notify_parent(struct task_struct *, int);
 extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent);
 extern void force_sig(int, struct task_struct *);
 extern int send_sig(int, struct task_struct *, int);
@@ -2275,8 +2294,10 @@ static inline int get_nr_threads(struct task_struct *tsk)
 	return tsk->signal->nr_threads;
 }
 
-/* de_thread depends on thread_group_leader not being a pid based check */
-#define thread_group_leader(p)	(p == p->group_leader)
+static inline bool thread_group_leader(struct task_struct *p)
+{
+	return p->exit_signal >= 0;
+}
 
 /* Due to the insanities of de_thread it is possible for a process
 * to have the pid of the thread group leader without actually being
@@ -2309,11 +2330,6 @@ static inline int thread_group_empty(struct task_struct *p)
 #define delay_group_leader(p) \
 		(thread_group_leader(p) && !thread_group_empty(p))
 
-static inline int task_detached(struct task_struct *p)
-{
-	return p->exit_signal == -1;
-}
-
 /*
 * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring
 * subscriptions and synchronises with wait4().  Also used in procfs.  Also
@@ -51,27 +51,12 @@
 #include <linux/security.h>
 struct linux_binprm;
 
-/**
- * tracehook_expect_breakpoints - guess if task memory might be touched
- * @task: current task, making a new mapping
- *
- * Return nonzero if @task is expected to want breakpoint insertion in
- * its memory at some point.  A zero return is no guarantee it won't
- * be done, but this is a hint that it's known to be likely.
- *
- * May be called with @task->mm->mmap_sem held for writing.
- */
-static inline int tracehook_expect_breakpoints(struct task_struct *task)
-{
-	return (task_ptrace(task) & PT_PTRACED) != 0;
-}
-
 /*
  * ptrace report for syscall entry and exit looks identical.
  */
 static inline void ptrace_report_syscall(struct pt_regs *regs)
 {
-	int ptrace = task_ptrace(current);
+	int ptrace = current->ptrace;
 
 	if (!(ptrace & PT_PTRACED))
 		return;
@@ -144,229 +129,6 @@ static inline void tracehook_report_syscall_exit(struct pt_regs *regs, int step)
 	ptrace_report_syscall(regs);
 }
 
-/**
- * tracehook_unsafe_exec - check for exec declared unsafe due to tracing
- * @task: current task doing exec
- *
- * Return %LSM_UNSAFE_* bits applied to an exec because of tracing.
- *
- * @task->signal->cred_guard_mutex is held by the caller through the do_execve().
- */
-static inline int tracehook_unsafe_exec(struct task_struct *task)
-{
-	int unsafe = 0;
-	int ptrace = task_ptrace(task);
-	if (ptrace & PT_PTRACED) {
-		if (ptrace & PT_PTRACE_CAP)
-			unsafe |= LSM_UNSAFE_PTRACE_CAP;
-		else
-			unsafe |= LSM_UNSAFE_PTRACE;
-	}
-	return unsafe;
-}
-
-/**
- * tracehook_tracer_task - return the task that is tracing the given task
- * @tsk: task to consider
- *
- * Returns NULL if no one is tracing @task, or the &struct task_struct
- * pointer to its tracer.
- *
- * Must be called under rcu_read_lock().  The pointer returned might be kept
- * live only by RCU.  During exec, this may be called with task_lock()
- * held on @task, still held from when tracehook_unsafe_exec() was called.
- */
-static inline struct task_struct *tracehook_tracer_task(struct task_struct *tsk)
-{
-	if (task_ptrace(tsk) & PT_PTRACED)
-		return rcu_dereference(tsk->parent);
-	return NULL;
-}
-
-/**
- * tracehook_report_exec - a successful exec was completed
- * @fmt: &struct linux_binfmt that performed the exec
- * @bprm: &struct linux_binprm containing exec details
- * @regs: user-mode register state
- *
- * An exec just completed, we are shortly going to return to user mode.
- * The freshly initialized register state can be seen and changed in @regs.
- * The name, file and other pointers in @bprm are still on hand to be
- * inspected, but will be freed as soon as this returns.
- *
- * Called with no locks, but with some kernel resources held live
- * and a reference on @fmt->module.
- */
-static inline void tracehook_report_exec(struct linux_binfmt *fmt,
-					 struct linux_binprm *bprm,
-					 struct pt_regs *regs)
-{
-	if (!ptrace_event(PT_TRACE_EXEC, PTRACE_EVENT_EXEC, 0) &&
-	    unlikely(task_ptrace(current) & PT_PTRACED))
-		send_sig(SIGTRAP, current, 0);
-}
-
-/**
- * tracehook_report_exit - task has begun to exit
- * @exit_code: pointer to value destined for @current->exit_code
- *
- * @exit_code points to the value passed to do_exit(), which tracing
- * might change here.  This is almost the first thing in do_exit(),
- * before freeing any resources or setting the %PF_EXITING flag.
- *
- * Called with no locks held.
- */
-static inline void tracehook_report_exit(long *exit_code)
-{
-	ptrace_event(PT_TRACE_EXIT, PTRACE_EVENT_EXIT, *exit_code);
-}
-
-/**
- * tracehook_prepare_clone - prepare for new child to be cloned
- * @clone_flags: %CLONE_* flags from clone/fork/vfork system call
- *
- * This is called before a new user task is to be cloned.
- * Its return value will be passed to tracehook_finish_clone().
- *
- * Called with no locks held.
- */
-static inline int tracehook_prepare_clone(unsigned clone_flags)
-{
-	if (clone_flags & CLONE_UNTRACED)
-		return 0;
-
-	if (clone_flags & CLONE_VFORK) {
-		if (current->ptrace & PT_TRACE_VFORK)
-			return PTRACE_EVENT_VFORK;
-	} else if ((clone_flags & CSIGNAL) != SIGCHLD) {
-		if (current->ptrace & PT_TRACE_CLONE)
-			return PTRACE_EVENT_CLONE;
-	} else if (current->ptrace & PT_TRACE_FORK)
-		return PTRACE_EVENT_FORK;
-
-	return 0;
-}
-
-/**
- * tracehook_finish_clone - new child created and being attached
- * @child: new child task
- * @clone_flags: %CLONE_* flags from clone/fork/vfork system call
- * @trace: return value from tracehook_prepare_clone()
- *
- * This is called immediately after adding @child to its parent's children list.
- * The @trace value is that returned by tracehook_prepare_clone().
- *
- * Called with current's siglock and write_lock_irq(&tasklist_lock) held.
- */
-static inline void tracehook_finish_clone(struct task_struct *child,
-					  unsigned long clone_flags, int trace)
-{
-	ptrace_init_task(child, (clone_flags & CLONE_PTRACE) || trace);
-}
-
-/**
- * tracehook_report_clone - in parent, new child is about to start running
- * @regs: parent's user register state
- * @clone_flags: flags from parent's system call
- * @pid: new child's PID in the parent's namespace
- * @child: new child task
- *
- * Called after a child is set up, but before it has been started running.
- * This is not a good place to block, because the child has not started
- * yet.  Suspend the child here if desired, and then block in
- * tracehook_report_clone_complete().  This must prevent the child from
- * self-reaping if tracehook_report_clone_complete() uses the @child
- * pointer; otherwise it might have died and been released by the time
- * tracehook_report_clone_complete() is called.
- *
- * Called with no locks held, but the child cannot run until this returns.
- */
-static inline void tracehook_report_clone(struct pt_regs *regs,
-					  unsigned long clone_flags,
-					  pid_t pid, struct task_struct *child)
-{
-	if (unlikely(task_ptrace(child))) {
-		/*
-		 * It doesn't matter who attached/attaching to this
-		 * task, the pending SIGSTOP is right in any case.
-		 */
-		sigaddset(&child->pending.signal, SIGSTOP);
-		set_tsk_thread_flag(child, TIF_SIGPENDING);
-	}
-}
-
-/**
- * tracehook_report_clone_complete - new child is running
- * @trace: return value from tracehook_prepare_clone()
- * @regs: parent's user register state
- * @clone_flags: flags from parent's system call
- * @pid: new child's PID in the parent's namespace
- * @child: child task, already running
- *
- * This is called just after the child has started running.  This is
- * just before the clone/fork syscall returns, or blocks for vfork
- * child completion if @clone_flags has the %CLONE_VFORK bit set.
- * The @child pointer may be invalid if a self-reaping child died and
- * tracehook_report_clone() took no action to prevent it from self-reaping.
- *
- * Called with no locks held.
- */
-static inline void tracehook_report_clone_complete(int trace,
-						   struct pt_regs *regs,
-						   unsigned long clone_flags,
-						   pid_t pid,
-						   struct task_struct *child)
-{
-	if (unlikely(trace))
-		ptrace_event(0, trace, pid);
-}
-
-/**
- * tracehook_report_vfork_done - vfork parent's child has exited or exec'd
- * @child: child task, already running
- * @pid: new child's PID in the parent's namespace
- *
- * Called after a %CLONE_VFORK parent has waited for the child to complete.
- * The clone/vfork system call will return immediately after this.
- * The @child pointer may be invalid if a self-reaping child died and
- * tracehook_report_clone() took no action to prevent it from self-reaping.
- *
- * Called with no locks held.
- */
-static inline void tracehook_report_vfork_done(struct task_struct *child,
-					       pid_t pid)
-{
-	ptrace_event(PT_TRACE_VFORK_DONE, PTRACE_EVENT_VFORK_DONE, pid);
-}
-
-/**
- * tracehook_prepare_release_task - task is being reaped, clean up tracing
- * @task: task in %EXIT_DEAD state
- *
- * This is called in release_task() just before @task gets finally reaped
- * and freed.  This would be the ideal place to remove and clean up any
- * tracing-related state for @task.
- *
- * Called with no locks held.
- */
-static inline void tracehook_prepare_release_task(struct task_struct *task)
-{
-}
-
-/**
- * tracehook_finish_release_task - final tracing clean-up
- * @task: task in %EXIT_DEAD state
- *
- * This is called in release_task() when @task is being in the middle of
- * being reaped.  After this, there must be no tracing entanglements.
- *
- * Called with write_lock_irq(&tasklist_lock) held.
- */
-static inline void tracehook_finish_release_task(struct task_struct *task)
-{
-	ptrace_release_task(task);
-}
-
 /**
  * tracehook_signal_handler - signal handler setup is complete
  * @sig: number of signal being delivered
@@ -390,151 +152,6 @@ static inline void tracehook_signal_handler(int sig, siginfo_t *info,
 		ptrace_notify(SIGTRAP);
 }
 
-/**
- * tracehook_consider_ignored_signal - suppress short-circuit of ignored signal
- * @task: task receiving the signal
- * @sig: signal number being sent
- *
- * Return zero iff tracing doesn't care to examine this ignored signal,
- * so it can short-circuit normal delivery and never even get queued.
- *
- * Called with @task->sighand->siglock held.
- */
-static inline int tracehook_consider_ignored_signal(struct task_struct *task,
-						    int sig)
-{
-	return (task_ptrace(task) & PT_PTRACED) != 0;
-}
-
-/**
- * tracehook_consider_fatal_signal - suppress special handling of fatal signal
- * @task: task receiving the signal
- * @sig: signal number being sent
- *
- * Return nonzero to prevent special handling of this termination signal.
- * Normally handler for signal is %SIG_DFL.  It can be %SIG_IGN if @sig is
- * ignored, in which case force_sig() is about to reset it to %SIG_DFL.
- * When this returns zero, this signal might cause a quick termination
- * that does not give the debugger a chance to intercept the signal.
- *
- * Called with or without @task->sighand->siglock held.
- */
-static inline int tracehook_consider_fatal_signal(struct task_struct *task,
-						  int sig)
-{
-	return (task_ptrace(task) & PT_PTRACED) != 0;
-}
-
-/**
- * tracehook_force_sigpending - let tracing force signal_pending(current) on
- *
- * Called when recomputing our signal_pending() flag.  Return nonzero
- * to force the signal_pending() flag on, so that tracehook_get_signal()
- * will be called before the next return to user mode.
- *
- * Called with @current->sighand->siglock held.
- */
-static inline int tracehook_force_sigpending(void)
-{
-	return 0;
-}
-
-/**
- * tracehook_get_signal - deliver synthetic signal to traced task
- * @task: @current
- * @regs: task_pt_regs(@current)
- * @info: details of synthetic signal
- * @return_ka: sigaction for synthetic signal
- *
- * Return zero to check for a real pending signal normally.
- * Return -1 after releasing the siglock to repeat the check.
- * Return a signal number to induce an artificial signal delivery,
- * setting *@info and *@return_ka to specify its details and behavior.
- *
- * The @return_ka->sa_handler value controls the disposition of the
- * signal, no matter the signal number.  For %SIG_DFL, the return value
- * is a representative signal to indicate the behavior (e.g. %SIGTERM
- * for death, %SIGQUIT for core dump, %SIGSTOP for job control stop,
- * %SIGTSTP for stop unless in an orphaned pgrp), but the signal number
- * reported will be @info->si_signo instead.
- *
- * Called with @task->sighand->siglock held, before dequeuing pending signals.
- */
-static inline int tracehook_get_signal(struct task_struct *task,
-				       struct pt_regs *regs,
-				       siginfo_t *info,
-				       struct k_sigaction *return_ka)
-{
-	return 0;
-}
-
-/**
- * tracehook_finish_jctl - report about return from job control stop
- *
- * This is called by do_signal_stop() after wakeup.
- */
-static inline void tracehook_finish_jctl(void)
-{
-}
-
-#define DEATH_REAP			-1
-#define DEATH_DELAYED_GROUP_LEADER	-2
-
-/**
- * tracehook_notify_death - task is dead, ready to notify parent
- * @task: @current task now exiting
- * @death_cookie: value to pass to tracehook_report_death()
- * @group_dead: nonzero if this was the last thread in the group to die
- *
- * A return value >= 0 means call do_notify_parent() with that signal
- * number.  Negative return value can be %DEATH_REAP to self-reap right
- * now, or %DEATH_DELAYED_GROUP_LEADER to a zombie without notifying our
- * parent.  Note that a return value of 0 means a do_notify_parent() call
- * that sends no signal, but still wakes up a parent blocked in wait*().
- *
- * Called with write_lock_irq(&tasklist_lock) held.
- */
-static inline int tracehook_notify_death(struct task_struct *task,
-					 void **death_cookie, int group_dead)
-{
-	if (task_detached(task))
-		return task->ptrace ? SIGCHLD : DEATH_REAP;
-
-	/*
-	 * If something other than our normal parent is ptracing us, then
-	 * send it a SIGCHLD instead of honoring exit_signal.  exit_signal
-	 * only has special meaning to our real parent.
-	 */
-	if (thread_group_empty(task) && !ptrace_reparented(task))
-		return task->exit_signal;
-
-	return task->ptrace ? SIGCHLD : DEATH_DELAYED_GROUP_LEADER;
-}
-
-/**
- * tracehook_report_death - task is dead and ready to be reaped
- * @task: @current task now exiting
- * @signal: return value from tracehook_notify_death()
- * @death_cookie: value passed back from tracehook_notify_death()
- * @group_dead: nonzero if this was the last thread in the group to die
- *
- * Thread has just become a zombie or is about to self-reap.  If positive,
- * @signal is the signal number just sent to the parent (usually %SIGCHLD).
- * If @signal is %DEATH_REAP, this thread will self-reap.  If @signal is
- * %DEATH_DELAYED_GROUP_LEADER, this is a delayed_group_leader() zombie.
- * The @death_cookie was passed back by tracehook_notify_death().
- *
- * If normal reaping is not inhibited, @task->exit_state might be changing
- * in parallel.
- *
- * Called without locks.
- */
-static inline void tracehook_report_death(struct task_struct *task,
-					  int signal, void *death_cookie,
-					  int group_dead)
-{
-}
-
 #ifdef TIF_NOTIFY_RESUME
 /**
 * set_notify_resume - cause tracehook_notify_resume() to be called
@@ -169,7 +169,6 @@ void release_task(struct task_struct * p)
 	struct task_struct *leader;
 	int zap_leader;
 repeat:
-	tracehook_prepare_release_task(p);
 	/* don't need to get the RCU readlock here - the process is dead and
 	 * can't be modifying its own credentials. But shut RCU-lockdep up */
 	rcu_read_lock();
@@ -179,7 +178,7 @@ void release_task(struct task_struct * p)
 	proc_flush_task(p);
 
 	write_lock_irq(&tasklist_lock);
-	tracehook_finish_release_task(p);
+	ptrace_release_task(p);
 	__exit_signal(p);
 
 	/*
@@ -190,22 +189,12 @@ void release_task(struct task_struct * p)
 	zap_leader = 0;
 	leader = p->group_leader;
 	if (leader != p && thread_group_empty(leader) && leader->exit_state == EXIT_ZOMBIE) {
-		BUG_ON(task_detached(leader));
-		do_notify_parent(leader, leader->exit_signal);
 		/*
 		 * If we were the last child thread and the leader has
 		 * exited already, and the leader's parent ignores SIGCHLD,
 		 * then we are the one who should release the leader.
-		 *
-		 * do_notify_parent() will have marked it self-reaping in
-		 * that case.
 		 */
-		zap_leader = task_detached(leader);
-
-		/*
-		 * This maintains the invariant that release_task()
-		 * only runs on a task in EXIT_DEAD, just for sanity.
-		 */
+		zap_leader = do_notify_parent(leader, leader->exit_signal);
 		if (zap_leader)
 			leader->exit_state = EXIT_DEAD;
 	}
@@ -277,18 +266,16 @@ int is_current_pgrp_orphaned(void)
 	return retval;
 }
 
-static int has_stopped_jobs(struct pid *pgrp)
+static bool has_stopped_jobs(struct pid *pgrp)
 {
-	int retval = 0;
 	struct task_struct *p;
 
 	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
-		if (!task_is_stopped(p))
-			continue;
-		retval = 1;
-		break;
+		if (p->signal->flags & SIGNAL_STOP_STOPPED)
+			return true;
 	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
-	return retval;
+
+	return false;
 }
 
 /*
@@ -751,7 +738,7 @@ static void reparent_leader(struct task_struct *father, struct task_struct *p,
 {
 	list_move_tail(&p->sibling, &p->real_parent->children);
 
-	if (task_detached(p))
+	if (p->exit_state == EXIT_DEAD)
 		return;
 	/*
 	 * If this is a threaded reparent there is no need to
@@ -764,10 +751,9 @@ static void reparent_leader(struct task_struct *father, struct task_struct *p,
 		p->exit_signal = SIGCHLD;
 
 	/* If it has exited notify the new parent about this child's death. */
-	if (!task_ptrace(p) &&
+	if (!p->ptrace &&
 	    p->exit_state == EXIT_ZOMBIE && thread_group_empty(p)) {
-		do_notify_parent(p, p->exit_signal);
-		if (task_detached(p)) {
+		if (do_notify_parent(p, p->exit_signal)) {
 			p->exit_state = EXIT_DEAD;
 			list_move_tail(&p->sibling, dead);
 		}
@@ -794,7 +780,7 @@ static void forget_original_parent(struct task_struct *father)
 		do {
 			t->real_parent = reaper;
 			if (t->parent == father) {
-				BUG_ON(task_ptrace(t));
+				BUG_ON(t->ptrace);
 				t->parent = t->real_parent;
 			}
 			if (t->pdeath_signal)
@@ -819,8 +805,7 @@ static void forget_original_parent(struct task_struct *father)
 */
 static void exit_notify(struct task_struct *tsk, int group_dead)
 {
-	int signal;
-	void *cookie;
+	bool autoreap;
 
 	/*
 	 * This does two things:
@@ -851,26 +836,33 @@ static void exit_notify(struct task_struct *tsk, int group_dead)
 	 * we have changed execution domain as these two values started
 	 * the same after a fork.
 	 */
-	if (tsk->exit_signal != SIGCHLD && !task_detached(tsk) &&
+	if (thread_group_leader(tsk) && tsk->exit_signal != SIGCHLD &&
 	    (tsk->parent_exec_id != tsk->real_parent->self_exec_id ||
 	     tsk->self_exec_id != tsk->parent_exec_id))
 		tsk->exit_signal = SIGCHLD;
 
-	signal = tracehook_notify_death(tsk, &cookie, group_dead);
-	if (signal >= 0)
-		signal = do_notify_parent(tsk, signal);
+	if (unlikely(tsk->ptrace)) {
+		int sig = thread_group_leader(tsk) &&
+				thread_group_empty(tsk) &&
+				!ptrace_reparented(tsk) ?
+			tsk->exit_signal : SIGCHLD;
+		autoreap = do_notify_parent(tsk, sig);
+	} else if (thread_group_leader(tsk)) {
+		autoreap = thread_group_empty(tsk) &&
+			do_notify_parent(tsk, tsk->exit_signal);
+	} else {
+		autoreap = true;
+	}
 
-	tsk->exit_state = signal == DEATH_REAP ? EXIT_DEAD : EXIT_ZOMBIE;
+	tsk->exit_state = autoreap ? EXIT_DEAD : EXIT_ZOMBIE;
 
 	/* mt-exec, de_thread() is waiting for group leader */
 	if (unlikely(tsk->signal->notify_count < 0))
 		wake_up_process(tsk->signal->group_exit_task);
 	write_unlock_irq(&tasklist_lock);
 
-	tracehook_report_death(tsk, signal, cookie, group_dead);
-
 	/* If the process is dead, release it - nobody will wait for it */
-	if (signal == DEATH_REAP)
+	if (autoreap)
 		release_task(tsk);
 }
@@ -923,7 +915,7 @@ NORET_TYPE void do_exit(long code)
 	 */
 	set_fs(USER_DS);
 
-	tracehook_report_exit(&code);
+	ptrace_event(PTRACE_EVENT_EXIT, code);
 
 	validate_creds_for_do_exit(tsk);
 
@@ -1235,9 +1227,9 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
 	traced = ptrace_reparented(p);
 	/*
 	 * It can be ptraced but not reparented, check
-	 * !task_detached() to filter out sub-threads.
+	 * thread_group_leader() to filter out sub-threads.
 	 */
-	if (likely(!traced) && likely(!task_detached(p))) {
+	if (likely(!traced) && thread_group_leader(p)) {
 		struct signal_struct *psig;
 		struct signal_struct *sig;
 		unsigned long maxrss;
@@ -1345,16 +1337,13 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
 		/* We dropped tasklist, ptracer could die and untrace */
 		ptrace_unlink(p);
 		/*
-		 * If this is not a detached task, notify the parent.
-		 * If it's still not detached after that, don't release
-		 * it now.
+		 * If this is not a sub-thread, notify the parent.
+		 * If parent wants a zombie, don't release it now.
 		 */
-		if (!task_detached(p)) {
-			do_notify_parent(p, p->exit_signal);
-			if (!task_detached(p)) {
-				p->exit_state = EXIT_ZOMBIE;
-				p = NULL;
-			}
+		if (thread_group_leader(p) &&
+		    !do_notify_parent(p, p->exit_signal)) {
+			p->exit_state = EXIT_ZOMBIE;
+			p = NULL;
 		}
 		write_unlock_irq(&tasklist_lock);
 	}
@@ -1367,7 +1356,8 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
 static int *task_stopped_code(struct task_struct *p, bool ptrace)
 {
 	if (ptrace) {
-		if (task_is_stopped_or_traced(p))
+		if (task_is_stopped_or_traced(p) &&
+		    !(p->jobctl & JOBCTL_LISTENING))
 			return &p->exit_code;
 	} else {
 		if (p->signal->flags & SIGNAL_STOP_STOPPED)
@@ -1563,7 +1553,7 @@ static int wait_consider_task(struct wait_opts *wo, int ptrace,
 	 * Notification and reaping will be cascaded to the real
 	 * parent when the ptracer detaches.
 	 */
-	if (likely(!ptrace) && unlikely(task_ptrace(p))) {
+	if (likely(!ptrace) && unlikely(p->ptrace)) {
 		/* it will become visible, clear notask_error */
 		wo->notask_error = 0;
 		return 0;
@@ -1606,8 +1596,7 @@ static int wait_consider_task(struct wait_opts *wo, int ptrace,
 	 * own children, it should create a separate process which
 	 * takes the role of real parent.
 	 */
-	if (likely(!ptrace) && task_ptrace(p) &&
-	    same_thread_group(p->parent, p->real_parent))
+	if (likely(!ptrace) && p->ptrace && !ptrace_reparented(p))
 		return 0;
 
 	/*
@@ -37,7 +37,6 @@
 #include <linux/swap.h>
 #include <linux/syscalls.h>
 #include <linux/jiffies.h>
-#include <linux/tracehook.h>
 #include <linux/futex.h>
 #include <linux/compat.h>
 #include <linux/kthread.h>
@@ -1340,7 +1339,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	}
 
 	if (likely(p->pid)) {
-		tracehook_finish_clone(p, clone_flags, trace);
+		ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace);
 
 		if (thread_group_leader(p)) {
 			if (is_child_reaper(pid))
@@ -1481,10 +1480,22 @@ long do_fork(unsigned long clone_flags,
 	}
 
 	/*
-	 * When called from kernel_thread, don't do user tracing stuff.
+	 * Determine whether and which event to report to ptracer.  When
+	 * called from kernel_thread or CLONE_UNTRACED is explicitly
+	 * requested, no event is reported; otherwise, report if the event
+	 * for the type of forking is enabled.
 	 */
-	if (likely(user_mode(regs)))
-		trace = tracehook_prepare_clone(clone_flags);
+	if (likely(user_mode(regs)) && !(clone_flags & CLONE_UNTRACED)) {
+		if (clone_flags & CLONE_VFORK)
+			trace = PTRACE_EVENT_VFORK;
+		else if ((clone_flags & CSIGNAL) != SIGCHLD)
+			trace = PTRACE_EVENT_CLONE;
+		else
+			trace = PTRACE_EVENT_FORK;
+
+		if (likely(!ptrace_event_enabled(current, trace)))
+			trace = 0;
+	}
 
 	p = copy_process(clone_flags, stack_start, regs, stack_size,
 			 child_tidptr, NULL, trace);
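With the tracehooks gone, the event selection above is the whole policy: a vfork() reports PTRACE_EVENT_VFORK, a clone() with a non-SIGCHLD exit signal reports PTRACE_EVENT_CLONE, and everything else reports PTRACE_EVENT_FORK, each gated by the corresponding enable flag. For reference, the same decision expressed as a hypothetical standalone helper mirroring the kernel logic (fallback defines carry values I believe to be standard, not taken from this diff):

/* Userspace mirror of the do_fork() event selection above; illustrative only. */
#include <signal.h>

#define CLONE_VFORK	0x00004000
#define CLONE_UNTRACED	0x00800000
#define CSIGNAL		0x000000ff

#define PTRACE_EVENT_FORK	1
#define PTRACE_EVENT_VFORK	2
#define PTRACE_EVENT_CLONE	3

static int clone_event(unsigned long clone_flags)
{
	if (clone_flags & CLONE_UNTRACED)
		return 0;
	if (clone_flags & CLONE_VFORK)
		return PTRACE_EVENT_VFORK;
	if ((clone_flags & CSIGNAL) != SIGCHLD)
		return PTRACE_EVENT_CLONE;
	return PTRACE_EVENT_FORK;
}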
@@ -1508,26 +1519,26 @@ long do_fork(unsigned long clone_flags,
 		}
 
 		audit_finish_fork(p);
-		tracehook_report_clone(regs, clone_flags, nr, p);
 
 		/*
 		 * We set PF_STARTING at creation in case tracing wants to
 		 * use this to distinguish a fully live task from one that
-		 * hasn't gotten to tracehook_report_clone() yet.  Now we
-		 * clear it and set the child going.
+		 * hasn't finished SIGSTOP raising yet.  Now we clear it
+		 * and set the child going.
 		 */
 		p->flags &= ~PF_STARTING;
 
 		wake_up_new_task(p);
 
-		tracehook_report_clone_complete(trace, regs,
-						clone_flags, nr, p);
+		/* forking complete and child started to run, tell ptracer */
+		if (unlikely(trace))
+			ptrace_event(trace, nr);
 
 		if (clone_flags & CLONE_VFORK) {
 			freezer_do_not_count();
 			wait_for_completion(&vfork);
 			freezer_count();
-			tracehook_report_vfork_done(p, nr);
+			ptrace_event(PTRACE_EVENT_VFORK_DONE, nr);
 		}
 	} else {
 		nr = PTR_ERR(p);
kernel/ptrace.c (197 lines changed)
@@ -23,8 +23,15 @@
 #include <linux/uaccess.h>
 #include <linux/regset.h>
 #include <linux/hw_breakpoint.h>
+#include <linux/cn_proc.h>
 
 
+static int ptrace_trapping_sleep_fn(void *flags)
+{
+	schedule();
+	return 0;
+}
+
 /*
 * ptrace a task: make the debugger its new parent and
 * move it to the ptrace list.
@@ -77,13 +84,20 @@ void __ptrace_unlink(struct task_struct *child)
 	spin_lock(&child->sighand->siglock);
 
 	/*
-	 * Reinstate GROUP_STOP_PENDING if group stop is in effect and
+	 * Clear all pending traps and TRAPPING.  TRAPPING should be
+	 * cleared regardless of JOBCTL_STOP_PENDING.  Do it explicitly.
+	 */
+	task_clear_jobctl_pending(child, JOBCTL_TRAP_MASK);
+	task_clear_jobctl_trapping(child);
+
+	/*
+	 * Reinstate JOBCTL_STOP_PENDING if group stop is in effect and
 	 * @child isn't dead.
 	 */
 	if (!(child->flags & PF_EXITING) &&
 	    (child->signal->flags & SIGNAL_STOP_STOPPED ||
 	     child->signal->group_stop_count))
-		child->group_stop |= GROUP_STOP_PENDING;
+		child->jobctl |= JOBCTL_STOP_PENDING;
 
 	/*
 	 * If transition to TASK_STOPPED is pending or in TASK_TRACED, kick
@@ -91,16 +105,30 @@ void __ptrace_unlink(struct task_struct *child)
 	 * is in TASK_TRACED; otherwise, we might unduly disrupt
 	 * TASK_KILLABLE sleeps.
 	 */
-	if (child->group_stop & GROUP_STOP_PENDING || task_is_traced(child))
+	if (child->jobctl & JOBCTL_STOP_PENDING || task_is_traced(child))
 		signal_wake_up(child, task_is_traced(child));
 
 	spin_unlock(&child->sighand->siglock);
 }
 
-/*
- * Check that we have indeed attached to the thing..
+/**
+ * ptrace_check_attach - check whether ptracee is ready for ptrace operation
+ * @child: ptracee to check for
+ * @ignore_state: don't check whether @child is currently %TASK_TRACED
+ *
+ * Check whether @child is being ptraced by %current and ready for further
+ * ptrace operations.  If @ignore_state is %false, @child also should be in
+ * %TASK_TRACED state and on return the child is guaranteed to be traced
+ * and not executing.  If @ignore_state is %true, @child can be in any
+ * state.
+ *
+ * CONTEXT:
+ * Grabs and releases tasklist_lock and @child->sighand->siglock.
+ *
+ * RETURNS:
+ * 0 on success, -ESRCH if %child is not ready.
 */
-int ptrace_check_attach(struct task_struct *child, int kill)
+int ptrace_check_attach(struct task_struct *child, bool ignore_state)
 {
 	int ret = -ESRCH;
 
@@ -119,13 +147,14 @@ int ptrace_check_attach(struct task_struct *child, int kill)
 	 */
 		spin_lock_irq(&child->sighand->siglock);
 		WARN_ON_ONCE(task_is_stopped(child));
-		if (task_is_traced(child) || kill)
+		if (ignore_state || (task_is_traced(child) &&
+				     !(child->jobctl & JOBCTL_LISTENING)))
 			ret = 0;
 		spin_unlock_irq(&child->sighand->siglock);
 	}
 	read_unlock(&tasklist_lock);
 
-	if (!ret && !kill)
+	if (!ret && !ignore_state)
 		ret = wait_task_inactive(child, TASK_TRACED) ? 0 : -ESRCH;
 
 	/* All systems go.. */
@@ -182,11 +211,28 @@ bool ptrace_may_access(struct task_struct *task, unsigned int mode)
 	return !err;
 }
 
-static int ptrace_attach(struct task_struct *task)
+static int ptrace_attach(struct task_struct *task, long request,
+			 unsigned long flags)
 {
-	bool wait_trap = false;
+	bool seize = (request == PTRACE_SEIZE);
 	int retval;
 
+	/*
+	 * SEIZE will enable new ptrace behaviors which will be implemented
+	 * gradually.  SEIZE_DEVEL is used to prevent applications
+	 * expecting full SEIZE behaviors trapping on kernel commits which
+	 * are still in the process of implementing them.
+	 *
+	 * Only test programs for new ptrace behaviors being implemented
+	 * should set SEIZE_DEVEL.  If unset, SEIZE will fail with -EIO.
+	 *
+	 * Once SEIZE behaviors are completely implemented, this flag and
+	 * the following test will be removed.
+	 */
+	retval = -EIO;
+	if (seize && !(flags & PTRACE_SEIZE_DEVEL))
+		goto out;
+
 	audit_ptrace(task);
 
 	retval = -EPERM;
@@ -218,16 +264,21 @@ static int ptrace_attach(struct task_struct *task)
 		goto unlock_tasklist;
 
 	task->ptrace = PT_PTRACED;
+	if (seize)
+		task->ptrace |= PT_SEIZED;
 	if (task_ns_capable(task, CAP_SYS_PTRACE))
 		task->ptrace |= PT_PTRACE_CAP;
 
 	__ptrace_link(task, current);
-	send_sig_info(SIGSTOP, SEND_SIG_FORCED, task);
+
+	/* SEIZE doesn't trap tracee on attach */
+	if (!seize)
+		send_sig_info(SIGSTOP, SEND_SIG_FORCED, task);
 
 	spin_lock(&task->sighand->siglock);
 
 	/*
-	 * If the task is already STOPPED, set GROUP_STOP_PENDING and
+	 * If the task is already STOPPED, set JOBCTL_TRAP_STOP and
 	 * TRAPPING, and kick it so that it transits to TRACED.  TRAPPING
 	 * will be cleared if the child completes the transition or any
 	 * event which clears the group stop states happens.  We'll wait
@@ -243,11 +294,9 @@ static int ptrace_attach(struct task_struct *task)
 	 * The following task_is_stopped() test is safe as both transitions
 	 * in and out of STOPPED are protected by siglock.
 	 */
-	if (task_is_stopped(task)) {
-		task->group_stop |= GROUP_STOP_PENDING | GROUP_STOP_TRAPPING;
+	if (task_is_stopped(task) &&
+	    task_set_jobctl_pending(task, JOBCTL_TRAP_STOP | JOBCTL_TRAPPING))
 		signal_wake_up(task, 1);
-		wait_trap = true;
-	}
 
 	spin_unlock(&task->sighand->siglock);
 
@@ -257,9 +306,12 @@ static int ptrace_attach(struct task_struct *task)
 unlock_creds:
 	mutex_unlock(&task->signal->cred_guard_mutex);
 out:
-	if (wait_trap)
-		wait_event(current->signal->wait_chldexit,
-			   !(task->group_stop & GROUP_STOP_TRAPPING));
+	if (!retval) {
+		wait_on_bit(&task->jobctl, JOBCTL_TRAPPING_BIT,
+			    ptrace_trapping_sleep_fn, TASK_UNINTERRUPTIBLE);
+		proc_ptrace_connector(task, PTRACE_ATTACH);
+	}
+
 	return retval;
 }
 
@@ -322,25 +374,27 @@ static int ignoring_children(struct sighand_struct *sigh)
 */
 static bool __ptrace_detach(struct task_struct *tracer, struct task_struct *p)
 {
+	bool dead;
+
 	__ptrace_unlink(p);
 
-	if (p->exit_state == EXIT_ZOMBIE) {
-		if (!task_detached(p) && thread_group_empty(p)) {
-			if (!same_thread_group(p->real_parent, tracer))
-				do_notify_parent(p, p->exit_signal);
-			else if (ignoring_children(tracer->sighand)) {
-				__wake_up_parent(p, tracer);
-				p->exit_signal = -1;
-			}
-		}
-		if (task_detached(p)) {
-			/* Mark it as in the process of being reaped. */
-			p->exit_state = EXIT_DEAD;
-			return true;
+	if (p->exit_state != EXIT_ZOMBIE)
+		return false;
+
+	dead = !thread_group_leader(p);
+
+	if (!dead && thread_group_empty(p)) {
+		if (!same_thread_group(p->real_parent, tracer))
+			dead = do_notify_parent(p, p->exit_signal);
+		else if (ignoring_children(tracer->sighand)) {
+			__wake_up_parent(p, tracer);
+			dead = true;
 		}
 	}
-
-	return false;
+	/* Mark it as in the process of being reaped. */
+	if (dead)
+		p->exit_state = EXIT_DEAD;
+	return dead;
 }
 
 static int ptrace_detach(struct task_struct *child, unsigned int data)
@@ -365,6 +419,7 @@ static int ptrace_detach(struct task_struct *child, unsigned int data)
 	}
 	write_unlock_irq(&tasklist_lock);
 
+	proc_ptrace_connector(child, PTRACE_DETACH);
 	if (unlikely(dead))
 		release_task(child);
 
@@ -611,10 +666,12 @@ static int ptrace_regset(struct task_struct *task, int req, unsigned int type,
 int ptrace_request(struct task_struct *child, long request,
 		   unsigned long addr, unsigned long data)
 {
+	bool seized = child->ptrace & PT_SEIZED;
 	int ret = -EIO;
-	siginfo_t siginfo;
+	siginfo_t siginfo, *si;
 	void __user *datavp = (void __user *) data;
 	unsigned long __user *datalp = datavp;
+	unsigned long flags;
 
 	switch (request) {
 	case PTRACE_PEEKTEXT:
@@ -647,6 +704,62 @@ int ptrace_request(struct task_struct *child, long request,
 		ret = ptrace_setsiginfo(child, &siginfo);
 		break;

+	case PTRACE_INTERRUPT:
+		/*
+		 * Stop tracee without any side-effect on signal or job
+		 * control.  At least one trap is guaranteed to happen
+		 * after this request.  If @child is already trapped, the
+		 * current trap is not disturbed and another trap will
+		 * happen after the current trap is ended with PTRACE_CONT.
+		 *
+		 * The actual trap might not be PTRACE_EVENT_STOP trap but
+		 * the pending condition is cleared regardless.
+		 */
+		if (unlikely(!seized || !lock_task_sighand(child, &flags)))
+			break;
+
+		/*
+		 * INTERRUPT doesn't disturb existing trap sans one
+		 * exception.  If ptracer issued LISTEN for the current
+		 * STOP, this INTERRUPT should clear LISTEN and re-trap
+		 * tracee into STOP.
+		 */
+		if (likely(task_set_jobctl_pending(child, JOBCTL_TRAP_STOP)))
+			signal_wake_up(child, child->jobctl & JOBCTL_LISTENING);
+
+		unlock_task_sighand(child, &flags);
+		ret = 0;
+		break;
+
+	case PTRACE_LISTEN:
+		/*
+		 * Listen for events.  Tracee must be in STOP.  It's not
+		 * resumed per-se but is not considered to be in TRACED by
+		 * wait(2) or ptrace(2).  If an async event (e.g. group
+		 * stop state change) happens, tracee will enter STOP trap
+		 * again.  Alternatively, ptracer can issue INTERRUPT to
+		 * finish listening and re-trap tracee into STOP.
+		 */
+		if (unlikely(!seized || !lock_task_sighand(child, &flags)))
+			break;
+
+		si = child->last_siginfo;
+		if (unlikely(!si || si->si_code >> 8 != PTRACE_EVENT_STOP))
+			break;
+
+		child->jobctl |= JOBCTL_LISTENING;
+
+		/*
+		 * If NOTIFY is set, it means event happened between start
+		 * of this trap and now.  Trigger re-trap immediately.
+		 */
+		if (child->jobctl & JOBCTL_TRAP_NOTIFY)
+			signal_wake_up(child, true);
+
+		unlock_task_sighand(child, &flags);
+		ret = 0;
+		break;
+
 	case PTRACE_DETACH:	 /* detach a process that was attached. */
 		ret = ptrace_detach(child, data);
 		break;
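Taken together, SEIZE/INTERRUPT/LISTEN let a tracer stop and observe a tracee without disturbing its job-control state. A usage sketch under stated assumptions: the request values below are the ones introduced by this series and may be absent from older userspace headers, and on this kernel PTRACE_SEIZE still required a development flag in data, which is omitted here. LISTEN is only accepted while the tracee sits in a PTRACE_EVENT_STOP trap, which the preceding INTERRUPT guarantees.

#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>

#ifndef PTRACE_SEIZE			/* values from this series */
#define PTRACE_SEIZE	 0x4206
#define PTRACE_INTERRUPT 0x4207
#define PTRACE_LISTEN	 0x4208
#endif

/* Trap a seized tracee long enough to inspect it, then keep getting
 * stop notifications without really resuming it. */
static int observe(pid_t pid)
{
	int status;

	if (ptrace(PTRACE_INTERRUPT, pid, 0, 0) == -1)
		return -1;
	if (waitpid(pid, &status, 0) == -1)	/* PTRACE_EVENT_STOP trap */
		return -1;

	/* ... PTRACE_GETREGS/PEEKDATA etc. while the tracee is trapped ... */

	/* Don't resume it for real; just listen for new events. */
	return ptrace(PTRACE_LISTEN, pid, 0, 0);
}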
@@ -761,8 +874,8 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
 		goto out;
 	}

-	if (request == PTRACE_ATTACH) {
-		ret = ptrace_attach(child);
+	if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
+		ret = ptrace_attach(child, request, data);
 		/*
 		 * Some architectures need to do book-keeping after
 		 * a ptrace attach.
@@ -772,7 +885,8 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
 		goto out_put_task_struct;
 	}

-	ret = ptrace_check_attach(child, request == PTRACE_KILL);
+	ret = ptrace_check_attach(child, request == PTRACE_KILL ||
+				  request == PTRACE_INTERRUPT);
 	if (ret < 0)
 		goto out_put_task_struct;
@@ -903,8 +1017,8 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
 		goto out;
 	}

-	if (request == PTRACE_ATTACH) {
-		ret = ptrace_attach(child);
+	if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
+		ret = ptrace_attach(child, request, data);
 		/*
 		 * Some architectures need to do book-keeping after
 		 * a ptrace attach.
@@ -914,7 +1028,8 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
 		goto out_put_task_struct;
 	}

-	ret = ptrace_check_attach(child, request == PTRACE_KILL);
+	ret = ptrace_check_attach(child, request == PTRACE_KILL ||
+				  request == PTRACE_INTERRUPT);
 	if (!ret)
 		ret = compat_arch_ptrace(child, request, addr, data);
kernel/signal.c
@@ -87,7 +87,7 @@ static int sig_ignored(struct task_struct *t, int sig, int from_ancestor_ns)
 	/*
	 * Tracers may want to know about even ignored signals.
 	 */
-	return !tracehook_consider_ignored_signal(t, sig);
+	return !t->ptrace;
 }

 /*
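The rule above is observable from userspace: a signal whose disposition is SIG_IGN is still reported to an attached tracer as a signal-delivery-stop before it is discarded. A sketch of the tracer side, assuming the tracee has already installed SIG_IGN for SIGUSR1 and is running under tracing:

#include <signal.h>
#include <sys/types.h>
#include <sys/wait.h>

/* Send an ignored signal to a traced child and observe the stop.
 * The caller should PTRACE_CONT with signal 0 afterwards to discard it. */
static int see_ignored_signal(pid_t tracee)
{
	int status;

	kill(tracee, SIGUSR1);
	if (waitpid(tracee, &status, 0) == -1)
		return -1;
	if (WIFSTOPPED(status) && WSTOPSIG(status) == SIGUSR1)
		return 0;	/* tracer saw the ignored signal */
	return -1;
}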
@@ -124,7 +124,7 @@ static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)

 static int recalc_sigpending_tsk(struct task_struct *t)
 {
-	if ((t->group_stop & GROUP_STOP_PENDING) ||
+	if ((t->jobctl & JOBCTL_PENDING_MASK) ||
 	    PENDING(&t->pending, &t->blocked) ||
 	    PENDING(&t->signal->shared_pending, &t->blocked)) {
 		set_tsk_thread_flag(t, TIF_SIGPENDING);
@@ -150,9 +150,7 @@ void recalc_sigpending_and_wake(struct task_struct *t)

 void recalc_sigpending(void)
 {
-	if (unlikely(tracehook_force_sigpending()))
-		set_thread_flag(TIF_SIGPENDING);
-	else if (!recalc_sigpending_tsk(current) && !freezing(current))
+	if (!recalc_sigpending_tsk(current) && !freezing(current))
 		clear_thread_flag(TIF_SIGPENDING);

 }
@@ -224,47 +222,93 @@ static inline void print_dropped_signal(int sig)
 }

 /**
- * task_clear_group_stop_trapping - clear group stop trapping bit
+ * task_set_jobctl_pending - set jobctl pending bits
  * @task: target task
+ * @mask: pending bits to set
+ *
+ * Clear @mask from @task->jobctl.  @mask must be subset of
+ * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
+ * %JOBCTL_TRAPPING.  If stop signo is being set, the existing signo is
+ * cleared.  If @task is already being killed or exiting, this function
+ * becomes noop.
+ *
+ * CONTEXT:
+ * Must be called with @task->sighand->siglock held.
+ *
+ * RETURNS:
+ * %true if @mask is set, %false if made noop because @task was dying.
+ */
+bool task_set_jobctl_pending(struct task_struct *task, unsigned int mask)
+{
+	BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
+			JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
+	BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));
+
+	if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
+		return false;
+
+	if (mask & JOBCTL_STOP_SIGMASK)
+		task->jobctl &= ~JOBCTL_STOP_SIGMASK;
+
+	task->jobctl |= mask;
+	return true;
+}
+
+/**
+ * task_clear_jobctl_trapping - clear jobctl trapping bit
+ * @task: target task
  *
- * If GROUP_STOP_TRAPPING is set, a ptracer is waiting for us.  Clear it
- * and wake up the ptracer.  Note that we don't need any further locking.
- * @task->siglock guarantees that @task->parent points to the ptracer.
+ * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
+ * Clear it and wake up the ptracer.  Note that we don't need any further
+ * locking.  @task->siglock guarantees that @task->parent points to the
+ * ptracer.
  *
  * CONTEXT:
  * Must be called with @task->sighand->siglock held.
  */
-static void task_clear_group_stop_trapping(struct task_struct *task)
+void task_clear_jobctl_trapping(struct task_struct *task)
 {
-	if (unlikely(task->group_stop & GROUP_STOP_TRAPPING)) {
-		task->group_stop &= ~GROUP_STOP_TRAPPING;
-		__wake_up_sync_key(&task->parent->signal->wait_chldexit,
-				   TASK_UNINTERRUPTIBLE, 1, task);
+	if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
+		task->jobctl &= ~JOBCTL_TRAPPING;
+		wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
 	}
 }

 /**
- * task_clear_group_stop_pending - clear pending group stop
+ * task_clear_jobctl_pending - clear jobctl pending bits
  * @task: target task
+ * @mask: pending bits to clear
  *
- * Clear group stop states for @task.
+ * Clear @mask from @task->jobctl.  @mask must be subset of
+ * %JOBCTL_PENDING_MASK.  If %JOBCTL_STOP_PENDING is being cleared, other
+ * STOP bits are cleared together.
+ *
+ * If clearing of @mask leaves no stop or trap pending, this function calls
+ * task_clear_jobctl_trapping().
  *
  * CONTEXT:
  * Must be called with @task->sighand->siglock held.
  */
-void task_clear_group_stop_pending(struct task_struct *task)
+void task_clear_jobctl_pending(struct task_struct *task, unsigned int mask)
 {
-	task->group_stop &= ~(GROUP_STOP_PENDING | GROUP_STOP_CONSUME |
-			      GROUP_STOP_DEQUEUED);
+	BUG_ON(mask & ~JOBCTL_PENDING_MASK);
+
+	if (mask & JOBCTL_STOP_PENDING)
+		mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;
+
+	task->jobctl &= ~mask;
+
+	if (!(task->jobctl & JOBCTL_PENDING_MASK))
+		task_clear_jobctl_trapping(task);
 }

 /**
  * task_participate_group_stop - participate in a group stop
  * @task: task participating in a group stop
  *
- * @task has GROUP_STOP_PENDING set and is participating in a group stop.
+ * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
  * Group stop states are cleared and the group stop count is consumed if
- * %GROUP_STOP_CONSUME was set.  If the consumption completes the group
+ * %JOBCTL_STOP_CONSUME was set.  If the consumption completes the group
  * stop, the appropriate %SIGNAL_* flags are set.
  *
  * CONTEXT:
@@ -277,11 +321,11 @@ void task_clear_group_stop_pending(struct task_struct *task)
 static bool task_participate_group_stop(struct task_struct *task)
 {
 	struct signal_struct *sig = task->signal;
-	bool consume = task->group_stop & GROUP_STOP_CONSUME;
+	bool consume = task->jobctl & JOBCTL_STOP_CONSUME;

-	WARN_ON_ONCE(!(task->group_stop & GROUP_STOP_PENDING));
+	WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));

-	task_clear_group_stop_pending(task);
+	task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);

 	if (!consume)
 		return false;
@@ -449,7 +493,8 @@ int unhandled_signal(struct task_struct *tsk, int sig)
 		return 1;
 	if (handler != SIG_IGN && handler != SIG_DFL)
 		return 0;
-	return !tracehook_consider_fatal_signal(tsk, sig);
+	/* if ptraced, let the tracer determine */
+	return !tsk->ptrace;
 }

 /*
@@ -604,7 +649,7 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
 		 */
-		current->group_stop |= GROUP_STOP_DEQUEUED;
+		current->jobctl |= JOBCTL_STOP_DEQUEUED;
 	}
 	if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) {
 		/*
@@ -773,6 +818,32 @@ static int check_kill_permission(int sig, struct siginfo *info,
 	return security_task_kill(t, info, sig, 0);
 }

+/**
+ * ptrace_trap_notify - schedule trap to notify ptracer
+ * @t: tracee wanting to notify tracer
+ *
+ * This function schedules sticky ptrace trap which is cleared on the next
+ * TRAP_STOP to notify ptracer of an event.  @t must have been seized by
+ * ptracer.
+ *
+ * If @t is running, STOP trap will be taken.  If trapped for STOP and
+ * ptracer is listening for events, tracee is woken up so that it can
+ * re-trap for the new event.  If trapped otherwise, STOP trap will be
+ * eventually taken without returning to userland after the existing traps
+ * are finished by PTRACE_CONT.
+ *
+ * CONTEXT:
+ * Must be called with @task->sighand->siglock held.
+ */
+static void ptrace_trap_notify(struct task_struct *t)
+{
+	WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
+	assert_spin_locked(&t->sighand->siglock);
+
+	task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
+	signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
+}
+
 /*
  * Handle magic process-wide effects of stop/continue signals. Unlike
  * the signal actions, these happen immediately at signal-generation
@@ -809,9 +880,12 @@ static int prepare_signal(int sig, struct task_struct *p, int from_ancestor_ns)
 		rm_from_queue(SIG_KERNEL_STOP_MASK, &signal->shared_pending);
 		t = p;
 		do {
-			task_clear_group_stop_pending(t);
+			task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
 			rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
-			wake_up_state(t, __TASK_STOPPED);
+			if (likely(!(t->ptrace & PT_SEIZED)))
+				wake_up_state(t, __TASK_STOPPED);
+			else
+				ptrace_trap_notify(t);
 		} while_each_thread(p, t);

 		/*
@@ -908,8 +982,7 @@ static void complete_signal(int sig, struct task_struct *p, int group)
 	if (sig_fatal(p, sig) &&
 	    !(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) &&
 	    !sigismember(&t->real_blocked, sig) &&
-	    (sig == SIGKILL ||
-	     !tracehook_consider_fatal_signal(t, sig))) {
+	    (sig == SIGKILL || !t->ptrace)) {
 		/*
		 * This signal will be fatal to the whole group.
 		 */
@@ -925,7 +998,7 @@ static void complete_signal(int sig, struct task_struct *p, int group)
 			signal->group_stop_count = 0;
 			t = p;
 			do {
-				task_clear_group_stop_pending(t);
+				task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
 				sigaddset(&t->pending.signal, SIGKILL);
 				signal_wake_up(t, 1);
 			} while_each_thread(p, t);
@@ -1160,7 +1233,7 @@ int zap_other_threads(struct task_struct *p)
 	p->signal->group_stop_count = 0;

 	while_each_thread(p, t) {
-		task_clear_group_stop_pending(t);
+		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
 		count++;

 		/* Don't bother with already dead threads */
@@ -1511,22 +1584,22 @@ int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
  * Let a parent know about the death of a child.
  * For a stopped/continued status change, use do_notify_parent_cldstop instead.
  *
- * Returns -1 if our parent ignored us and so we've switched to
- * self-reaping, or else @sig.
+ * Returns true if our parent ignored us and so we've switched to
+ * self-reaping.
  */
-int do_notify_parent(struct task_struct *tsk, int sig)
+bool do_notify_parent(struct task_struct *tsk, int sig)
 {
 	struct siginfo info;
 	unsigned long flags;
 	struct sighand_struct *psig;
-	int ret = sig;
+	bool autoreap = false;

 	BUG_ON(sig == -1);

 	/* do_notify_parent_cldstop should have been called instead. */
 	BUG_ON(task_is_stopped_or_traced(tsk));

-	BUG_ON(!task_ptrace(tsk) &&
+	BUG_ON(!tsk->ptrace &&
 	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));

 	info.si_signo = sig;
@@ -1565,7 +1638,7 @@ int do_notify_parent(struct task_struct *tsk, int sig)

 	psig = tsk->parent->sighand;
 	spin_lock_irqsave(&psig->siglock, flags);
-	if (!task_ptrace(tsk) && sig == SIGCHLD &&
+	if (!tsk->ptrace && sig == SIGCHLD &&
 	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
 	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
 		/*
@@ -1583,16 +1656,16 @@ int do_notify_parent(struct task_struct *tsk, int sig)
		 * is implementation-defined: we do (if you don't want
		 * it, just use SIG_IGN instead).
 		 */
-		ret = tsk->exit_signal = -1;
+		autoreap = true;
 		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
-			sig = -1;
+			sig = 0;
 	}
-	if (valid_signal(sig) && sig > 0)
+	if (valid_signal(sig) && sig)
 		__group_send_sig_info(sig, &info, tsk->parent);
 	__wake_up_parent(tsk, tsk->parent);
 	spin_unlock_irqrestore(&psig->siglock, flags);

-	return ret;
+	return autoreap;
 }

 /**
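The autoreap path is the kernel side of a corner userspace can test directly: with SIGCHLD set to SIG_IGN (or SA_NOCLDWAIT), dead children are reaped automatically and wait-family calls report ECHILD instead of returning a status. A small sketch:

#include <errno.h>
#include <signal.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

/* With SIGCHLD ignored, the child self-reaps: no zombie is left and
 * waitpid() finds no child to report. */
int main(void)
{
	pid_t pid;

	signal(SIGCHLD, SIG_IGN);
	pid = fork();
	if (pid == 0)
		_exit(0);
	if (waitpid(pid, NULL, 0) == -1 && errno == ECHILD)
		return 0;	/* child was autoreaped */
	return 1;
}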
@@ -1665,7 +1738,7 @@ static void do_notify_parent_cldstop(struct task_struct *tsk,

 static inline int may_ptrace_stop(void)
 {
-	if (!likely(task_ptrace(current)))
+	if (!likely(current->ptrace))
 		return 0;
 	/*
	 * Are we in the middle of do_coredump?
@@ -1693,15 +1766,6 @@ static int sigkill_pending(struct task_struct *tsk)
 	       sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
 }

-/*
- * Test whether the target task of the usual cldstop notification - the
- * real_parent of @child - is in the same group as the ptracer.
- */
-static bool real_parent_is_ptracer(struct task_struct *child)
-{
-	return same_thread_group(child->parent, child->real_parent);
-}
-
 /*
  * This must be called with current->sighand->siglock held.
  *
@@ -1739,31 +1803,34 @@ static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
 	}

 	/*
-	 * If @why is CLD_STOPPED, we're trapping to participate in a group
-	 * stop.  Do the bookkeeping.  Note that if SIGCONT was delievered
-	 * while siglock was released for the arch hook, PENDING could be
-	 * clear now.  We act as if SIGCONT is received after TASK_TRACED
-	 * is entered - ignore it.
+	 * We're committing to trapping.  TRACED should be visible before
+	 * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
+	 * Also, transition to TRACED and updates to ->jobctl should be
+	 * atomic with respect to siglock and should be done after the arch
+	 * hook as siglock is released and regrabbed across it.
 	 */
-	if (why == CLD_STOPPED && (current->group_stop & GROUP_STOP_PENDING))
-		gstop_done = task_participate_group_stop(current);
+	set_current_state(TASK_TRACED);

 	current->last_siginfo = info;
 	current->exit_code = exit_code;

 	/*
-	 * TRACED should be visible before TRAPPING is cleared; otherwise,
-	 * the tracer might fail do_wait().
+	 * If @why is CLD_STOPPED, we're trapping to participate in a group
+	 * stop.  Do the bookkeeping.  Note that if SIGCONT was delievered
+	 * across siglock relocks since INTERRUPT was scheduled, PENDING
+	 * could be clear now.  We act as if SIGCONT is received after
+	 * TASK_TRACED is entered - ignore it.
 	 */
-	set_current_state(TASK_TRACED);
+	if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
+		gstop_done = task_participate_group_stop(current);

-	/*
-	 * We're committing to trapping.  Clearing GROUP_STOP_TRAPPING and
-	 * transition to TASK_TRACED should be atomic with respect to
-	 * siglock.  This hsould be done after the arch hook as siglock is
-	 * released and regrabbed across it.
-	 */
-	task_clear_group_stop_trapping(current);
+	/* any trap clears pending STOP trap, STOP trap clears NOTIFY */
+	task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
+	if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
+		task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);
+
+	/* entering a trap, clear TRAPPING */
+	task_clear_jobctl_trapping(current);

 	spin_unlock_irq(&current->sighand->siglock);
 	read_lock(&tasklist_lock);
@@ -1779,7 +1846,7 @@ static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
	 * separately unless they're gonna be duplicates.
 	 */
 	do_notify_parent_cldstop(current, true, why);
-	if (gstop_done && !real_parent_is_ptracer(current))
+	if (gstop_done && ptrace_reparented(current))
 		do_notify_parent_cldstop(current, false, why);

 	/*
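ptrace_reparented() replaces the open-coded helper removed earlier in this diff; it is just the inverted same_thread_group() test against the tracee's two parent pointers. For reference, a sketch of its definition as this series has it in include/linux/ptrace.h, quoted from memory, so treat the exact body as an approximation:

/* Has the tracee been re-parented away from its real parent onto a
 * tracer in a different thread group? */
static inline int ptrace_reparented(struct task_struct *child)
{
	return !same_thread_group(child->real_parent, child->parent);
}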
@@ -1799,9 +1866,9 @@ static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
		 *
		 * If @gstop_done, the ptracer went away between group stop
		 * completion and here.  During detach, it would have set
-		 * GROUP_STOP_PENDING on us and we'll re-enter TASK_STOPPED
-		 * in do_signal_stop() on return, so notifying the real
-		 * parent of the group stop completion is enough.
+		 * JOBCTL_STOP_PENDING on us and we'll re-enter
+		 * TASK_STOPPED in do_signal_stop() on return, so notifying
+		 * the real parent of the group stop completion is enough.
 		 */
 		if (gstop_done)
 			do_notify_parent_cldstop(current, false, why);
@@ -1827,6 +1894,9 @@ static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
 	spin_lock_irq(&current->sighand->siglock);
 	current->last_siginfo = NULL;

+	/* LISTENING can be set only during STOP traps, clear it */
+	current->jobctl &= ~JOBCTL_LISTENING;
+
 	/*
	 * Queued signals ignored us while we were stopped for tracing.
	 * So check for any that we should take before resuming user mode.
@@ -1835,44 +1905,66 @@ static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
 	recalc_sigpending_tsk(current);
 }

-void ptrace_notify(int exit_code)
+static void ptrace_do_notify(int signr, int exit_code, int why)
 {
 	siginfo_t info;

-	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
-
 	memset(&info, 0, sizeof info);
-	info.si_signo = SIGTRAP;
+	info.si_signo = signr;
 	info.si_code = exit_code;
 	info.si_pid = task_pid_vnr(current);
 	info.si_uid = current_uid();

 	/* Let the debugger run.  */
+	ptrace_stop(exit_code, why, 1, &info);
+}
+
+void ptrace_notify(int exit_code)
+{
+	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
+
 	spin_lock_irq(&current->sighand->siglock);
-	ptrace_stop(exit_code, CLD_TRAPPED, 1, &info);
+	ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED);
 	spin_unlock_irq(&current->sighand->siglock);
 }

-/*
- * This performs the stopping for SIGSTOP and other stop signals.
- * We have to stop all threads in the thread group.
- * Returns non-zero if we've actually stopped and released the siglock.
- * Returns zero if we didn't stop and still hold the siglock.
+/**
+ * do_signal_stop - handle group stop for SIGSTOP and other stop signals
+ * @signr: signr causing group stop if initiating
+ *
+ * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
+ * and participate in it.  If already set, participate in the existing
+ * group stop.  If participated in a group stop (and thus slept), %true is
+ * returned with siglock released.
+ *
+ * If ptraced, this function doesn't handle stop itself.  Instead,
+ * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
+ * untouched.  The caller must ensure that INTERRUPT trap handling takes
+ * places afterwards.
+ *
+ * CONTEXT:
+ * Must be called with @current->sighand->siglock held, which is released
+ * on %true return.
+ *
+ * RETURNS:
+ * %false if group stop is already cancelled or ptrace trap is scheduled.
+ * %true if participated in group stop.
  */
-static int do_signal_stop(int signr)
+static bool do_signal_stop(int signr)
+	__releases(&current->sighand->siglock)
 {
 	struct signal_struct *sig = current->signal;

-	if (!(current->group_stop & GROUP_STOP_PENDING)) {
-		unsigned int gstop = GROUP_STOP_PENDING | GROUP_STOP_CONSUME;
+	if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
+		unsigned int gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
 		struct task_struct *t;

-		/* signr will be recorded in task->group_stop for retries */
-		WARN_ON_ONCE(signr & ~GROUP_STOP_SIGMASK);
+		/* signr will be recorded in task->jobctl for retries */
+		WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);

-		if (!likely(current->group_stop & GROUP_STOP_DEQUEUED) ||
+		if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
 		    unlikely(signal_group_exit(sig)))
-			return 0;
+			return false;
 		/*
		 * There is no group stop already in progress.  We must
		 * initiate one now.
@@ -1895,28 +1987,32 @@ static int do_signal_stop(int signr)
 		if (!(sig->flags & SIGNAL_STOP_STOPPED))
 			sig->group_exit_code = signr;
 		else
-			WARN_ON_ONCE(!task_ptrace(current));
+			WARN_ON_ONCE(!current->ptrace);
+
+		sig->group_stop_count = 0;
+
+		if (task_set_jobctl_pending(current, signr | gstop))
+			sig->group_stop_count++;

-		current->group_stop &= ~GROUP_STOP_SIGMASK;
-		current->group_stop |= signr | gstop;
-		sig->group_stop_count = 1;
 		for (t = next_thread(current); t != current;
 		     t = next_thread(t)) {
-			t->group_stop &= ~GROUP_STOP_SIGMASK;
 			/*
			 * Setting state to TASK_STOPPED for a group
			 * stop is always done with the siglock held,
			 * so this check has no races.
 			 */
-			if (!(t->flags & PF_EXITING) && !task_is_stopped(t)) {
-				t->group_stop |= signr | gstop;
-				signal_wake_up(t, 0);
+			if (!task_is_stopped(t) &&
+			    task_set_jobctl_pending(t, signr | gstop)) {
+				sig->group_stop_count++;
+				if (likely(!(t->ptrace & PT_SEIZED)))
+					signal_wake_up(t, 0);
+				else
+					ptrace_trap_notify(t);
 			}
 		}
 	}
-retry:
-	if (likely(!task_ptrace(current))) {
+
+	if (likely(!current->ptrace)) {
 		int notify = 0;

 		/*
@@ -1947,43 +2043,65 @@ static int do_signal_stop(int signr)
 		/* Now we don't run again until woken by SIGCONT or SIGKILL */
 		schedule();
-
-		spin_lock_irq(&current->sighand->siglock);
+		return true;
 	} else {
-		ptrace_stop(current->group_stop & GROUP_STOP_SIGMASK,
-			    CLD_STOPPED, 0, NULL);
-		current->exit_code = 0;
-	}
-
-	/*
-	 * GROUP_STOP_PENDING could be set if another group stop has
-	 * started since being woken up or ptrace wants us to transit
-	 * between TASK_STOPPED and TRACED.  Retry group stop.
-	 */
-	if (current->group_stop & GROUP_STOP_PENDING) {
-		WARN_ON_ONCE(!(current->group_stop & GROUP_STOP_SIGMASK));
-		goto retry;
+		/*
+		 * While ptraced, group stop is handled by STOP trap.
+		 * Schedule it and let the caller deal with it.
+		 */
+		task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
+		return false;
 	}
+}

-	/* PTRACE_ATTACH might have raced with task killing, clear trapping */
-	task_clear_group_stop_trapping(current);
-
-	spin_unlock_irq(&current->sighand->siglock);
-
-	tracehook_finish_jctl();
-
-	return 1;
+/**
+ * do_jobctl_trap - take care of ptrace jobctl traps
+ *
+ * When PT_SEIZED, it's used for both group stop and explicit
+ * SEIZE/INTERRUPT traps.  Both generate PTRACE_EVENT_STOP trap with
+ * accompanying siginfo.  If stopped, lower eight bits of exit_code contain
+ * the stop signal; otherwise, %SIGTRAP.
+ *
+ * When !PT_SEIZED, it's used only for group stop trap with stop signal
+ * number as exit_code and no siginfo.
+ *
+ * CONTEXT:
+ * Must be called with @current->sighand->siglock held, which may be
+ * released and re-acquired before returning with intervening sleep.
+ */
+static void do_jobctl_trap(void)
+{
+	struct signal_struct *signal = current->signal;
+	int signr = current->jobctl & JOBCTL_STOP_SIGMASK;
+
+	if (current->ptrace & PT_SEIZED) {
+		if (!signal->group_stop_count &&
+		    !(signal->flags & SIGNAL_STOP_STOPPED))
+			signr = SIGTRAP;
+		WARN_ON_ONCE(!signr);
+		ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
+				 CLD_STOPPED);
+	} else {
+		WARN_ON_ONCE(!signr);
+		ptrace_stop(signr, CLD_STOPPED, 0, NULL);
+		current->exit_code = 0;
+	}
 }

 static int ptrace_signal(int signr, siginfo_t *info,
 			 struct pt_regs *regs, void *cookie)
 {
-	if (!task_ptrace(current))
-		return signr;
-
 	ptrace_signal_deliver(regs, cookie);
-
-	/* Let the debugger run.  */
+	/*
+	 * We do not check sig_kernel_stop(signr) but set this marker
+	 * unconditionally because we do not know whether debugger will
+	 * change signr. This flag has no meaning unless we are going
+	 * to stop after return from ptrace_stop(). In this case it will
+	 * be checked in do_signal_stop(), we should only stop if it was
+	 * not cleared by SIGCONT while we were sleeping. See also the
+	 * comment in dequeue_signal().
+	 */
+	current->jobctl |= JOBCTL_STOP_DEQUEUED;
 	ptrace_stop(signr, CLD_TRAPPED, 0, info);

 	/* We're back.  Did the debugger cancel the sig? */
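The split do_jobctl_trap() draws is visible in the waitpid() status word: under PTRACE_SEIZE, both group stops and INTERRUPT traps report PTRACE_EVENT_STOP in the event bits, with the stop signal (or SIGTRAP for a plain trap) below it. A hedged decoding sketch; the PTRACE_EVENT_STOP value is the one this series introduces:

#include <signal.h>
#include <sys/wait.h>

#ifndef PTRACE_EVENT_STOP
#define PTRACE_EVENT_STOP 128	/* value introduced by this series */
#endif

/* Classify a waitpid() status from a seized tracee.  Returns the stop
 * signal for a group-stop trap, 0 for a plain INTERRUPT/SEIZE trap,
 * and -1 for anything that is not an event stop. */
static int classify_event_stop(int status)
{
	if (!WIFSTOPPED(status))
		return -1;
	if (((status >> 16) & 0xff) != PTRACE_EVENT_STOP)
		return -1;
	return WSTOPSIG(status) == SIGTRAP ? 0 : WSTOPSIG(status);
}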
@@ -2039,7 +2157,6 @@ int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
	 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
 	 */
 	if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
-		struct task_struct *leader;
 		int why;

 		if (signal->flags & SIGNAL_CLD_CONTINUED)
@@ -2060,13 +2177,11 @@ int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
		 * a duplicate.
 		 */
 		read_lock(&tasklist_lock);
-
 		do_notify_parent_cldstop(current, false, why);

-		leader = current->group_leader;
-		if (task_ptrace(leader) && !real_parent_is_ptracer(leader))
-			do_notify_parent_cldstop(leader, true, why);
-
+		if (ptrace_reparented(current->group_leader))
+			do_notify_parent_cldstop(current->group_leader,
+						 true, why);
 		read_unlock(&tasklist_lock);

 		goto relock;
@@ -2074,37 +2189,31 @@ int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,

 	for (;;) {
 		struct k_sigaction *ka;
-		/*
-		 * Tracing can induce an artificial signal and choose sigaction.
-		 * The return value in @signr determines the default action,
-		 * but @info->si_signo is the signal number we will report.
-		 */
-		signr = tracehook_get_signal(current, regs, info, return_ka);
-		if (unlikely(signr < 0))
+
+		if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
+		    do_signal_stop(0))
 			goto relock;
-		if (unlikely(signr != 0))
-			ka = return_ka;
-		else {
-			if (unlikely(current->group_stop &
-				     GROUP_STOP_PENDING) && do_signal_stop(0))
-				goto relock;

-			signr = dequeue_signal(current, &current->blocked,
-					       info);
+		if (unlikely(current->jobctl & JOBCTL_TRAP_MASK)) {
+			do_jobctl_trap();
+			spin_unlock_irq(&sighand->siglock);
+			goto relock;
+		}

-			if (!signr)
-				break; /* will return 0 */
+		signr = dequeue_signal(current, &current->blocked, info);

-			if (signr != SIGKILL) {
-				signr = ptrace_signal(signr, info,
-						      regs, cookie);
-				if (!signr)
-					continue;
-			}
+		if (!signr)
+			break; /* will return 0 */

-			ka = &sighand->action[signr-1];
-		}
+		if (unlikely(current->ptrace) && signr != SIGKILL) {
+			signr = ptrace_signal(signr, info,
+					      regs, cookie);
+			if (!signr)
+				continue;
+		}
+
+		ka = &sighand->action[signr-1];

 		/* Trace actually delivered signals. */
 		trace_signal_deliver(signr, info, ka);
@@ -2260,7 +2369,7 @@ void exit_signals(struct task_struct *tsk)
 	signotset(&unblocked);
 	retarget_shared_pending(tsk, &unblocked);

-	if (unlikely(tsk->group_stop & GROUP_STOP_PENDING) &&
+	if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
 	    task_participate_group_stop(tsk))
 		group_stop = CLD_STOPPED;
 out:

mm/nommu.c
@@ -22,7 +22,6 @@
 #include <linux/pagemap.h>
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
-#include <linux/tracehook.h>
 #include <linux/blkdev.h>
 #include <linux/backing-dev.h>
 #include <linux/mount.h>
@@ -1087,7 +1086,7 @@ static unsigned long determine_vm_flags(struct file *file,
	 * it's being traced - otherwise breakpoints set in it may interfere
	 * with another untraced process
 	 */
-	if ((flags & MAP_PRIVATE) && tracehook_expect_breakpoints(current))
+	if ((flags & MAP_PRIVATE) && current->ptrace)
 		vm_flags &= ~VM_MAYSHARE;

 	return vm_flags;

mm/oom_kill.c
@@ -339,8 +339,7 @@ static struct task_struct *select_bad_process(unsigned int *ppoints,
			 * then wait for it to finish before killing
			 * some other task unnecessarily.
 			 */
-			if (!(task_ptrace(p->group_leader) &
-			      PT_TRACE_EXIT))
+			if (!(p->group_leader->ptrace & PT_TRACE_EXIT))
 				return ERR_PTR(-1UL);
 		}
 	}

security/apparmor/domain.c
@@ -67,7 +67,7 @@ static int may_change_ptraced_domain(struct task_struct *task,
 	int error = 0;

 	rcu_read_lock();
-	tracer = tracehook_tracer_task(task);
+	tracer = ptrace_parent(task);
 	if (tracer) {
 		/* released below */
 		cred = get_task_cred(tracer);

security/selinux/hooks.c
@@ -2053,7 +2053,7 @@ static int selinux_bprm_set_creds(struct linux_binprm *bprm)
 	u32 ptsid = 0;

 	rcu_read_lock();
-	tracer = tracehook_tracer_task(current);
+	tracer = ptrace_parent(current);
 	if (likely(tracer != NULL)) {
 		sec = __task_cred(tracer)->security;
 		ptsid = sec->sid;
@@ -5319,7 +5319,7 @@ static int selinux_setprocattr(struct task_struct *p,
		   Otherwise, leave SID unchanged and fail. */
 		ptsid = 0;
 		task_lock(p);
-		tracer = tracehook_tracer_task(p);
+		tracer = ptrace_parent(p);
 		if (tracer)
 			ptsid = task_sid(tracer);
 		task_unlock(p);
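ptrace_parent() is the in-kernel way to ask "who is tracing this task"; from userspace the same answer is exported as the TracerPid field of /proc/<pid>/status. A small sketch reading it for the current process:

#include <stdio.h>

/* Return the PID of our tracer, 0 if nobody is tracing us, -1 on error. */
static long tracer_pid(void)
{
	char line[256];
	long pid = 0;
	FILE *f = fopen("/proc/self/status", "r");

	if (!f)
		return -1;
	while (fgets(line, sizeof(line), f))
		if (sscanf(line, "TracerPid: %ld", &pid) == 1)
			break;
	fclose(f);
	return pid;
}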