Merge branch 'work.sys_wait' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs
Pull wait syscall updates from Al Viro:
 "Consolidating sys_wait* and compat counterparts. Gets rid of the
  set_fs()/double-copy mess, simplifies the whole thing (lifting the
  copyouts to the syscalls means less headache in the part that does
  the actual work - fewer failure exits, to start with), and gets rid
  of the overhead of field-by-field __put_user()"

* 'work.sys_wait' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs:
  osf_wait4: switch to kernel_wait4()
  waitid(): switch copyout of siginfo to unsafe_put_user()
  wait_task_zombie: consolidate info logics
  kill wait_noreap_copyout()
  lift getrusage() from wait_noreap_copyout()
  waitid(2): leave copyout of siginfo to syscall itself
  kernel_wait4()/kernel_waitid(): delay copying status to userland
  wait4(2)/waitid(2): separate copying rusage to userland
  move compat wait4 and waitid next to native variants
commit 4be95131bf
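The shape of the change, as described above: the actual wait logic moves into kernel-side helpers (kernel_wait4(), kernel_waitid()) that only fill plain kernel structures, while the syscall wrappers do a single copyout to userland at the end. Below is a minimal, self-contained userspace sketch of that split; demo_rusage, demo_helper, demo_wrapper and demo_copy_to_user are illustrative stand-ins, not kernel APIs.

#include <stddef.h>
#include <string.h>
#include <stdio.h>

/* Illustrative stand-ins only -- not kernel types or functions. */
struct demo_rusage { long utime_us; long stime_us; };

static int demo_copy_to_user(void *dst, const void *src, size_t n)
{
	memcpy(dst, src, n);	/* pretend copy_to_user(); 0 means success */
	return 0;
}

/* "kernel_wait4()"-shaped helper: works only on kernel-side data,
 * so the reaping logic has a single result and no -EFAULT exits. */
static long demo_helper(int *status, struct demo_rusage *ru)
{
	*status = 3 << 8;	/* child exited with code 3 */
	ru->utime_us = 1500;
	ru->stime_us = 200;
	return 4242;		/* pid of the reaped child */
}

/* "sys_wait4()"-shaped wrapper: all copyout to the caller happens here, once. */
static long demo_wrapper(int *ustatus, struct demo_rusage *uru)
{
	struct demo_rusage r;
	int status;
	long pid = demo_helper(&status, &r);

	if (pid <= 0)
		return pid;
	if (ustatus && demo_copy_to_user(ustatus, &status, sizeof(status)))
		return -14;	/* -EFAULT */
	if (uru && demo_copy_to_user(uru, &r, sizeof(r)))
		return -14;
	return pid;
}

int main(void)
{
	struct demo_rusage ru;
	int status;
	long pid = demo_wrapper(&status, &ru);

	printf("pid=%ld exit=%d utime=%ldus\n", pid, (status >> 8) & 0xff, ru.utime_us);
	return 0;
}

With the copyout confined to the wrapper, the helper has no -EFAULT exits, which is what lets the per-architecture set_fs(KERNEL_DS)/double-copy wrappers in the diff below collapse into one put_user()/copy_to_user() sequence.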
@@ -1183,48 +1183,23 @@ SYSCALL_DEFINE2(osf_getrusage, int, who, struct rusage32 __user *, ru)
 SYSCALL_DEFINE4(osf_wait4, pid_t, pid, int __user *, ustatus, int, options,
 		struct rusage32 __user *, ur)
 {
+	unsigned int status = 0;
 	struct rusage r;
-	long ret, err;
-	unsigned int status = 0;
-	mm_segment_t old_fs;
-
-	if (!ur)
-		return sys_wait4(pid, ustatus, options, NULL);
-
-	old_fs = get_fs();
-
-	set_fs (KERNEL_DS);
-	ret = sys_wait4(pid, (unsigned int __user *) &status, options,
-			(struct rusage __user *) &r);
-	set_fs (old_fs);
-
-	if (!access_ok(VERIFY_WRITE, ur, sizeof(*ur)))
-		return -EFAULT;
-
-	err = put_user(status, ustatus);
-	if (ret < 0)
-		return err ? err : ret;
-
-	err |= __put_user(r.ru_utime.tv_sec, &ur->ru_utime.tv_sec);
-	err |= __put_user(r.ru_utime.tv_usec, &ur->ru_utime.tv_usec);
-	err |= __put_user(r.ru_stime.tv_sec, &ur->ru_stime.tv_sec);
-	err |= __put_user(r.ru_stime.tv_usec, &ur->ru_stime.tv_usec);
-	err |= __put_user(r.ru_maxrss, &ur->ru_maxrss);
-	err |= __put_user(r.ru_ixrss, &ur->ru_ixrss);
-	err |= __put_user(r.ru_idrss, &ur->ru_idrss);
-	err |= __put_user(r.ru_isrss, &ur->ru_isrss);
-	err |= __put_user(r.ru_minflt, &ur->ru_minflt);
-	err |= __put_user(r.ru_majflt, &ur->ru_majflt);
-	err |= __put_user(r.ru_nswap, &ur->ru_nswap);
-	err |= __put_user(r.ru_inblock, &ur->ru_inblock);
-	err |= __put_user(r.ru_oublock, &ur->ru_oublock);
-	err |= __put_user(r.ru_msgsnd, &ur->ru_msgsnd);
-	err |= __put_user(r.ru_msgrcv, &ur->ru_msgrcv);
-	err |= __put_user(r.ru_nsignals, &ur->ru_nsignals);
-	err |= __put_user(r.ru_nvcsw, &ur->ru_nvcsw);
-	err |= __put_user(r.ru_nivcsw, &ur->ru_nivcsw);
-
-	return err ? err : ret;
+	long err = kernel_wait4(pid, &status, options, &r);
+	if (err <= 0)
+		return err;
+	if (put_user(status, ustatus))
+		return -EFAULT;
+	if (!ur)
+		return err;
+	if (put_tv32(&ur->ru_utime, &r.ru_utime))
+		return -EFAULT;
+	if (put_tv32(&ur->ru_stime, &r.ru_stime))
+		return -EFAULT;
+	if (copy_to_user(&ur->ru_maxrss, &r.ru_maxrss,
+	       sizeof(struct rusage32) - offsetof(struct rusage32, ru_maxrss)))
+		return -EFAULT;
+	return err;
 }
 
 /*
@@ -6,7 +6,7 @@
 
 struct task_struct;
 
-int getrusage(struct task_struct *p, int who, struct rusage __user *ru);
+void getrusage(struct task_struct *p, int who, struct rusage *ru);
 int do_prlimit(struct task_struct *tsk, unsigned int resource,
 		struct rlimit *new_rlim, struct rlimit *old_rlim);
 
@@ -9,6 +9,7 @@
 #include <linux/sched.h>
 
 struct task_struct;
+struct rusage;
 union thread_union;
 
 /*
@@ -74,6 +75,7 @@ extern long _do_fork(unsigned long, unsigned long, unsigned long, int __user *,
 extern long do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *);
 struct task_struct *fork_idle(int);
 extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
+extern long kernel_wait4(pid_t, int *, int, struct rusage *);
 
 extern void free_task(struct task_struct *tsk);
 
@@ -396,72 +396,6 @@ int put_compat_rusage(const struct rusage *r, struct compat_rusage __user *ru)
 	return 0;
 }
 
-COMPAT_SYSCALL_DEFINE4(wait4,
-	compat_pid_t, pid,
-	compat_uint_t __user *, stat_addr,
-	int, options,
-	struct compat_rusage __user *, ru)
-{
-	if (!ru) {
-		return sys_wait4(pid, stat_addr, options, NULL);
-	} else {
-		struct rusage r;
-		int ret;
-		unsigned int status;
-		mm_segment_t old_fs = get_fs();
-
-		set_fs (KERNEL_DS);
-		ret = sys_wait4(pid,
-			(stat_addr ?
-			 (unsigned int __user *) &status : NULL),
-			options, (struct rusage __user *) &r);
-		set_fs (old_fs);
-
-		if (ret > 0) {
-			if (put_compat_rusage(&r, ru))
-				return -EFAULT;
-			if (stat_addr && put_user(status, stat_addr))
-				return -EFAULT;
-		}
-		return ret;
-	}
-}
-
-COMPAT_SYSCALL_DEFINE5(waitid,
-		int, which, compat_pid_t, pid,
-		struct compat_siginfo __user *, uinfo, int, options,
-		struct compat_rusage __user *, uru)
-{
-	siginfo_t info;
-	struct rusage ru;
-	long ret;
-	mm_segment_t old_fs = get_fs();
-
-	memset(&info, 0, sizeof(info));
-
-	set_fs(KERNEL_DS);
-	ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
-			 uru ? (struct rusage __user *)&ru : NULL);
-	set_fs(old_fs);
-
-	if ((ret < 0) || (info.si_signo == 0))
-		return ret;
-
-	if (uru) {
-		/* sys_waitid() overwrites everything in ru */
-		if (COMPAT_USE_64BIT_TIME)
-			ret = copy_to_user(uru, &ru, sizeof(ru));
-		else
-			ret = put_compat_rusage(&ru, uru);
-		if (ret)
-			return -EFAULT;
-	}
-
-	BUG_ON(info.si_code & __SI_MASK);
-	info.si_code |= __SI_CHLD;
-	return copy_siginfo_to_user32(uinfo, &info);
-}
-
 static int compat_get_user_cpu_mask(compat_ulong_t __user *user_mask_ptr,
 				    unsigned len, struct cpumask *new_mask)
 {
kernel/exit.c
@@ -62,6 +62,7 @@
 #include <linux/kcov.h>
 #include <linux/random.h>
 #include <linux/rcuwait.h>
+#include <linux/compat.h>
 
 #include <linux/uaccess.h>
 #include <asm/unistd.h>
@@ -982,14 +983,21 @@ SYSCALL_DEFINE1(exit_group, int, error_code)
 	return 0;
 }
 
+struct waitid_info {
+	pid_t pid;
+	uid_t uid;
+	int status;
+	int cause;
+};
+
 struct wait_opts {
 	enum pid_type		wo_type;
 	int			wo_flags;
 	struct pid		*wo_pid;
 
-	struct siginfo __user	*wo_info;
-	int __user		*wo_stat;
-	struct rusage __user	*wo_rusage;
+	struct waitid_info	*wo_info;
+	int			wo_stat;
+	struct rusage		*wo_rusage;
 
 	wait_queue_entry_t		child_wait;
 	int			notask_error;
@@ -1036,34 +1044,6 @@ eligible_child(struct wait_opts *wo, bool ptrace, struct task_struct *p)
 	return 1;
 }
 
-static int wait_noreap_copyout(struct wait_opts *wo, struct task_struct *p,
-				pid_t pid, uid_t uid, int why, int status)
-{
-	struct siginfo __user *infop;
-	int retval = wo->wo_rusage
-		? getrusage(p, RUSAGE_BOTH, wo->wo_rusage) : 0;
-
-	put_task_struct(p);
-	infop = wo->wo_info;
-	if (infop) {
-		if (!retval)
-			retval = put_user(SIGCHLD, &infop->si_signo);
-		if (!retval)
-			retval = put_user(0, &infop->si_errno);
-		if (!retval)
-			retval = put_user((short)why, &infop->si_code);
-		if (!retval)
-			retval = put_user(pid, &infop->si_pid);
-		if (!retval)
-			retval = put_user(uid, &infop->si_uid);
-		if (!retval)
-			retval = put_user(status, &infop->si_status);
-	}
-	if (!retval)
-		retval = pid;
-	return retval;
-}
-
 /*
  * Handle sys_wait4 work for one task in state EXIT_ZOMBIE. We hold
  * read_lock(&tasklist_lock) on entry. If we return zero, we still hold
@@ -1072,30 +1052,23 @@ static int wait_noreap_copyout(struct wait_opts *wo, struct task_struct *p,
  */
 static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
 {
-	int state, retval, status;
+	int state, status;
 	pid_t pid = task_pid_vnr(p);
 	uid_t uid = from_kuid_munged(current_user_ns(), task_uid(p));
-	struct siginfo __user *infop;
+	struct waitid_info *infop;
 
 	if (!likely(wo->wo_flags & WEXITED))
 		return 0;
 
 	if (unlikely(wo->wo_flags & WNOWAIT)) {
-		int exit_code = p->exit_code;
-		int why;
-
+		status = p->exit_code;
 		get_task_struct(p);
 		read_unlock(&tasklist_lock);
 		sched_annotate_sleep();
-
-		if ((exit_code & 0x7f) == 0) {
-			why = CLD_EXITED;
-			status = exit_code >> 8;
-		} else {
-			why = (exit_code & 0x80) ? CLD_DUMPED : CLD_KILLED;
-			status = exit_code & 0x7f;
-		}
-		return wait_noreap_copyout(wo, p, pid, uid, why, status);
+		if (wo->wo_rusage)
+			getrusage(p, RUSAGE_BOTH, wo->wo_rusage);
+		put_task_struct(p);
+		goto out_info;
 	}
 	/*
 	 * Move the task's state to DEAD/TRACE, only one thread can do this.
@@ -1168,38 +1141,11 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
 		spin_unlock_irq(&current->sighand->siglock);
 	}
 
-	retval = wo->wo_rusage
-		? getrusage(p, RUSAGE_BOTH, wo->wo_rusage) : 0;
+	if (wo->wo_rusage)
+		getrusage(p, RUSAGE_BOTH, wo->wo_rusage);
 	status = (p->signal->flags & SIGNAL_GROUP_EXIT)
 		? p->signal->group_exit_code : p->exit_code;
-	if (!retval && wo->wo_stat)
-		retval = put_user(status, wo->wo_stat);
-
-	infop = wo->wo_info;
-	if (!retval && infop)
-		retval = put_user(SIGCHLD, &infop->si_signo);
-	if (!retval && infop)
-		retval = put_user(0, &infop->si_errno);
-	if (!retval && infop) {
-		int why;
-
-		if ((status & 0x7f) == 0) {
-			why = CLD_EXITED;
-			status >>= 8;
-		} else {
-			why = (status & 0x80) ? CLD_DUMPED : CLD_KILLED;
-			status &= 0x7f;
-		}
-		retval = put_user((short)why, &infop->si_code);
-		if (!retval)
-			retval = put_user(status, &infop->si_status);
-	}
-	if (!retval && infop)
-		retval = put_user(pid, &infop->si_pid);
-	if (!retval && infop)
-		retval = put_user(uid, &infop->si_uid);
-	if (!retval)
-		retval = pid;
+	wo->wo_stat = status;
 
 	if (state == EXIT_TRACE) {
 		write_lock_irq(&tasklist_lock);
@@ -1216,7 +1162,21 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
 	if (state == EXIT_DEAD)
 		release_task(p);
 
-	return retval;
+out_info:
+	infop = wo->wo_info;
+	if (infop) {
+		if ((status & 0x7f) == 0) {
+			infop->cause = CLD_EXITED;
+			infop->status = status >> 8;
+		} else {
+			infop->cause = (status & 0x80) ? CLD_DUMPED : CLD_KILLED;
+			infop->status = status & 0x7f;
+		}
+		infop->pid = pid;
+		infop->uid = uid;
+	}
+
+	return pid;
 }
 
 static int *task_stopped_code(struct task_struct *p, bool ptrace)
@@ -1252,8 +1212,8 @@ static int *task_stopped_code(struct task_struct *p, bool ptrace)
 static int wait_task_stopped(struct wait_opts *wo,
 				int ptrace, struct task_struct *p)
 {
-	struct siginfo __user *infop;
-	int retval, exit_code, *p_code, why;
+	struct waitid_info *infop;
+	int exit_code, *p_code, why;
 	uid_t uid = 0; /* unneeded, required by compiler */
 	pid_t pid;
 
@@ -1298,34 +1258,21 @@ static int wait_task_stopped(struct wait_opts *wo,
 	why = ptrace ? CLD_TRAPPED : CLD_STOPPED;
 	read_unlock(&tasklist_lock);
 	sched_annotate_sleep();
-
-	if (unlikely(wo->wo_flags & WNOWAIT))
-		return wait_noreap_copyout(wo, p, pid, uid, why, exit_code);
-
-	retval = wo->wo_rusage
-		? getrusage(p, RUSAGE_BOTH, wo->wo_rusage) : 0;
-	if (!retval && wo->wo_stat)
-		retval = put_user((exit_code << 8) | 0x7f, wo->wo_stat);
-
-	infop = wo->wo_info;
-	if (!retval && infop)
-		retval = put_user(SIGCHLD, &infop->si_signo);
-	if (!retval && infop)
-		retval = put_user(0, &infop->si_errno);
-	if (!retval && infop)
-		retval = put_user((short)why, &infop->si_code);
-	if (!retval && infop)
-		retval = put_user(exit_code, &infop->si_status);
-	if (!retval && infop)
-		retval = put_user(pid, &infop->si_pid);
-	if (!retval && infop)
-		retval = put_user(uid, &infop->si_uid);
-	if (!retval)
-		retval = pid;
+	if (wo->wo_rusage)
+		getrusage(p, RUSAGE_BOTH, wo->wo_rusage);
 	put_task_struct(p);
 
-	BUG_ON(!retval);
-	return retval;
+	if (likely(!(wo->wo_flags & WNOWAIT)))
+		wo->wo_stat = (exit_code << 8) | 0x7f;
+
+	infop = wo->wo_info;
+	if (infop) {
+		infop->cause = why;
+		infop->status = exit_code;
+		infop->pid = pid;
+		infop->uid = uid;
+	}
+	return pid;
 }
 
 /*
@@ -1336,7 +1283,7 @@ static int wait_task_stopped(struct wait_opts *wo,
  */
 static int wait_task_continued(struct wait_opts *wo, struct task_struct *p)
 {
-	int retval;
+	struct waitid_info *infop;
 	pid_t pid;
 	uid_t uid;
 
@@ -1361,22 +1308,20 @@ static int wait_task_continued(struct wait_opts *wo, struct task_struct *p)
 	get_task_struct(p);
 	read_unlock(&tasklist_lock);
 	sched_annotate_sleep();
-
-	if (!wo->wo_info) {
-		retval = wo->wo_rusage
-			? getrusage(p, RUSAGE_BOTH, wo->wo_rusage) : 0;
-		put_task_struct(p);
-		if (!retval && wo->wo_stat)
-			retval = put_user(0xffff, wo->wo_stat);
-		if (!retval)
-			retval = pid;
-	} else {
-		retval = wait_noreap_copyout(wo, p, pid, uid,
-					     CLD_CONTINUED, SIGCONT);
-		BUG_ON(retval == 0);
-	}
-
-	return retval;
+	if (wo->wo_rusage)
+		getrusage(p, RUSAGE_BOTH, wo->wo_rusage);
+	put_task_struct(p);
+
+	infop = wo->wo_info;
+	if (!infop) {
+		wo->wo_stat = 0xffff;
+	} else {
+		infop->cause = CLD_CONTINUED;
+		infop->pid = pid;
+		infop->uid = uid;
+		infop->status = SIGCONT;
+	}
+	return pid;
 }
 
 /*
@@ -1604,8 +1549,8 @@ static long do_wait(struct wait_opts *wo)
 	return retval;
 }
 
-SYSCALL_DEFINE5(waitid, int, which, pid_t, upid, struct siginfo __user *,
-		infop, int, options, struct rusage __user *, ru)
+static long kernel_waitid(int which, pid_t upid, struct waitid_info *infop,
+			  int options, struct rusage *ru)
 {
 	struct wait_opts wo;
 	struct pid *pid = NULL;
@@ -1643,38 +1588,46 @@ SYSCALL_DEFINE5(waitid, int, which, pid_t, upid, struct siginfo __user *,
 	wo.wo_pid	= pid;
 	wo.wo_flags	= options;
 	wo.wo_info	= infop;
-	wo.wo_stat	= NULL;
 	wo.wo_rusage	= ru;
 	ret = do_wait(&wo);
 
-	if (ret > 0) {
+	if (ret > 0)
 		ret = 0;
-	} else if (infop) {
-		/*
-		 * For a WNOHANG return, clear out all the fields
-		 * we would set so the user can easily tell the
-		 * difference.
-		 */
-		if (!ret)
-			ret = put_user(0, &infop->si_signo);
-		if (!ret)
-			ret = put_user(0, &infop->si_errno);
-		if (!ret)
-			ret = put_user(0, &infop->si_code);
-		if (!ret)
-			ret = put_user(0, &infop->si_pid);
-		if (!ret)
-			ret = put_user(0, &infop->si_uid);
-		if (!ret)
-			ret = put_user(0, &infop->si_status);
-	}
 
 	put_pid(pid);
 	return ret;
 }
 
-SYSCALL_DEFINE4(wait4, pid_t, upid, int __user *, stat_addr,
-		int, options, struct rusage __user *, ru)
+SYSCALL_DEFINE5(waitid, int, which, pid_t, upid, struct siginfo __user *,
+		infop, int, options, struct rusage __user *, ru)
+{
+	struct rusage r;
+	struct waitid_info info = {.status = 0};
+	long err = kernel_waitid(which, upid, &info, options, ru ? &r : NULL);
+
+	if (!err) {
+		if (ru && copy_to_user(ru, &r, sizeof(struct rusage)))
+			return -EFAULT;
+	}
+	if (!infop)
+		return err;
+
+	user_access_begin();
+	unsafe_put_user(err ? 0 : SIGCHLD, &infop->si_signo, Efault);
+	unsafe_put_user(0, &infop->si_errno, Efault);
+	unsafe_put_user((short)info.cause, &infop->si_code, Efault);
+	unsafe_put_user(info.pid, &infop->si_pid, Efault);
+	unsafe_put_user(info.uid, &infop->si_uid, Efault);
+	unsafe_put_user(info.status, &infop->si_status, Efault);
+	user_access_end();
+	return err;
+Efault:
+	user_access_end();
+	return -EFAULT;
+}
+
+long kernel_wait4(pid_t upid, int __user *stat_addr, int options,
+		  struct rusage *ru)
 {
 	struct wait_opts wo;
 	struct pid *pid = NULL;
@@ -1702,14 +1655,29 @@ SYSCALL_DEFINE4(wait4, pid_t, upid, int __user *, stat_addr,
 	wo.wo_pid	= pid;
 	wo.wo_flags	= options | WEXITED;
 	wo.wo_info	= NULL;
-	wo.wo_stat	= stat_addr;
+	wo.wo_stat	= 0;
 	wo.wo_rusage	= ru;
 	ret = do_wait(&wo);
 	put_pid(pid);
+	if (ret > 0 && stat_addr && put_user(wo.wo_stat, stat_addr))
+		ret = -EFAULT;
 
 	return ret;
 }
 
+SYSCALL_DEFINE4(wait4, pid_t, upid, int __user *, stat_addr,
+		int, options, struct rusage __user *, ru)
+{
+	struct rusage r;
+	long err = kernel_wait4(upid, stat_addr, options, ru ? &r : NULL);
+
+	if (err > 0) {
+		if (ru && copy_to_user(ru, &r, sizeof(struct rusage)))
+			return -EFAULT;
+	}
+	return err;
+}
+
 #ifdef __ARCH_WANT_SYS_WAITPID
 
 /*
@@ -1722,3 +1690,56 @@ SYSCALL_DEFINE3(waitpid, pid_t, pid, int __user *, stat_addr, int, options)
 }
 
 #endif
+
+#ifdef CONFIG_COMPAT
+COMPAT_SYSCALL_DEFINE4(wait4,
+	compat_pid_t, pid,
+	compat_uint_t __user *, stat_addr,
+	int, options,
+	struct compat_rusage __user *, ru)
+{
+	struct rusage r;
+	long err = kernel_wait4(pid, stat_addr, options, ru ? &r : NULL);
+	if (err > 0) {
+		if (ru && put_compat_rusage(&r, ru))
+			return -EFAULT;
+	}
+	return err;
+}
+
+COMPAT_SYSCALL_DEFINE5(waitid,
+		int, which, compat_pid_t, pid,
+		struct compat_siginfo __user *, infop, int, options,
+		struct compat_rusage __user *, uru)
+{
+	struct rusage ru;
+	struct waitid_info info = {.status = 0};
+	long err = kernel_waitid(which, pid, &info, options, uru ? &ru : NULL);
+
+	if (!err && uru) {
+		/* kernel_waitid() overwrites everything in ru */
+		if (COMPAT_USE_64BIT_TIME)
+			err = copy_to_user(uru, &ru, sizeof(ru));
+		else
+			err = put_compat_rusage(&ru, uru);
+		if (err)
+			return -EFAULT;
+	}
+
+	if (!infop)
+		return err;
+
+	user_access_begin();
+	unsafe_put_user(err ? 0 : SIGCHLD, &infop->si_signo, Efault);
+	unsafe_put_user(0, &infop->si_errno, Efault);
+	unsafe_put_user((short)info.cause, &infop->si_code, Efault);
+	unsafe_put_user(info.pid, &infop->si_pid, Efault);
+	unsafe_put_user(info.uid, &infop->si_uid, Efault);
+	unsafe_put_user(info.status, &infop->si_status, Efault);
+	user_access_end();
+	return err;
+Efault:
+	user_access_end();
+	return -EFAULT;
+}
+#endif
kernel/sys.c
@@ -1552,7 +1552,7 @@ static void accumulate_thread_rusage(struct task_struct *t, struct rusage *r)
 	r->ru_oublock += task_io_get_oublock(t);
 }
 
-static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
+void getrusage(struct task_struct *p, int who, struct rusage *r)
 {
 	struct task_struct *t;
 	unsigned long flags;
@@ -1626,20 +1626,16 @@
 	r->ru_maxrss = maxrss * (PAGE_SIZE / 1024); /* convert pages to KBs */
 }
 
-int getrusage(struct task_struct *p, int who, struct rusage __user *ru)
+SYSCALL_DEFINE2(getrusage, int, who, struct rusage __user *, ru)
 {
 	struct rusage r;
 
-	k_getrusage(p, who, &r);
-	return copy_to_user(ru, &r, sizeof(r)) ? -EFAULT : 0;
-}
-
-SYSCALL_DEFINE2(getrusage, int, who, struct rusage __user *, ru)
-{
 	if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN &&
 	    who != RUSAGE_THREAD)
 		return -EINVAL;
-	return getrusage(current, who, ru);
+
+	getrusage(current, who, &r);
+	return copy_to_user(ru, &r, sizeof(r)) ? -EFAULT : 0;
 }
 
 #ifdef CONFIG_COMPAT
@@ -1651,7 +1647,7 @@ COMPAT_SYSCALL_DEFINE2(getrusage, int, who, struct compat_rusage __user *, ru)
 	    who != RUSAGE_THREAD)
 		return -EINVAL;
 
-	k_getrusage(current, who, &r);
+	getrusage(current, who, &r);
 	return put_compat_rusage(&r, ru);
 }
 #endif
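For completeness: the user-visible wait4(2)/waitid(2) behaviour is not meant to change with this series, only the in-kernel plumbing. A small userspace demo of wait4(2) (standard libc/POSIX API, not part of this diff) that exercises both the status and the rusage copyout these syscalls now perform in one place:

#define _DEFAULT_SOURCE
#include <stdio.h>
#include <sys/resource.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	pid_t child = fork();

	if (child < 0) {
		perror("fork");
		return 1;
	}
	if (child == 0)
		_exit(7);		/* child: exit with code 7 */

	int status;
	struct rusage ru;
	pid_t reaped = wait4(child, &status, 0, &ru);	/* status + rusage in one call */

	if (reaped < 0) {
		perror("wait4");
		return 1;
	}
	if (WIFEXITED(status))
		printf("pid %d exited with %d, utime %ld.%06lds\n",
		       reaped, WEXITSTATUS(status),
		       (long)ru.ru_utime.tv_sec, (long)ru.ru_utime.tv_usec);
	return 0;
}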