Merge branch 'signal-for-v5.17' of git://git.kernel.org/pub/scm/linux/kernel/git/ebiederm/user-namespace
Pull signal/exit/ptrace updates from Eric Biederman: "This set of changes deletes some dead code, makes a lot of cleanups which hopefully make the code easier to follow, and fixes bugs found along the way. The end-game which I have not yet reached yet is for fatal signals that generate coredumps to be short-circuit deliverable from complete_signal, for force_siginfo_to_task not to require changing userspace configured signal delivery state, and for the ptrace stops to always happen in locations where we can guarantee on all architectures that the all of the registers are saved and available on the stack. Removal of profile_task_ext, profile_munmap, and profile_handoff_task are the big successes for dead code removal this round. A bunch of small bug fixes are included, as most of the issues reported were small enough that they would not affect bisection so I simply added the fixes and did not fold the fixes into the changes they were fixing. There was a bug that broke coredumps piped to systemd-coredump. I dropped the change that caused that bug and replaced it entirely with something much more restrained. Unfortunately that required some rebasing. Some successes after this set of changes: There are few enough calls to do_exit to audit in a reasonable amount of time. The lifetime of struct kthread now matches the lifetime of struct task, and the pointer to struct kthread is no longer stored in set_child_tid. The flag SIGNAL_GROUP_COREDUMP is removed. The field group_exit_task is removed. Issues where task->exit_code was examined with signal->group_exit_code should been examined were fixed. There are several loosely related changes included because I am cleaning up and if I don't include them they will probably get lost. The original postings of these changes can be found at: https://lkml.kernel.org/r/87a6ha4zsd.fsf@email.froward.int.ebiederm.org https://lkml.kernel.org/r/87bl1kunjj.fsf@email.froward.int.ebiederm.org https://lkml.kernel.org/r/87r19opkx1.fsf_-_@email.froward.int.ebiederm.org I trimmed back the last set of changes to only the obviously correct once. Simply because there was less time for review than I had hoped" * 'signal-for-v5.17' of git://git.kernel.org/pub/scm/linux/kernel/git/ebiederm/user-namespace: (44 commits) ptrace/m68k: Stop open coding ptrace_report_syscall ptrace: Remove unused regs argument from ptrace_report_syscall ptrace: Remove second setting of PT_SEIZED in ptrace_attach taskstats: Cleanup the use of task->exit_code exit: Use the correct exit_code in /proc/<pid>/stat exit: Fix the exit_code for wait_task_zombie exit: Coredumps reach do_group_exit exit: Remove profile_handoff_task exit: Remove profile_task_exit & profile_munmap signal: clean up kernel-doc comments signal: Remove the helper signal_group_exit signal: Rename group_exit_task group_exec_task coredump: Stop setting signal->group_exit_task signal: Remove SIGNAL_GROUP_COREDUMP signal: During coredumps set SIGNAL_GROUP_EXIT in zap_process signal: Make coredump handling explicit in complete_signal signal: Have prepare_signal detect coredumps using signal->core_state signal: Have the oom killer detect coredumps using signal->core_state exit: Move force_uaccess back into do_exit exit: Guarantee make_task_dead leaks the tsk when calling do_task_exit ...
This commit is contained in:
commit
35ce8ae9ae
|
@ -190,7 +190,7 @@ die_if_kernel(char * str, struct pt_regs *regs, long err, unsigned long *r9_15)
|
|||
local_irq_enable();
|
||||
while (1);
|
||||
}
|
||||
do_exit(SIGSEGV);
|
||||
make_task_dead(SIGSEGV);
|
||||
}
|
||||
|
||||
#ifndef CONFIG_MATHEMU
|
||||
|
@ -575,7 +575,7 @@ do_entUna(void * va, unsigned long opcode, unsigned long reg,
|
|||
|
||||
printk("Bad unaligned kernel access at %016lx: %p %lx %lu\n",
|
||||
pc, va, opcode, reg);
|
||||
do_exit(SIGSEGV);
|
||||
make_task_dead(SIGSEGV);
|
||||
|
||||
got_exception:
|
||||
/* Ok, we caught the exception, but we don't want it. Is there
|
||||
|
@ -630,7 +630,7 @@ do_entUna(void * va, unsigned long opcode, unsigned long reg,
|
|||
local_irq_enable();
|
||||
while (1);
|
||||
}
|
||||
do_exit(SIGSEGV);
|
||||
make_task_dead(SIGSEGV);
|
||||
}
|
||||
|
||||
/*
|
||||
|
|
|
@ -202,7 +202,7 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
|
|||
printk(KERN_ALERT "Unable to handle kernel paging request at "
|
||||
"virtual address %016lx\n", address);
|
||||
die_if_kernel("Oops", regs, cause, (unsigned long*)regs - 16);
|
||||
do_exit(SIGKILL);
|
||||
make_task_dead(SIGKILL);
|
||||
|
||||
/* We ran out of memory, or some other thing happened to us that
|
||||
made us unable to handle the page fault gracefully. */
|
||||
|
|
|
@ -335,7 +335,7 @@ static void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
|
|||
if (panic_on_oops)
|
||||
panic("Fatal exception");
|
||||
if (signr)
|
||||
do_exit(signr);
|
||||
make_task_dead(signr);
|
||||
}
|
||||
|
||||
/*
|
||||
|
|
|
@ -117,7 +117,7 @@ static void die_kernel_fault(const char *msg, struct mm_struct *mm,
|
|||
show_pte(KERN_ALERT, mm, addr);
|
||||
die("Oops", regs, fsr);
|
||||
bust_spinlocks(0);
|
||||
do_exit(SIGKILL);
|
||||
make_task_dead(SIGKILL);
|
||||
}
|
||||
|
||||
/*
|
||||
|
|
|
@ -235,7 +235,7 @@ void die(const char *str, struct pt_regs *regs, int err)
|
|||
raw_spin_unlock_irqrestore(&die_lock, flags);
|
||||
|
||||
if (ret != NOTIFY_STOP)
|
||||
do_exit(SIGSEGV);
|
||||
make_task_dead(SIGSEGV);
|
||||
}
|
||||
|
||||
static void arm64_show_signal(int signo, const char *str)
|
||||
|
|
|
@ -304,7 +304,7 @@ static void die_kernel_fault(const char *msg, unsigned long addr,
|
|||
show_pte(addr);
|
||||
die("Oops", regs, esr);
|
||||
bust_spinlocks(0);
|
||||
do_exit(SIGKILL);
|
||||
make_task_dead(SIGKILL);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_KASAN_HW_TAGS
|
||||
|
|
|
@ -294,7 +294,7 @@ void csky_alignment(struct pt_regs *regs)
|
|||
__func__, opcode, rz, rx, imm, addr);
|
||||
show_regs(regs);
|
||||
bust_spinlocks(0);
|
||||
do_exit(SIGKILL);
|
||||
make_task_dead(SIGKILL);
|
||||
}
|
||||
|
||||
force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)addr);
|
||||
|
|
|
@ -109,7 +109,7 @@ void die(struct pt_regs *regs, const char *str)
|
|||
if (panic_on_oops)
|
||||
panic("Fatal exception");
|
||||
if (ret != NOTIFY_STOP)
|
||||
do_exit(SIGSEGV);
|
||||
make_task_dead(SIGSEGV);
|
||||
}
|
||||
|
||||
void do_trap(struct pt_regs *regs, int signo, int code, unsigned long addr)
|
||||
|
|
|
@ -67,7 +67,7 @@ static inline void no_context(struct pt_regs *regs, unsigned long addr)
|
|||
pr_alert("Unable to handle kernel paging request at virtual "
|
||||
"addr 0x%08lx, pc: 0x%08lx\n", addr, regs->pc);
|
||||
die(regs, "Oops");
|
||||
do_exit(SIGKILL);
|
||||
make_task_dead(SIGKILL);
|
||||
}
|
||||
|
||||
static inline void mm_fault_error(struct pt_regs *regs, unsigned long addr, vm_fault_t fault)
|
||||
|
|
|
@ -17,6 +17,7 @@
|
|||
#include <linux/types.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/sched/debug.h>
|
||||
#include <linux/sched/task.h>
|
||||
#include <linux/mm_types.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/errno.h>
|
||||
|
@ -106,7 +107,7 @@ void die(const char *str, struct pt_regs *fp, unsigned long err)
|
|||
dump(fp);
|
||||
|
||||
spin_unlock_irq(&die_lock);
|
||||
do_exit(SIGSEGV);
|
||||
make_task_dead(SIGSEGV);
|
||||
}
|
||||
|
||||
static int kstack_depth_to_print = 24;
|
||||
|
|
|
@ -51,7 +51,7 @@ asmlinkage int do_page_fault(struct pt_regs *regs, unsigned long address,
|
|||
printk(" at virtual address %08lx\n", address);
|
||||
if (!user_mode(regs))
|
||||
die("Oops", regs, error_code);
|
||||
do_exit(SIGKILL);
|
||||
make_task_dead(SIGKILL);
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
|
|
@ -214,7 +214,7 @@ int die(const char *str, struct pt_regs *regs, long err)
|
|||
panic("Fatal exception");
|
||||
|
||||
oops_exit();
|
||||
do_exit(err);
|
||||
make_task_dead(err);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@ -176,7 +176,7 @@ mca_handler_bh(unsigned long paddr, void *iip, unsigned long ipsr)
|
|||
spin_unlock(&mca_bh_lock);
|
||||
|
||||
/* This process is about to be killed itself */
|
||||
do_exit(SIGKILL);
|
||||
make_task_dead(SIGKILL);
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
@ -85,7 +85,7 @@ die (const char *str, struct pt_regs *regs, long err)
|
|||
if (panic_on_oops)
|
||||
panic("Fatal exception");
|
||||
|
||||
do_exit(SIGSEGV);
|
||||
make_task_dead(SIGSEGV);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@ -257,7 +257,7 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
|
|||
regs = NULL;
|
||||
bust_spinlocks(0);
|
||||
if (regs)
|
||||
do_exit(SIGKILL);
|
||||
make_task_dead(SIGKILL);
|
||||
return;
|
||||
|
||||
out_of_memory:
|
||||
|
|
|
@ -273,17 +273,7 @@ long arch_ptrace(struct task_struct *child, long request,
|
|||
|
||||
asmlinkage void syscall_trace(void)
|
||||
{
|
||||
ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
|
||||
? 0x80 : 0));
|
||||
/*
|
||||
* this isn't the same as continuing with a signal, but it will do
|
||||
* for normal use. strace only continues with a signal if the
|
||||
* stopping signal is not SIGTRAP. -brl
|
||||
*/
|
||||
if (current->exit_code) {
|
||||
send_sig(current->exit_code, current, 1);
|
||||
current->exit_code = 0;
|
||||
}
|
||||
ptrace_report_syscall(0);
|
||||
}
|
||||
|
||||
#if defined(CONFIG_COLDFIRE) || !defined(CONFIG_MMU)
|
||||
|
|
|
@ -1131,7 +1131,7 @@ void die_if_kernel (char *str, struct pt_regs *fp, int nr)
|
|||
pr_crit("%s: %08x\n", str, nr);
|
||||
show_registers(fp);
|
||||
add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
|
||||
do_exit(SIGSEGV);
|
||||
make_task_dead(SIGSEGV);
|
||||
}
|
||||
|
||||
asmlinkage void set_esp0(unsigned long ssp)
|
||||
|
|
|
@ -48,7 +48,7 @@ int send_fault_sig(struct pt_regs *regs)
|
|||
pr_alert("Unable to handle kernel access");
|
||||
pr_cont(" at virtual address %p\n", addr);
|
||||
die_if_kernel("Oops", regs, 0 /*error_code*/);
|
||||
do_exit(SIGKILL);
|
||||
make_task_dead(SIGKILL);
|
||||
}
|
||||
|
||||
return 1;
|
||||
|
|
|
@ -44,10 +44,10 @@ void die(const char *str, struct pt_regs *fp, long err)
|
|||
pr_warn("Oops: %s, sig: %ld\n", str, err);
|
||||
show_regs(fp);
|
||||
spin_unlock_irq(&die_lock);
|
||||
/* do_exit() should take care of panic'ing from an interrupt
|
||||
/* make_task_dead() should take care of panic'ing from an interrupt
|
||||
* context so we don't handle it here
|
||||
*/
|
||||
do_exit(err);
|
||||
make_task_dead(err);
|
||||
}
|
||||
|
||||
/* for user application debugging */
|
||||
|
|
|
@ -422,7 +422,7 @@ void __noreturn die(const char *str, struct pt_regs *regs)
|
|||
if (regs && kexec_should_crash(current))
|
||||
crash_kexec(regs);
|
||||
|
||||
do_exit(sig);
|
||||
make_task_dead(sig);
|
||||
}
|
||||
|
||||
extern struct exception_table_entry __start___dbe_table[];
|
||||
|
|
|
@ -223,7 +223,7 @@ inline void handle_fpu_exception(struct pt_regs *regs)
|
|||
}
|
||||
} else if (fpcsr & FPCSR_mskRIT) {
|
||||
if (!user_mode(regs))
|
||||
do_exit(SIGILL);
|
||||
make_task_dead(SIGILL);
|
||||
si_signo = SIGILL;
|
||||
}
|
||||
|
||||
|
|
|
@ -141,7 +141,7 @@ void __noreturn die(const char *str, struct pt_regs *regs, int err)
|
|||
|
||||
bust_spinlocks(0);
|
||||
spin_unlock_irq(&die_lock);
|
||||
do_exit(SIGSEGV);
|
||||
make_task_dead(SIGSEGV);
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL(die);
|
||||
|
@ -240,7 +240,7 @@ void unhandled_interruption(struct pt_regs *regs)
|
|||
pr_emerg("unhandled_interruption\n");
|
||||
show_regs(regs);
|
||||
if (!user_mode(regs))
|
||||
do_exit(SIGKILL);
|
||||
make_task_dead(SIGKILL);
|
||||
force_sig(SIGKILL);
|
||||
}
|
||||
|
||||
|
@ -251,7 +251,7 @@ void unhandled_exceptions(unsigned long entry, unsigned long addr,
|
|||
addr, type);
|
||||
show_regs(regs);
|
||||
if (!user_mode(regs))
|
||||
do_exit(SIGKILL);
|
||||
make_task_dead(SIGKILL);
|
||||
force_sig(SIGKILL);
|
||||
}
|
||||
|
||||
|
@ -278,7 +278,7 @@ void do_revinsn(struct pt_regs *regs)
|
|||
pr_emerg("Reserved Instruction\n");
|
||||
show_regs(regs);
|
||||
if (!user_mode(regs))
|
||||
do_exit(SIGILL);
|
||||
make_task_dead(SIGILL);
|
||||
force_sig(SIGILL);
|
||||
}
|
||||
|
||||
|
|
|
@ -37,10 +37,10 @@ void die(const char *str, struct pt_regs *regs, long err)
|
|||
show_regs(regs);
|
||||
spin_unlock_irq(&die_lock);
|
||||
/*
|
||||
* do_exit() should take care of panic'ing from an interrupt
|
||||
* make_task_dead() should take care of panic'ing from an interrupt
|
||||
* context so we don't handle it here
|
||||
*/
|
||||
do_exit(err);
|
||||
make_task_dead(err);
|
||||
}
|
||||
|
||||
void _exception(int signo, struct pt_regs *regs, int code, unsigned long addr)
|
||||
|
|
|
@ -212,7 +212,7 @@ void __noreturn die(const char *str, struct pt_regs *regs, long err)
|
|||
__asm__ __volatile__("l.nop 1");
|
||||
do {} while (1);
|
||||
#endif
|
||||
do_exit(SIGSEGV);
|
||||
make_task_dead(SIGSEGV);
|
||||
}
|
||||
|
||||
/* This is normally the 'Oops' routine */
|
||||
|
|
|
@ -269,7 +269,7 @@ void die_if_kernel(char *str, struct pt_regs *regs, long err)
|
|||
panic("Fatal exception");
|
||||
|
||||
oops_exit();
|
||||
do_exit(SIGSEGV);
|
||||
make_task_dead(SIGSEGV);
|
||||
}
|
||||
|
||||
/* gdb uses break 4,8 */
|
||||
|
|
|
@ -245,7 +245,7 @@ static void oops_end(unsigned long flags, struct pt_regs *regs,
|
|||
|
||||
if (panic_on_oops)
|
||||
panic("Fatal exception");
|
||||
do_exit(signr);
|
||||
make_task_dead(signr);
|
||||
}
|
||||
NOKPROBE_SYMBOL(oops_end);
|
||||
|
||||
|
@ -792,9 +792,9 @@ int machine_check_generic(struct pt_regs *regs)
|
|||
void die_mce(const char *str, struct pt_regs *regs, long err)
|
||||
{
|
||||
/*
|
||||
* The machine check wants to kill the interrupted context, but
|
||||
* do_exit() checks for in_interrupt() and panics in that case, so
|
||||
* exit the irq/nmi before calling die.
|
||||
* The machine check wants to kill the interrupted context,
|
||||
* but make_task_dead() checks for in_interrupt() and panics
|
||||
* in that case, so exit the irq/nmi before calling die.
|
||||
*/
|
||||
if (in_nmi())
|
||||
nmi_exit();
|
||||
|
|
|
@ -54,7 +54,7 @@ void die(struct pt_regs *regs, const char *str)
|
|||
if (panic_on_oops)
|
||||
panic("Fatal exception");
|
||||
if (ret != NOTIFY_STOP)
|
||||
do_exit(SIGSEGV);
|
||||
make_task_dead(SIGSEGV);
|
||||
}
|
||||
|
||||
void do_trap(struct pt_regs *regs, int signo, int code, unsigned long addr)
|
||||
|
|
|
@ -31,7 +31,7 @@ static void die_kernel_fault(const char *msg, unsigned long addr,
|
|||
|
||||
bust_spinlocks(0);
|
||||
die(regs, "Oops");
|
||||
do_exit(SIGKILL);
|
||||
make_task_dead(SIGKILL);
|
||||
}
|
||||
|
||||
static inline void no_context(struct pt_regs *regs, unsigned long addr)
|
||||
|
|
|
@ -224,5 +224,5 @@ void __noreturn die(struct pt_regs *regs, const char *str)
|
|||
if (panic_on_oops)
|
||||
panic("Fatal exception: panic_on_oops");
|
||||
oops_exit();
|
||||
do_exit(SIGSEGV);
|
||||
make_task_dead(SIGSEGV);
|
||||
}
|
||||
|
|
|
@ -166,7 +166,7 @@ void __s390_handle_mcck(void)
|
|||
"malfunction (code 0x%016lx).\n", mcck.mcck_code);
|
||||
printk(KERN_EMERG "mcck: task: %s, pid: %d.\n",
|
||||
current->comm, current->pid);
|
||||
do_exit(SIGSEGV);
|
||||
make_task_dead(SIGSEGV);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -57,7 +57,7 @@ void __noreturn die(const char *str, struct pt_regs *regs, long err)
|
|||
if (panic_on_oops)
|
||||
panic("Fatal exception");
|
||||
|
||||
do_exit(SIGSEGV);
|
||||
make_task_dead(SIGSEGV);
|
||||
}
|
||||
|
||||
void die_if_kernel(const char *str, struct pt_regs *regs, long err)
|
||||
|
|
|
@ -86,9 +86,7 @@ void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
|
|||
}
|
||||
printk("Instruction DUMP:");
|
||||
instruction_dump ((unsigned long *) regs->pc);
|
||||
if(regs->psr & PSR_PS)
|
||||
do_exit(SIGKILL);
|
||||
do_exit(SIGSEGV);
|
||||
make_task_dead((regs->psr & PSR_PS) ? SIGKILL : SIGSEGV);
|
||||
}
|
||||
|
||||
void do_hw_interrupt(struct pt_regs *regs, unsigned long type)
|
||||
|
|
|
@ -2559,9 +2559,7 @@ void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
|
|||
}
|
||||
if (panic_on_oops)
|
||||
panic("Fatal exception");
|
||||
if (regs->tstate & TSTATE_PRIV)
|
||||
do_exit(SIGKILL);
|
||||
do_exit(SIGSEGV);
|
||||
make_task_dead((regs->tstate & TSTATE_PRIV)? SIGKILL : SIGSEGV);
|
||||
}
|
||||
EXPORT_SYMBOL(die_if_kernel);
|
||||
|
||||
|
|
|
@ -1241,14 +1241,14 @@ SYM_CODE_START(asm_exc_nmi)
|
|||
SYM_CODE_END(asm_exc_nmi)
|
||||
|
||||
.pushsection .text, "ax"
|
||||
SYM_CODE_START(rewind_stack_do_exit)
|
||||
SYM_CODE_START(rewind_stack_and_make_dead)
|
||||
/* Prevent any naive code from trying to unwind to our caller. */
|
||||
xorl %ebp, %ebp
|
||||
|
||||
movl PER_CPU_VAR(cpu_current_top_of_stack), %esi
|
||||
leal -TOP_OF_KERNEL_STACK_PADDING-PTREGS_SIZE(%esi), %esp
|
||||
|
||||
call do_exit
|
||||
call make_task_dead
|
||||
1: jmp 1b
|
||||
SYM_CODE_END(rewind_stack_do_exit)
|
||||
SYM_CODE_END(rewind_stack_and_make_dead)
|
||||
.popsection
|
||||
|
|
|
@ -1427,7 +1427,7 @@ SYM_CODE_END(ignore_sysret)
|
|||
#endif
|
||||
|
||||
.pushsection .text, "ax"
|
||||
SYM_CODE_START(rewind_stack_do_exit)
|
||||
SYM_CODE_START(rewind_stack_and_make_dead)
|
||||
UNWIND_HINT_FUNC
|
||||
/* Prevent any naive code from trying to unwind to our caller. */
|
||||
xorl %ebp, %ebp
|
||||
|
@ -1436,6 +1436,6 @@ SYM_CODE_START(rewind_stack_do_exit)
|
|||
leaq -PTREGS_SIZE(%rax), %rsp
|
||||
UNWIND_HINT_REGS
|
||||
|
||||
call do_exit
|
||||
SYM_CODE_END(rewind_stack_do_exit)
|
||||
call make_task_dead
|
||||
SYM_CODE_END(rewind_stack_and_make_dead)
|
||||
.popsection
|
||||
|
|
|
@ -351,7 +351,7 @@ unsigned long oops_begin(void)
|
|||
}
|
||||
NOKPROBE_SYMBOL(oops_begin);
|
||||
|
||||
void __noreturn rewind_stack_do_exit(int signr);
|
||||
void __noreturn rewind_stack_and_make_dead(int signr);
|
||||
|
||||
void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
|
||||
{
|
||||
|
@ -386,7 +386,7 @@ void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
|
|||
* reuse the task stack and that existing poisons are invalid.
|
||||
*/
|
||||
kasan_unpoison_task_stack(current);
|
||||
rewind_stack_do_exit(signr);
|
||||
rewind_stack_and_make_dead(signr);
|
||||
}
|
||||
NOKPROBE_SYMBOL(oops_end);
|
||||
|
||||
|
|
|
@ -1433,7 +1433,7 @@ ENTRY(fast_syscall_spill_registers)
|
|||
rsync
|
||||
|
||||
movi abi_arg0, SIGSEGV
|
||||
abi_call do_exit
|
||||
abi_call make_task_dead
|
||||
|
||||
/* shouldn't return, so panic */
|
||||
|
||||
|
|
|
@ -552,5 +552,5 @@ void __noreturn die(const char * str, struct pt_regs * regs, long err)
|
|||
if (panic_on_oops)
|
||||
panic("Fatal exception");
|
||||
|
||||
do_exit(err);
|
||||
make_task_dead(err);
|
||||
}
|
||||
|
|
|
@ -67,7 +67,7 @@ static int cryptomgr_probe(void *data)
|
|||
complete_all(¶m->larval->completion);
|
||||
crypto_alg_put(¶m->larval->alg);
|
||||
kfree(param);
|
||||
module_put_and_exit(0);
|
||||
module_put_and_kthread_exit(0);
|
||||
}
|
||||
|
||||
static int cryptomgr_schedule_probe(struct crypto_larval *larval)
|
||||
|
@ -190,7 +190,7 @@ static int cryptomgr_test(void *data)
|
|||
crypto_alg_tested(param->driver, err);
|
||||
|
||||
kfree(param);
|
||||
module_put_and_exit(0);
|
||||
module_put_and_kthread_exit(0);
|
||||
}
|
||||
|
||||
static int cryptomgr_schedule_test(struct crypto_alg *alg)
|
||||
|
|
|
@ -63,7 +63,7 @@ static void rsi_coex_scheduler_thread(struct rsi_common *common)
|
|||
rsi_coex_sched_tx_pkts(coex_cb);
|
||||
} while (atomic_read(&coex_cb->coex_tx_thread.thread_done) == 0);
|
||||
|
||||
complete_and_exit(&coex_cb->coex_tx_thread.completion, 0);
|
||||
kthread_complete_and_exit(&coex_cb->coex_tx_thread.completion, 0);
|
||||
}
|
||||
|
||||
int rsi_coex_recv_pkt(struct rsi_common *common, u8 *msg)
|
||||
|
|
|
@ -264,7 +264,7 @@ static void rsi_tx_scheduler_thread(struct rsi_common *common)
|
|||
if (common->init_done)
|
||||
rsi_core_qos_processor(common);
|
||||
} while (atomic_read(&common->tx_thread.thread_done) == 0);
|
||||
complete_and_exit(&common->tx_thread.completion, 0);
|
||||
kthread_complete_and_exit(&common->tx_thread.completion, 0);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_RSI_COEX
|
||||
|
|
|
@ -75,7 +75,7 @@ void rsi_sdio_rx_thread(struct rsi_common *common)
|
|||
|
||||
rsi_dbg(INFO_ZONE, "%s: Terminated SDIO RX thread\n", __func__);
|
||||
atomic_inc(&sdev->rx_thread.thread_done);
|
||||
complete_and_exit(&sdev->rx_thread.completion, 0);
|
||||
kthread_complete_and_exit(&sdev->rx_thread.completion, 0);
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
@ -56,6 +56,6 @@ void rsi_usb_rx_thread(struct rsi_common *common)
|
|||
out:
|
||||
rsi_dbg(INFO_ZONE, "%s: Terminated thread\n", __func__);
|
||||
skb_queue_purge(&dev->rx_q);
|
||||
complete_and_exit(&dev->rx_thread.completion, 0);
|
||||
kthread_complete_and_exit(&dev->rx_thread.completion, 0);
|
||||
}
|
||||
|
||||
|
|
|
@ -160,7 +160,7 @@ static int pnp_dock_thread(void *unused)
|
|||
* No dock to manage
|
||||
*/
|
||||
case PNP_FUNCTION_NOT_SUPPORTED:
|
||||
complete_and_exit(&unload_sem, 0);
|
||||
kthread_complete_and_exit(&unload_sem, 0);
|
||||
case PNP_SYSTEM_NOT_DOCKED:
|
||||
d = 0;
|
||||
break;
|
||||
|
@ -170,7 +170,7 @@ static int pnp_dock_thread(void *unused)
|
|||
default:
|
||||
pnpbios_print_status("pnp_dock_thread", status);
|
||||
printk(KERN_WARNING "PnPBIOS: disabling dock monitoring.\n");
|
||||
complete_and_exit(&unload_sem, 0);
|
||||
kthread_complete_and_exit(&unload_sem, 0);
|
||||
}
|
||||
if (d != docked) {
|
||||
if (pnp_dock_event(d, &now) == 0) {
|
||||
|
@ -183,7 +183,7 @@ static int pnp_dock_thread(void *unused)
|
|||
}
|
||||
}
|
||||
}
|
||||
complete_and_exit(&unload_sem, 0);
|
||||
kthread_complete_and_exit(&unload_sem, 0);
|
||||
}
|
||||
|
||||
static int pnpbios_get_resources(struct pnp_dev *dev)
|
||||
|
|
|
@ -450,13 +450,13 @@ static int rtsx_control_thread(void *__dev)
|
|||
* after the down() -- that's necessary for the thread-shutdown
|
||||
* case.
|
||||
*
|
||||
* complete_and_exit() goes even further than this -- it is safe in
|
||||
* the case that the thread of the caller is going away (not just
|
||||
* the structure) -- this is necessary for the module-remove case.
|
||||
* This is important in preemption kernels, which transfer the flow
|
||||
* of execution immediately upon a complete().
|
||||
* kthread_complete_and_exit() goes even further than this --
|
||||
* it is safe in the case that the thread of the caller is going away
|
||||
* (not just the structure) -- this is necessary for the module-remove
|
||||
* case. This is important in preemption kernels, which transfer the
|
||||
* flow of execution immediately upon a complete().
|
||||
*/
|
||||
complete_and_exit(&dev->control_exit, 0);
|
||||
kthread_complete_and_exit(&dev->control_exit, 0);
|
||||
}
|
||||
|
||||
static int rtsx_polling_thread(void *__dev)
|
||||
|
@ -501,7 +501,7 @@ static int rtsx_polling_thread(void *__dev)
|
|||
mutex_unlock(&dev->dev_mutex);
|
||||
}
|
||||
|
||||
complete_and_exit(&dev->polling_exit, 0);
|
||||
kthread_complete_and_exit(&dev->polling_exit, 0);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -682,7 +682,7 @@ static int rtsx_scan_thread(void *__dev)
|
|||
/* Should we unbind if no devices were detected? */
|
||||
}
|
||||
|
||||
complete_and_exit(&dev->scanning_done, 0);
|
||||
kthread_complete_and_exit(&dev->scanning_done, 0);
|
||||
}
|
||||
|
||||
static void rtsx_init_options(struct rtsx_chip *chip)
|
||||
|
|
|
@ -969,7 +969,7 @@ static int usbatm_do_heavy_init(void *arg)
|
|||
instance->thread = NULL;
|
||||
mutex_unlock(&instance->serialize);
|
||||
|
||||
complete_and_exit(&instance->thread_exited, ret);
|
||||
kthread_complete_and_exit(&instance->thread_exited, ret);
|
||||
}
|
||||
|
||||
static int usbatm_heavy_init(struct usbatm_data *instance)
|
||||
|
|
|
@ -2547,7 +2547,7 @@ static int fsg_main_thread(void *common_)
|
|||
up_write(&common->filesem);
|
||||
|
||||
/* Let fsg_unbind() know the thread has exited */
|
||||
complete_and_exit(&common->thread_notifier, 0);
|
||||
kthread_complete_and_exit(&common->thread_notifier, 0);
|
||||
}
|
||||
|
||||
|
||||
|
|
|
@ -1139,7 +1139,7 @@ cifs_demultiplex_thread(void *p)
|
|||
}
|
||||
|
||||
memalloc_noreclaim_restore(noreclaim_flag);
|
||||
module_put_and_exit(0);
|
||||
module_put_and_kthread_exit(0);
|
||||
}
|
||||
|
||||
/*
|
||||
|
|
|
@ -347,13 +347,13 @@ static int format_corename(struct core_name *cn, struct coredump_params *cprm,
|
|||
return ispipe;
|
||||
}
|
||||
|
||||
static int zap_process(struct task_struct *start, int exit_code, int flags)
|
||||
static int zap_process(struct task_struct *start, int exit_code)
|
||||
{
|
||||
struct task_struct *t;
|
||||
int nr = 0;
|
||||
|
||||
/* ignore all signals except SIGKILL, see prepare_signal() */
|
||||
start->signal->flags = SIGNAL_GROUP_COREDUMP | flags;
|
||||
start->signal->flags = SIGNAL_GROUP_EXIT;
|
||||
start->signal->group_exit_code = exit_code;
|
||||
start->signal->group_stop_count = 0;
|
||||
|
||||
|
@ -372,13 +372,13 @@ static int zap_process(struct task_struct *start, int exit_code, int flags)
|
|||
static int zap_threads(struct task_struct *tsk,
|
||||
struct core_state *core_state, int exit_code)
|
||||
{
|
||||
struct signal_struct *signal = tsk->signal;
|
||||
int nr = -EAGAIN;
|
||||
|
||||
spin_lock_irq(&tsk->sighand->siglock);
|
||||
if (!signal_group_exit(tsk->signal)) {
|
||||
tsk->signal->core_state = core_state;
|
||||
tsk->signal->group_exit_task = tsk;
|
||||
nr = zap_process(tsk, exit_code, 0);
|
||||
if (!(signal->flags & SIGNAL_GROUP_EXIT) && !signal->group_exec_task) {
|
||||
signal->core_state = core_state;
|
||||
nr = zap_process(tsk, exit_code);
|
||||
clear_tsk_thread_flag(tsk, TIF_SIGPENDING);
|
||||
tsk->flags |= PF_DUMPCORE;
|
||||
atomic_set(&core_state->nr_threads, nr);
|
||||
|
@ -426,8 +426,6 @@ static void coredump_finish(bool core_dumped)
|
|||
spin_lock_irq(¤t->sighand->siglock);
|
||||
if (core_dumped && !__fatal_signal_pending(current))
|
||||
current->signal->group_exit_code |= 0x80;
|
||||
current->signal->group_exit_task = NULL;
|
||||
current->signal->flags = SIGNAL_GROUP_EXIT;
|
||||
next = current->signal->core_state->dumper.next;
|
||||
current->signal->core_state = NULL;
|
||||
spin_unlock_irq(¤t->sighand->siglock);
|
||||
|
|
12
fs/exec.c
12
fs/exec.c
|
@ -1045,7 +1045,7 @@ static int de_thread(struct task_struct *tsk)
|
|||
* Kill all other threads in the thread group.
|
||||
*/
|
||||
spin_lock_irq(lock);
|
||||
if (signal_group_exit(sig)) {
|
||||
if ((sig->flags & SIGNAL_GROUP_EXIT) || sig->group_exec_task) {
|
||||
/*
|
||||
* Another group action in progress, just
|
||||
* return so that the signal is processed.
|
||||
|
@ -1054,7 +1054,7 @@ static int de_thread(struct task_struct *tsk)
|
|||
return -EAGAIN;
|
||||
}
|
||||
|
||||
sig->group_exit_task = tsk;
|
||||
sig->group_exec_task = tsk;
|
||||
sig->notify_count = zap_other_threads(tsk);
|
||||
if (!thread_group_leader(tsk))
|
||||
sig->notify_count--;
|
||||
|
@ -1082,7 +1082,7 @@ static int de_thread(struct task_struct *tsk)
|
|||
write_lock_irq(&tasklist_lock);
|
||||
/*
|
||||
* Do this under tasklist_lock to ensure that
|
||||
* exit_notify() can't miss ->group_exit_task
|
||||
* exit_notify() can't miss ->group_exec_task
|
||||
*/
|
||||
sig->notify_count = -1;
|
||||
if (likely(leader->exit_state))
|
||||
|
@ -1149,7 +1149,7 @@ static int de_thread(struct task_struct *tsk)
|
|||
release_task(leader);
|
||||
}
|
||||
|
||||
sig->group_exit_task = NULL;
|
||||
sig->group_exec_task = NULL;
|
||||
sig->notify_count = 0;
|
||||
|
||||
no_thread_group:
|
||||
|
@ -1162,7 +1162,7 @@ static int de_thread(struct task_struct *tsk)
|
|||
killed:
|
||||
/* protects against exit_notify() and __exit_signal() */
|
||||
read_lock(&tasklist_lock);
|
||||
sig->group_exit_task = NULL;
|
||||
sig->group_exec_task = NULL;
|
||||
sig->notify_count = 0;
|
||||
read_unlock(&tasklist_lock);
|
||||
return -EAGAIN;
|
||||
|
@ -1307,6 +1307,8 @@ int begin_new_exec(struct linux_binprm * bprm)
|
|||
*/
|
||||
force_uaccess_begin();
|
||||
|
||||
if (me->flags & PF_KTHREAD)
|
||||
free_kthread_struct(me);
|
||||
me->flags &= ~(PF_RANDOMIZE | PF_FORKNOEXEC | PF_KTHREAD |
|
||||
PF_NOFREEZE | PF_NO_SETAFFINITY);
|
||||
flush_thread();
|
||||
|
|
|
@ -670,7 +670,7 @@ static int io_wqe_worker(void *data)
|
|||
*/
|
||||
void io_wq_worker_running(struct task_struct *tsk)
|
||||
{
|
||||
struct io_worker *worker = tsk->pf_io_worker;
|
||||
struct io_worker *worker = tsk->worker_private;
|
||||
|
||||
if (!worker)
|
||||
return;
|
||||
|
@ -688,7 +688,7 @@ void io_wq_worker_running(struct task_struct *tsk)
|
|||
*/
|
||||
void io_wq_worker_sleeping(struct task_struct *tsk)
|
||||
{
|
||||
struct io_worker *worker = tsk->pf_io_worker;
|
||||
struct io_worker *worker = tsk->worker_private;
|
||||
|
||||
if (!worker)
|
||||
return;
|
||||
|
@ -707,7 +707,7 @@ void io_wq_worker_sleeping(struct task_struct *tsk)
|
|||
static void io_init_new_worker(struct io_wqe *wqe, struct io_worker *worker,
|
||||
struct task_struct *tsk)
|
||||
{
|
||||
tsk->pf_io_worker = worker;
|
||||
tsk->worker_private = worker;
|
||||
worker->task = tsk;
|
||||
set_cpus_allowed_ptr(tsk, wqe->cpu_mask);
|
||||
tsk->flags |= PF_NO_SETAFFINITY;
|
||||
|
|
|
@ -222,6 +222,6 @@ static inline void io_wq_worker_running(struct task_struct *tsk)
|
|||
static inline bool io_wq_current_is_worker(void)
|
||||
{
|
||||
return in_task() && (current->flags & PF_IO_WORKER) &&
|
||||
current->pf_io_worker;
|
||||
current->worker_private;
|
||||
}
|
||||
#endif
|
||||
|
|
|
@ -161,5 +161,5 @@ static int jffs2_garbage_collect_thread(void *_c)
|
|||
spin_lock(&c->erase_completion_lock);
|
||||
c->gc_task = NULL;
|
||||
spin_unlock(&c->erase_completion_lock);
|
||||
complete_and_exit(&c->gc_thread_exit, 0);
|
||||
kthread_complete_and_exit(&c->gc_thread_exit, 0);
|
||||
}
|
||||
|
|
|
@ -185,7 +185,7 @@ lockd(void *vrqstp)
|
|||
|
||||
svc_exit_thread(rqstp);
|
||||
|
||||
module_put_and_exit(0);
|
||||
module_put_and_kthread_exit(0);
|
||||
}
|
||||
|
||||
static int create_lockd_listener(struct svc_serv *serv, const char *name,
|
||||
|
|
|
@ -93,7 +93,7 @@ nfs4_callback_svc(void *vrqstp)
|
|||
svc_process(rqstp);
|
||||
}
|
||||
svc_exit_thread(rqstp);
|
||||
module_put_and_exit(0);
|
||||
module_put_and_kthread_exit(0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -137,7 +137,7 @@ nfs41_callback_svc(void *vrqstp)
|
|||
}
|
||||
}
|
||||
svc_exit_thread(rqstp);
|
||||
module_put_and_exit(0);
|
||||
module_put_and_kthread_exit(0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@ -2693,6 +2693,6 @@ static int nfs4_run_state_manager(void *ptr)
|
|||
allow_signal(SIGKILL);
|
||||
nfs4_state_manager(clp);
|
||||
nfs_put_client(clp);
|
||||
module_put_and_exit(0);
|
||||
module_put_and_kthread_exit(0);
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -1020,7 +1020,7 @@ nfsd(void *vrqstp)
|
|||
}
|
||||
|
||||
/* Release module */
|
||||
module_put_and_exit(0);
|
||||
module_put_and_kthread_exit(0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@ -468,6 +468,7 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
|
|||
u64 cgtime, gtime;
|
||||
unsigned long rsslim = 0;
|
||||
unsigned long flags;
|
||||
int exit_code = task->exit_code;
|
||||
|
||||
state = *get_task_state(task);
|
||||
vsize = eip = esp = 0;
|
||||
|
@ -531,6 +532,9 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
|
|||
maj_flt += sig->maj_flt;
|
||||
thread_group_cputime_adjusted(task, &utime, &stime);
|
||||
gtime += sig->gtime;
|
||||
|
||||
if (sig->flags & (SIGNAL_GROUP_EXIT | SIGNAL_STOP_STOPPED))
|
||||
exit_code = sig->group_exit_code;
|
||||
}
|
||||
|
||||
sid = task_session_nr_ns(task, ns);
|
||||
|
@ -630,7 +634,7 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
|
|||
seq_puts(m, " 0 0 0 0 0 0 0");
|
||||
|
||||
if (permitted)
|
||||
seq_put_decimal_ll(m, " ", task->exit_code);
|
||||
seq_put_decimal_ll(m, " ", exit_code);
|
||||
else
|
||||
seq_puts(m, " 0");
|
||||
|
||||
|
|
|
@ -155,11 +155,12 @@ static int signalfd_copyinfo(struct signalfd_siginfo __user *uinfo,
|
|||
static ssize_t signalfd_dequeue(struct signalfd_ctx *ctx, kernel_siginfo_t *info,
|
||||
int nonblock)
|
||||
{
|
||||
enum pid_type type;
|
||||
ssize_t ret;
|
||||
DECLARE_WAITQUEUE(wait, current);
|
||||
|
||||
spin_lock_irq(¤t->sighand->siglock);
|
||||
ret = dequeue_signal(current, &ctx->sigmask, info);
|
||||
ret = dequeue_signal(current, &ctx->sigmask, info, &type);
|
||||
switch (ret) {
|
||||
case 0:
|
||||
if (!nonblock)
|
||||
|
@ -174,7 +175,7 @@ static ssize_t signalfd_dequeue(struct signalfd_ctx *ctx, kernel_siginfo_t *info
|
|||
add_wait_queue(¤t->sighand->signalfd_wqh, &wait);
|
||||
for (;;) {
|
||||
set_current_state(TASK_INTERRUPTIBLE);
|
||||
ret = dequeue_signal(current, &ctx->sigmask, info);
|
||||
ret = dequeue_signal(current, &ctx->sigmask, info, &type);
|
||||
if (ret != 0)
|
||||
break;
|
||||
if (signal_pending(current)) {
|
||||
|
|
|
@ -187,7 +187,6 @@ static inline void might_fault(void) { }
|
|||
#endif
|
||||
|
||||
void do_exit(long error_code) __noreturn;
|
||||
void complete_and_exit(struct completion *, long) __noreturn;
|
||||
|
||||
extern int num_to_str(char *buf, int size,
|
||||
unsigned long long num, unsigned int width);
|
||||
|
|
|
@ -33,7 +33,7 @@ struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
|
|||
unsigned int cpu,
|
||||
const char *namefmt);
|
||||
|
||||
void set_kthread_struct(struct task_struct *p);
|
||||
bool set_kthread_struct(struct task_struct *p);
|
||||
|
||||
void kthread_set_per_cpu(struct task_struct *k, int cpu);
|
||||
bool kthread_is_per_cpu(struct task_struct *k);
|
||||
|
@ -95,6 +95,8 @@ void *kthread_probe_data(struct task_struct *k);
|
|||
int kthread_park(struct task_struct *k);
|
||||
void kthread_unpark(struct task_struct *k);
|
||||
void kthread_parkme(void);
|
||||
void kthread_exit(long result) __noreturn;
|
||||
void kthread_complete_and_exit(struct completion *, long) __noreturn;
|
||||
|
||||
int kthreadd(void *unused);
|
||||
extern struct task_struct *kthreadd_task;
|
||||
|
|
|
@ -595,9 +595,9 @@ int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
|
|||
/* Look for this name: can be of form module:name. */
|
||||
unsigned long module_kallsyms_lookup_name(const char *name);
|
||||
|
||||
extern void __noreturn __module_put_and_exit(struct module *mod,
|
||||
extern void __noreturn __module_put_and_kthread_exit(struct module *mod,
|
||||
long code);
|
||||
#define module_put_and_exit(code) __module_put_and_exit(THIS_MODULE, code)
|
||||
#define module_put_and_kthread_exit(code) __module_put_and_kthread_exit(THIS_MODULE, code)
|
||||
|
||||
#ifdef CONFIG_MODULE_UNLOAD
|
||||
int module_refcount(struct module *mod);
|
||||
|
@ -790,7 +790,7 @@ static inline int unregister_module_notifier(struct notifier_block *nb)
|
|||
return 0;
|
||||
}
|
||||
|
||||
#define module_put_and_exit(code) do_exit(code)
|
||||
#define module_put_and_kthread_exit(code) kthread_exit(code)
|
||||
|
||||
static inline void print_modules(void)
|
||||
{
|
||||
|
|
|
@ -31,11 +31,6 @@ static inline int create_proc_profile(void)
|
|||
}
|
||||
#endif
|
||||
|
||||
enum profile_type {
|
||||
PROFILE_TASK_EXIT,
|
||||
PROFILE_MUNMAP
|
||||
};
|
||||
|
||||
#ifdef CONFIG_PROFILING
|
||||
|
||||
extern int prof_on __read_mostly;
|
||||
|
@ -66,23 +61,6 @@ static inline void profile_hit(int type, void *ip)
|
|||
struct task_struct;
|
||||
struct mm_struct;
|
||||
|
||||
/* task is in do_exit() */
|
||||
void profile_task_exit(struct task_struct * task);
|
||||
|
||||
/* task is dead, free task struct ? Returns 1 if
|
||||
* the task was taken, 0 if the task should be freed.
|
||||
*/
|
||||
int profile_handoff_task(struct task_struct * task);
|
||||
|
||||
/* sys_munmap */
|
||||
void profile_munmap(unsigned long addr);
|
||||
|
||||
int task_handoff_register(struct notifier_block * n);
|
||||
int task_handoff_unregister(struct notifier_block * n);
|
||||
|
||||
int profile_event_register(enum profile_type, struct notifier_block * n);
|
||||
int profile_event_unregister(enum profile_type, struct notifier_block * n);
|
||||
|
||||
#else
|
||||
|
||||
#define prof_on 0
|
||||
|
@ -107,29 +85,6 @@ static inline void profile_hit(int type, void *ip)
|
|||
return;
|
||||
}
|
||||
|
||||
static inline int task_handoff_register(struct notifier_block * n)
|
||||
{
|
||||
return -ENOSYS;
|
||||
}
|
||||
|
||||
static inline int task_handoff_unregister(struct notifier_block * n)
|
||||
{
|
||||
return -ENOSYS;
|
||||
}
|
||||
|
||||
static inline int profile_event_register(enum profile_type t, struct notifier_block * n)
|
||||
{
|
||||
return -ENOSYS;
|
||||
}
|
||||
|
||||
static inline int profile_event_unregister(enum profile_type t, struct notifier_block * n)
|
||||
{
|
||||
return -ENOSYS;
|
||||
}
|
||||
|
||||
#define profile_task_exit(a) do { } while (0)
|
||||
#define profile_handoff_task(a) (0)
|
||||
#define profile_munmap(a) do { } while (0)
|
||||
|
||||
#endif /* CONFIG_PROFILING */
|
||||
|
||||
|
|
|
@ -991,8 +991,8 @@ struct task_struct {
|
|||
/* CLONE_CHILD_CLEARTID: */
|
||||
int __user *clear_child_tid;
|
||||
|
||||
/* PF_IO_WORKER */
|
||||
void *pf_io_worker;
|
||||
/* PF_KTHREAD | PF_IO_WORKER */
|
||||
void *worker_private;
|
||||
|
||||
u64 utime;
|
||||
u64 stime;
|
||||
|
|
|
@ -109,13 +109,9 @@ struct signal_struct {
|
|||
|
||||
/* thread group exit support */
|
||||
int group_exit_code;
|
||||
/* overloaded:
|
||||
* - notify group_exit_task when ->count is equal to notify_count
|
||||
* - everyone except group_exit_task is stopped during signal delivery
|
||||
* of fatal signals, group_exit_task processes the signal.
|
||||
*/
|
||||
/* notify group_exec_task when notify_count is less or equal to 0 */
|
||||
int notify_count;
|
||||
struct task_struct *group_exit_task;
|
||||
struct task_struct *group_exec_task;
|
||||
|
||||
/* thread group stop support, overloads group_exit_code too */
|
||||
int group_stop_count;
|
||||
|
@ -256,7 +252,6 @@ struct signal_struct {
|
|||
#define SIGNAL_STOP_STOPPED 0x00000001 /* job control stop in effect */
|
||||
#define SIGNAL_STOP_CONTINUED 0x00000002 /* SIGCONT since WCONTINUED reap */
|
||||
#define SIGNAL_GROUP_EXIT 0x00000004 /* group exit in progress */
|
||||
#define SIGNAL_GROUP_COREDUMP 0x00000008 /* coredump in progress */
|
||||
/*
|
||||
* Pending notifications to parent.
|
||||
*/
|
||||
|
@ -272,31 +267,25 @@ struct signal_struct {
|
|||
static inline void signal_set_stop_flags(struct signal_struct *sig,
|
||||
unsigned int flags)
|
||||
{
|
||||
WARN_ON(sig->flags & (SIGNAL_GROUP_EXIT|SIGNAL_GROUP_COREDUMP));
|
||||
WARN_ON(sig->flags & SIGNAL_GROUP_EXIT);
|
||||
sig->flags = (sig->flags & ~SIGNAL_STOP_MASK) | flags;
|
||||
}
|
||||
|
||||
/* If true, all threads except ->group_exit_task have pending SIGKILL */
|
||||
static inline int signal_group_exit(const struct signal_struct *sig)
|
||||
{
|
||||
return (sig->flags & SIGNAL_GROUP_EXIT) ||
|
||||
(sig->group_exit_task != NULL);
|
||||
}
|
||||
|
||||
extern void flush_signals(struct task_struct *);
|
||||
extern void ignore_signals(struct task_struct *);
|
||||
extern void flush_signal_handlers(struct task_struct *, int force_default);
|
||||
extern int dequeue_signal(struct task_struct *task,
|
||||
sigset_t *mask, kernel_siginfo_t *info);
|
||||
extern int dequeue_signal(struct task_struct *task, sigset_t *mask,
|
||||
kernel_siginfo_t *info, enum pid_type *type);
|
||||
|
||||
static inline int kernel_dequeue_signal(void)
|
||||
{
|
||||
struct task_struct *task = current;
|
||||
kernel_siginfo_t __info;
|
||||
enum pid_type __type;
|
||||
int ret;
|
||||
|
||||
spin_lock_irq(&task->sighand->siglock);
|
||||
ret = dequeue_signal(task, &task->blocked, &__info);
|
||||
ret = dequeue_signal(task, &task->blocked, &__info, &__type);
|
||||
spin_unlock_irq(&task->sighand->siglock);
|
||||
|
||||
return ret;
|
||||
|
|
|
@ -59,6 +59,7 @@ extern void sched_post_fork(struct task_struct *p,
|
|||
extern void sched_dead(struct task_struct *p);
|
||||
|
||||
void __noreturn do_task_dead(void);
|
||||
void __noreturn make_task_dead(int signr);
|
||||
|
||||
extern void proc_caches_init(void);
|
||||
|
||||
|
|
|
@ -65,7 +65,7 @@ struct svc_serv_ops {
|
|||
void (*svo_enqueue_xprt)(struct svc_xprt *);
|
||||
|
||||
/* optional module to count when adding threads.
|
||||
* Thread function must call module_put_and_exit() to exit.
|
||||
* Thread function must call module_put_and_kthread_exit() to exit.
|
||||
*/
|
||||
struct module *svo_module;
|
||||
};
|
||||
|
|
|
@ -54,8 +54,7 @@ struct linux_binprm;
|
|||
/*
|
||||
* ptrace report for syscall entry and exit looks identical.
|
||||
*/
|
||||
static inline int ptrace_report_syscall(struct pt_regs *regs,
|
||||
unsigned long message)
|
||||
static inline int ptrace_report_syscall(unsigned long message)
|
||||
{
|
||||
int ptrace = current->ptrace;
|
||||
|
||||
|
@ -102,7 +101,7 @@ static inline int ptrace_report_syscall(struct pt_regs *regs,
|
|||
static inline __must_check int tracehook_report_syscall_entry(
|
||||
struct pt_regs *regs)
|
||||
{
|
||||
return ptrace_report_syscall(regs, PTRACE_EVENTMSG_SYSCALL_ENTRY);
|
||||
return ptrace_report_syscall(PTRACE_EVENTMSG_SYSCALL_ENTRY);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -127,7 +126,7 @@ static inline void tracehook_report_syscall_exit(struct pt_regs *regs, int step)
|
|||
if (step)
|
||||
user_single_step_report(regs);
|
||||
else
|
||||
ptrace_report_syscall(regs, PTRACE_EVENTMSG_SYSCALL_EXIT);
|
||||
ptrace_report_syscall(PTRACE_EVENTMSG_SYSCALL_EXIT);
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
@ -116,7 +116,7 @@ static void __exit_signal(struct task_struct *tsk)
|
|||
* then notify it:
|
||||
*/
|
||||
if (sig->notify_count > 0 && !--sig->notify_count)
|
||||
wake_up_process(sig->group_exit_task);
|
||||
wake_up_process(sig->group_exec_task);
|
||||
|
||||
if (tsk == sig->curr_target)
|
||||
sig->curr_target = next_thread(tsk);
|
||||
|
@ -697,7 +697,7 @@ static void exit_notify(struct task_struct *tsk, int group_dead)
|
|||
|
||||
/* mt-exec, de_thread() is waiting for group leader */
|
||||
if (unlikely(tsk->signal->notify_count < 0))
|
||||
wake_up_process(tsk->signal->group_exit_task);
|
||||
wake_up_process(tsk->signal->group_exec_task);
|
||||
write_unlock_irq(&tasklist_lock);
|
||||
|
||||
list_for_each_entry_safe(p, n, &dead, ptrace_entry) {
|
||||
|
@ -735,37 +735,22 @@ void __noreturn do_exit(long code)
|
|||
struct task_struct *tsk = current;
|
||||
int group_dead;
|
||||
|
||||
/*
|
||||
* We can get here from a kernel oops, sometimes with preemption off.
|
||||
* Start by checking for critical errors.
|
||||
* Then fix up important state like USER_DS and preemption.
|
||||
* Then do everything else.
|
||||
*/
|
||||
|
||||
WARN_ON(blk_needs_flush_plug(tsk));
|
||||
|
||||
if (unlikely(in_interrupt()))
|
||||
panic("Aiee, killing interrupt handler!");
|
||||
if (unlikely(!tsk->pid))
|
||||
panic("Attempted to kill the idle task!");
|
||||
|
||||
/*
|
||||
* If do_exit is called because this processes oopsed, it's possible
|
||||
* If do_dead is called because this processes oopsed, it's possible
|
||||
* that get_fs() was left as KERNEL_DS, so reset it to USER_DS before
|
||||
* continuing. Amongst other possible reasons, this is to prevent
|
||||
* mm_release()->clear_child_tid() from writing to a user-controlled
|
||||
* kernel address.
|
||||
*
|
||||
* On uptodate architectures force_uaccess_begin is a noop. On
|
||||
* architectures that still have set_fs/get_fs in addition to handling
|
||||
* oopses handles kernel threads that run as set_fs(KERNEL_DS) by
|
||||
* default.
|
||||
*/
|
||||
force_uaccess_begin();
|
||||
|
||||
if (unlikely(in_atomic())) {
|
||||
pr_info("note: %s[%d] exited with preempt_count %d\n",
|
||||
current->comm, task_pid_nr(current),
|
||||
preempt_count());
|
||||
preempt_count_set(PREEMPT_ENABLED);
|
||||
}
|
||||
|
||||
profile_task_exit(tsk);
|
||||
kcov_task_exit(tsk);
|
||||
|
||||
coredump_task_exit(tsk);
|
||||
|
@ -773,17 +758,6 @@ void __noreturn do_exit(long code)
|
|||
|
||||
validate_creds_for_do_exit(tsk);
|
||||
|
||||
/*
|
||||
* We're taking recursive faults here in do_exit. Safest is to just
|
||||
* leave this task alone and wait for reboot.
|
||||
*/
|
||||
if (unlikely(tsk->flags & PF_EXITING)) {
|
||||
pr_alert("Fixing recursive fault but reboot is needed!\n");
|
||||
futex_exit_recursive(tsk);
|
||||
set_current_state(TASK_UNINTERRUPTIBLE);
|
||||
schedule();
|
||||
}
|
||||
|
||||
io_uring_files_cancel();
|
||||
exit_signals(tsk); /* sets PF_EXITING */
|
||||
|
||||
|
@ -882,16 +856,46 @@ void __noreturn do_exit(long code)
|
|||
lockdep_free_task(tsk);
|
||||
do_task_dead();
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(do_exit);
|
||||
|
||||
void complete_and_exit(struct completion *comp, long code)
|
||||
void __noreturn make_task_dead(int signr)
|
||||
{
|
||||
if (comp)
|
||||
complete(comp);
|
||||
/*
|
||||
* Take the task off the cpu after something catastrophic has
|
||||
* happened.
|
||||
*
|
||||
* We can get here from a kernel oops, sometimes with preemption off.
|
||||
* Start by checking for critical errors.
|
||||
* Then fix up important state like USER_DS and preemption.
|
||||
* Then do everything else.
|
||||
*/
|
||||
struct task_struct *tsk = current;
|
||||
|
||||
do_exit(code);
|
||||
if (unlikely(in_interrupt()))
|
||||
panic("Aiee, killing interrupt handler!");
|
||||
if (unlikely(!tsk->pid))
|
||||
panic("Attempted to kill the idle task!");
|
||||
|
||||
if (unlikely(in_atomic())) {
|
||||
pr_info("note: %s[%d] exited with preempt_count %d\n",
|
||||
current->comm, task_pid_nr(current),
|
||||
preempt_count());
|
||||
preempt_count_set(PREEMPT_ENABLED);
|
||||
}
|
||||
|
||||
/*
|
||||
* We're taking recursive faults here in make_task_dead. Safest is to just
|
||||
* leave this task alone and wait for reboot.
|
||||
*/
|
||||
if (unlikely(tsk->flags & PF_EXITING)) {
|
||||
pr_alert("Fixing recursive fault but reboot is needed!\n");
|
||||
futex_exit_recursive(tsk);
|
||||
tsk->exit_state = EXIT_DEAD;
|
||||
refcount_inc(&tsk->rcu_users);
|
||||
do_task_dead();
|
||||
}
|
||||
|
||||
do_exit(signr);
|
||||
}
|
||||
EXPORT_SYMBOL(complete_and_exit);
|
||||
|
||||
SYSCALL_DEFINE1(exit, int, error_code)
|
||||
{
|
||||
|
@ -907,17 +911,19 @@ do_group_exit(int exit_code)
|
|||
{
|
||||
struct signal_struct *sig = current->signal;
|
||||
|
||||
BUG_ON(exit_code & 0x80); /* core dumps don't get here */
|
||||
|
||||
if (signal_group_exit(sig))
|
||||
if (sig->flags & SIGNAL_GROUP_EXIT)
|
||||
exit_code = sig->group_exit_code;
|
||||
else if (sig->group_exec_task)
|
||||
exit_code = 0;
|
||||
else if (!thread_group_empty(current)) {
|
||||
struct sighand_struct *const sighand = current->sighand;
|
||||
|
||||
spin_lock_irq(&sighand->siglock);
|
||||
if (signal_group_exit(sig))
|
||||
if (sig->flags & SIGNAL_GROUP_EXIT)
|
||||
/* Another thread got here before we took the lock. */
|
||||
exit_code = sig->group_exit_code;
|
||||
else if (sig->group_exec_task)
|
||||
exit_code = 0;
|
||||
else {
|
||||
sig->group_exit_code = exit_code;
|
||||
sig->flags = SIGNAL_GROUP_EXIT;
|
||||
|
@ -1012,7 +1018,8 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
|
|||
return 0;
|
||||
|
||||
if (unlikely(wo->wo_flags & WNOWAIT)) {
|
||||
status = p->exit_code;
|
||||
status = (p->signal->flags & SIGNAL_GROUP_EXIT)
|
||||
? p->signal->group_exit_code : p->exit_code;
|
||||
get_task_struct(p);
|
||||
read_unlock(&tasklist_lock);
|
||||
sched_annotate_sleep();
|
||||
|
|
|
@ -757,9 +757,7 @@ void __put_task_struct(struct task_struct *tsk)
|
|||
delayacct_tsk_free(tsk);
|
||||
put_signal_struct(tsk->signal);
|
||||
sched_core_free(tsk);
|
||||
|
||||
if (!profile_handoff_task(tsk))
|
||||
free_task(tsk);
|
||||
free_task(tsk);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(__put_task_struct);
|
||||
|
||||
|
@ -953,7 +951,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
|
|||
tsk->splice_pipe = NULL;
|
||||
tsk->task_frag.page = NULL;
|
||||
tsk->wake_q.next = NULL;
|
||||
tsk->pf_io_worker = NULL;
|
||||
tsk->worker_private = NULL;
|
||||
|
||||
account_kernel_stack(tsk, 1);
|
||||
|
||||
|
@ -2009,12 +2007,6 @@ static __latent_entropy struct task_struct *copy_process(
|
|||
siginitsetinv(&p->blocked, sigmask(SIGKILL)|sigmask(SIGSTOP));
|
||||
}
|
||||
|
||||
/*
|
||||
* This _must_ happen before we call free_task(), i.e. before we jump
|
||||
* to any of the bad_fork_* labels. This is to avoid freeing
|
||||
* p->set_child_tid which is (ab)used as a kthread's data pointer for
|
||||
* kernel threads (PF_KTHREAD).
|
||||
*/
|
||||
p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? args->child_tid : NULL;
|
||||
/*
|
||||
* Clear TID on mm_release()?
|
||||
|
@ -2095,12 +2087,16 @@ static __latent_entropy struct task_struct *copy_process(
|
|||
p->io_context = NULL;
|
||||
audit_set_context(p, NULL);
|
||||
cgroup_fork(p);
|
||||
if (p->flags & PF_KTHREAD) {
|
||||
if (!set_kthread_struct(p))
|
||||
goto bad_fork_cleanup_delayacct;
|
||||
}
|
||||
#ifdef CONFIG_NUMA
|
||||
p->mempolicy = mpol_dup(p->mempolicy);
|
||||
if (IS_ERR(p->mempolicy)) {
|
||||
retval = PTR_ERR(p->mempolicy);
|
||||
p->mempolicy = NULL;
|
||||
goto bad_fork_cleanup_threadgroup_lock;
|
||||
goto bad_fork_cleanup_delayacct;
|
||||
}
|
||||
#endif
|
||||
#ifdef CONFIG_CPUSETS
|
||||
|
@ -2437,8 +2433,8 @@ static __latent_entropy struct task_struct *copy_process(
|
|||
lockdep_free_task(p);
|
||||
#ifdef CONFIG_NUMA
|
||||
mpol_put(p->mempolicy);
|
||||
bad_fork_cleanup_threadgroup_lock:
|
||||
#endif
|
||||
bad_fork_cleanup_delayacct:
|
||||
delayacct_tsk_free(p);
|
||||
bad_fork_cleanup_count:
|
||||
dec_rlimit_ucounts(task_ucounts(p), UCOUNT_RLIMIT_NPROC, 1);
|
||||
|
|
|
@ -1031,7 +1031,7 @@ static void futex_cleanup(struct task_struct *tsk)
|
|||
* actually finished the futex cleanup. The worst case for this is that the
|
||||
* waiter runs through the wait loop until the state becomes visible.
|
||||
*
|
||||
* This is called from the recursive fault handling path in do_exit().
|
||||
* This is called from the recursive fault handling path in make_task_dead().
|
||||
*
|
||||
* This is best effort. Either the futex exit code has run already or
|
||||
* not. If the OWNER_DIED bit has been set on the futex then the waiter can
|
||||
|
|
|
@ -81,7 +81,7 @@ int kexec_should_crash(struct task_struct *p)
|
|||
if (crash_kexec_post_notifiers)
|
||||
return 0;
|
||||
/*
|
||||
* There are 4 panic() calls in do_exit() path, each of which
|
||||
* There are 4 panic() calls in make_task_dead() path, each of which
|
||||
* corresponds to each of these 4 conditions.
|
||||
*/
|
||||
if (in_interrupt() || !p->pid || is_global_init(p) || panic_on_oops)
|
||||
|
|
|
@ -52,6 +52,7 @@ struct kthread_create_info
|
|||
struct kthread {
|
||||
unsigned long flags;
|
||||
unsigned int cpu;
|
||||
int result;
|
||||
int (*threadfn)(void *);
|
||||
void *data;
|
||||
mm_segment_t oldfs;
|
||||
|
@ -71,7 +72,7 @@ enum KTHREAD_BITS {
|
|||
static inline struct kthread *to_kthread(struct task_struct *k)
|
||||
{
|
||||
WARN_ON(!(k->flags & PF_KTHREAD));
|
||||
return (__force void *)k->set_child_tid;
|
||||
return k->worker_private;
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -79,7 +80,7 @@ static inline struct kthread *to_kthread(struct task_struct *k)
|
|||
*
|
||||
* Per construction; when:
|
||||
*
|
||||
* (p->flags & PF_KTHREAD) && p->set_child_tid
|
||||
* (p->flags & PF_KTHREAD) && p->worker_private
|
||||
*
|
||||
* the task is both a kthread and struct kthread is persistent. However
|
||||
* PF_KTHREAD on it's own is not, kernel_thread() can exec() (See umh.c and
|
||||
|
@ -87,26 +88,29 @@ static inline struct kthread *to_kthread(struct task_struct *k)
|
|||
*/
|
||||
static inline struct kthread *__to_kthread(struct task_struct *p)
|
||||
{
|
||||
void *kthread = (__force void *)p->set_child_tid;
|
||||
void *kthread = p->worker_private;
|
||||
if (kthread && !(p->flags & PF_KTHREAD))
|
||||
kthread = NULL;
|
||||
return kthread;
|
||||
}
|
||||
|
||||
void set_kthread_struct(struct task_struct *p)
|
||||
bool set_kthread_struct(struct task_struct *p)
|
||||
{
|
||||
struct kthread *kthread;
|
||||
|
||||
if (__to_kthread(p))
|
||||
return;
|
||||
if (WARN_ON_ONCE(to_kthread(p)))
|
||||
return false;
|
||||
|
||||
kthread = kzalloc(sizeof(*kthread), GFP_KERNEL);
|
||||
/*
|
||||
* We abuse ->set_child_tid to avoid the new member and because it
|
||||
* can't be wrongly copied by copy_process(). We also rely on fact
|
||||
* that the caller can't exec, so PF_KTHREAD can't be cleared.
|
||||
*/
|
||||
p->set_child_tid = (__force void __user *)kthread;
|
||||
if (!kthread)
|
||||
return false;
|
||||
|
||||
init_completion(&kthread->exited);
|
||||
init_completion(&kthread->parked);
|
||||
p->vfork_done = &kthread->exited;
|
||||
|
||||
p->worker_private = kthread;
|
||||
return true;
|
||||
}
|
||||
|
||||
void free_kthread_struct(struct task_struct *k)
|
||||
|
@ -114,13 +118,13 @@ void free_kthread_struct(struct task_struct *k)
|
|||
struct kthread *kthread;
|
||||
|
||||
/*
|
||||
* Can be NULL if this kthread was created by kernel_thread()
|
||||
* or if kmalloc() in kthread() failed.
|
||||
* Can be NULL if kmalloc() in set_kthread_struct() failed.
|
||||
*/
|
||||
kthread = to_kthread(k);
|
||||
#ifdef CONFIG_BLK_CGROUP
|
||||
WARN_ON_ONCE(kthread && kthread->blkcg_css);
|
||||
#endif
|
||||
k->worker_private = NULL;
|
||||
kfree(kthread);
|
||||
}
|
||||
|
||||
|
@ -268,6 +272,44 @@ void kthread_parkme(void)
|
|||
}
|
||||
EXPORT_SYMBOL_GPL(kthread_parkme);
|
||||
|
||||
/**
|
||||
* kthread_exit - Cause the current kthread return @result to kthread_stop().
|
||||
* @result: The integer value to return to kthread_stop().
|
||||
*
|
||||
* While kthread_exit can be called directly, it exists so that
|
||||
* functions which do some additional work in non-modular code such as
|
||||
* module_put_and_kthread_exit can be implemented.
|
||||
*
|
||||
* Does not return.
|
||||
*/
|
||||
void __noreturn kthread_exit(long result)
|
||||
{
|
||||
struct kthread *kthread = to_kthread(current);
|
||||
kthread->result = result;
|
||||
do_exit(0);
|
||||
}
|
||||
|
||||
/**
|
||||
* kthread_complete_and_exit - Exit the current kthread.
|
||||
* @comp: Completion to complete
|
||||
* @code: The integer value to return to kthread_stop().
|
||||
*
|
||||
* If present complete @comp and the reuturn code to kthread_stop().
|
||||
*
|
||||
* A kernel thread whose module may be removed after the completion of
|
||||
* @comp can use this function exit safely.
|
||||
*
|
||||
* Does not return.
|
||||
*/
|
||||
void __noreturn kthread_complete_and_exit(struct completion *comp, long code)
|
||||
{
|
||||
if (comp)
|
||||
complete(comp);
|
||||
|
||||
kthread_exit(code);
|
||||
}
|
||||
EXPORT_SYMBOL(kthread_complete_and_exit);
|
||||
|
||||
static int kthread(void *_create)
|
||||
{
|
||||
static const struct sched_param param = { .sched_priority = 0 };
|
||||
|
@ -279,27 +321,17 @@ static int kthread(void *_create)
|
|||
struct kthread *self;
|
||||
int ret;
|
||||
|
||||
set_kthread_struct(current);
|
||||
self = to_kthread(current);
|
||||
|
||||
/* If user was SIGKILLed, I release the structure. */
|
||||
done = xchg(&create->done, NULL);
|
||||
if (!done) {
|
||||
kfree(create);
|
||||
do_exit(-EINTR);
|
||||
}
|
||||
|
||||
if (!self) {
|
||||
create->result = ERR_PTR(-ENOMEM);
|
||||
complete(done);
|
||||
do_exit(-ENOMEM);
|
||||
kthread_exit(-EINTR);
|
||||
}
|
||||
|
||||
self->threadfn = threadfn;
|
||||
self->data = data;
|
||||
init_completion(&self->exited);
|
||||
init_completion(&self->parked);
|
||||
current->vfork_done = &self->exited;
|
||||
|
||||
/*
|
||||
* The new thread inherited kthreadd's priority and CPU mask. Reset
|
||||
|
@ -326,7 +358,7 @@ static int kthread(void *_create)
|
|||
__kthread_parkme(self);
|
||||
ret = threadfn(data);
|
||||
}
|
||||
do_exit(ret);
|
||||
kthread_exit(ret);
|
||||
}
|
||||
|
||||
/* called from kernel_clone() to get node information for about to be created task */
|
||||
|
@@ -628,7 +660,7 @@ EXPORT_SYMBOL_GPL(kthread_park);
  * instead of calling wake_up_process(): the thread will exit without
  * calling threadfn().
  *
- * If threadfn() may call do_exit() itself, the caller must ensure
+ * If threadfn() may call kthread_exit() itself, the caller must ensure
  * task_struct can't go away.
  *
  * Returns the result of threadfn(), or %-EINTR if wake_up_process()

@@ -647,7 +679,7 @@ int kthread_stop(struct task_struct *k)
 	kthread_unpark(k);
 	wake_up_process(k);
 	wait_for_completion(&kthread->exited);
-	ret = k->exit_code;
+	ret = kthread->result;
 	put_task_struct(k);
 
 	trace_sched_kthread_stop_ret(ret);

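
On the creator's side the exit value now comes out of struct kthread rather than task->exit_code. A sketch of the stop path, illustrative only (example_threadfn and the thread name are assumptions carried over from the earlier sketch):

#include <linux/err.h>
#include <linux/kthread.h>

/* Hypothetical creator: starts the worker and later collects the value
 * it passed to kthread_exit() (or returned from its thread function).
 */
static int example_start_and_stop(void)
{
        struct task_struct *tsk;

        tsk = kthread_run(example_threadfn, NULL, "example-worker");
        if (IS_ERR(tsk))
                return PTR_ERR(tsk);

        /* ... let it run for a while ... */

        return kthread_stop(tsk);       /* result stored by kthread_exit() */
}
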
@@ -337,12 +337,12 @@ static inline void add_taint_module(struct module *mod, unsigned flag,
  * A thread that wants to hold a reference to a module only while it
  * is running can call this to safely exit. nfsd and lockd use this.
  */
-void __noreturn __module_put_and_exit(struct module *mod, long code)
+void __noreturn __module_put_and_kthread_exit(struct module *mod, long code)
 {
 	module_put(mod);
-	do_exit(code);
+	kthread_exit(code);
 }
-EXPORT_SYMBOL(__module_put_and_exit);
+EXPORT_SYMBOL(__module_put_and_kthread_exit);
 
 /* Find a module section: 0 means not found. */
 static unsigned int find_sec(const struct load_info *info, const char *name)

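
The comment above describes the nfsd/lockd pattern. A rough sketch of such a module-owned thread using the renamed helper; it is illustrative only, the names are invented, and module_put_and_kthread_exit() is the THIS_MODULE wrapper around the function above (its callers appear in the bluetooth hunks later in this diff):

#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/module.h>

/* Hypothetical daemon thread: the module took a reference on itself when
 * it started this thread, and the thread drops that reference as the very
 * last thing it does.
 */
static int example_daemon(void *unused)
{
        while (!kthread_should_stop()) {
                /* ... service requests ... */
                msleep(100);
        }

        module_put_and_kthread_exit(0); /* never returns */
}
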
@@ -133,79 +133,6 @@ int __ref profile_init(void)
 	return -ENOMEM;
 }
 
-/* Profile event notifications */
-
-static BLOCKING_NOTIFIER_HEAD(task_exit_notifier);
-static ATOMIC_NOTIFIER_HEAD(task_free_notifier);
-static BLOCKING_NOTIFIER_HEAD(munmap_notifier);
-
-void profile_task_exit(struct task_struct *task)
-{
-	blocking_notifier_call_chain(&task_exit_notifier, 0, task);
-}
-
-int profile_handoff_task(struct task_struct *task)
-{
-	int ret;
-	ret = atomic_notifier_call_chain(&task_free_notifier, 0, task);
-	return (ret == NOTIFY_OK) ? 1 : 0;
-}
-
-void profile_munmap(unsigned long addr)
-{
-	blocking_notifier_call_chain(&munmap_notifier, 0, (void *)addr);
-}
-
-int task_handoff_register(struct notifier_block *n)
-{
-	return atomic_notifier_chain_register(&task_free_notifier, n);
-}
-EXPORT_SYMBOL_GPL(task_handoff_register);
-
-int task_handoff_unregister(struct notifier_block *n)
-{
-	return atomic_notifier_chain_unregister(&task_free_notifier, n);
-}
-EXPORT_SYMBOL_GPL(task_handoff_unregister);
-
-int profile_event_register(enum profile_type type, struct notifier_block *n)
-{
-	int err = -EINVAL;
-
-	switch (type) {
-	case PROFILE_TASK_EXIT:
-		err = blocking_notifier_chain_register(
-				&task_exit_notifier, n);
-		break;
-	case PROFILE_MUNMAP:
-		err = blocking_notifier_chain_register(
-				&munmap_notifier, n);
-		break;
-	}
-
-	return err;
-}
-EXPORT_SYMBOL_GPL(profile_event_register);
-
-int profile_event_unregister(enum profile_type type, struct notifier_block *n)
-{
-	int err = -EINVAL;
-
-	switch (type) {
-	case PROFILE_TASK_EXIT:
-		err = blocking_notifier_chain_unregister(
-				&task_exit_notifier, n);
-		break;
-	case PROFILE_MUNMAP:
-		err = blocking_notifier_chain_unregister(
-				&munmap_notifier, n);
-		break;
-	}
-
-	return err;
-}
-EXPORT_SYMBOL_GPL(profile_event_unregister);
-
 #if defined(CONFIG_SMP) && defined(CONFIG_PROC_FS)
 /*
  * Each cpu has a pair of open-addressed hashtables for pending

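
For reference, a client of the notifier API being deleted would have looked roughly like the sketch below (illustrative only; the names example_task_exit_notify and example_nb are invented, and this is the pre-removal interface). No such users remained in the tree, which is why the whole block above is dead code:

#include <linux/notifier.h>
#include <linux/profile.h>
#include <linux/sched.h>

/* Hypothetical consumer of the now-removed PROFILE_TASK_EXIT notification. */
static int example_task_exit_notify(struct notifier_block *nb,
                                    unsigned long action, void *data)
{
        struct task_struct *task = data;

        /* ... inspect the exiting task ... */
        (void)task;
        return NOTIFY_OK;
}

static struct notifier_block example_nb = {
        .notifier_call = example_task_exit_notify,
};

static int __init example_init(void)
{
        return profile_event_register(PROFILE_TASK_EXIT, &example_nb);
}
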
@@ -419,8 +419,6 @@ static int ptrace_attach(struct task_struct *task, long request,
 	if (task->ptrace)
 		goto unlock_tasklist;
 
-	if (seize)
-		flags |= PT_SEIZED;
 	task->ptrace = flags;
 
 	ptrace_link(task, current);

@@ -8642,14 +8642,6 @@ void __init init_idle(struct task_struct *idle, int cpu)
 
 	__sched_fork(0, idle);
 
-	/*
-	 * The idle task doesn't need the kthread struct to function, but it
-	 * is dressed up as a per-CPU kthread and thus needs to play the part
-	 * if we want to avoid special-casing it in code that deals with per-CPU
-	 * kthreads.
-	 */
-	set_kthread_struct(idle);
-
 	raw_spin_lock_irqsave(&idle->pi_lock, flags);
 	raw_spin_rq_lock(rq);

@@ -9468,6 +9460,14 @@ void __init sched_init(void)
 	mmgrab(&init_mm);
 	enter_lazy_tlb(&init_mm, current);
 
+	/*
+	 * The idle task doesn't need the kthread struct to function, but it
+	 * is dressed up as a per-CPU kthread and thus needs to play the part
+	 * if we want to avoid special-casing it in code that deals with per-CPU
+	 * kthreads.
+	 */
+	WARN_ON(!set_kthread_struct(current));
+
 	/*
 	 * Make us the idle thread. Technically, schedule() should not be
 	 * called from this thread, however somewhere below it might be,

@@ -626,7 +626,8 @@ static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
  *
  * All callers have to hold the siglock.
  */
-int dequeue_signal(struct task_struct *tsk, sigset_t *mask, kernel_siginfo_t *info)
+int dequeue_signal(struct task_struct *tsk, sigset_t *mask,
+		   kernel_siginfo_t *info, enum pid_type *type)
 {
 	bool resched_timer = false;
 	int signr;

@@ -634,8 +635,10 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, kernel_siginfo_t *in
 	/* We only dequeue private signals from ourselves, we don't let
 	 * signalfd steal them
 	 */
+	*type = PIDTYPE_PID;
 	signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer);
 	if (!signr) {
+		*type = PIDTYPE_TGID;
 		signr = __dequeue_signal(&tsk->signal->shared_pending,
 					 mask, info, &resched_timer);
 #ifdef CONFIG_POSIX_TIMERS

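
A sketch of the new calling convention, illustrative only (the wrapper function is invented; the locking requirement comes from the kernel-doc above, and real callers are updated later in this diff):

#include <linux/sched/signal.h>

/* Illustrative caller: dequeue one pending signal and learn which queue
 * it came from.  dequeue_signal() must be called with the siglock held.
 */
static int example_dequeue_one(struct task_struct *tsk, kernel_siginfo_t *info)
{
        enum pid_type type;
        int signr;

        spin_lock_irq(&tsk->sighand->siglock);
        signr = dequeue_signal(tsk, &tsk->blocked, info, &type);
        spin_unlock_irq(&tsk->sighand->siglock);

        /* type == PIDTYPE_PID: taken from tsk->pending (thread private).
         * type == PIDTYPE_TGID: taken from the shared pending queue.
         */
        return signr;
}
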
@@ -903,8 +906,8 @@ static bool prepare_signal(int sig, struct task_struct *p, bool force)
 	struct task_struct *t;
 	sigset_t flush;
 
-	if (signal->flags & (SIGNAL_GROUP_EXIT | SIGNAL_GROUP_COREDUMP)) {
-		if (!(signal->flags & SIGNAL_GROUP_EXIT))
+	if (signal->flags & SIGNAL_GROUP_EXIT) {
+		if (signal->core_state)
 			return sig == SIGKILL;
 		/*
 		 * The process is in the middle of dying, nothing to do.

@@ -1029,7 +1032,7 @@ static void complete_signal(int sig, struct task_struct *p, enum pid_type type)
 	 * then start taking the whole group down immediately.
 	 */
 	if (sig_fatal(p, sig) &&
-	    !(signal->flags & SIGNAL_GROUP_EXIT) &&
+	    (signal->core_state || !(signal->flags & SIGNAL_GROUP_EXIT)) &&
 	    !sigismember(&t->real_blocked, sig) &&
 	    (sig == SIGKILL || !p->ptrace)) {
 		/*

@@ -1820,6 +1823,7 @@ int force_sig_perf(void __user *addr, u32 type, u64 sig_data)
  * force_sig_seccomp - signals the task to allow in-process syscall emulation
  * @syscall: syscall number to send to userland
  * @reason: filter-supplied reason code to send to userland (via si_errno)
+ * @force_coredump: true to trigger a coredump
  *
  * Forces a SIGSYS with a code of SYS_SECCOMP and related sigsys info.
  */

@@ -2383,7 +2387,8 @@ static bool do_signal_stop(int signr)
 	WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);
 
 	if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
-	    unlikely(signal_group_exit(sig)))
+	    unlikely(sig->flags & SIGNAL_GROUP_EXIT) ||
+	    unlikely(sig->group_exec_task))
 		return false;
 	/*
 	 * There is no group stop already in progress. We must

@@ -2544,7 +2549,7 @@ static void do_freezer_trap(void)
 	freezable_schedule();
 }
 
-static int ptrace_signal(int signr, kernel_siginfo_t *info)
+static int ptrace_signal(int signr, kernel_siginfo_t *info, enum pid_type type)
 {
 	/*
 	 * We do not check sig_kernel_stop(signr) but set this marker

@@ -2584,8 +2589,9 @@ static int ptrace_signal(int signr, kernel_siginfo_t *info)
 	}
 
 	/* If the (new) signal is now blocked, requeue it. */
-	if (sigismember(&current->blocked, signr)) {
-		send_signal(signr, info, current, PIDTYPE_PID);
+	if (sigismember(&current->blocked, signr) ||
+	    fatal_signal_pending(current)) {
+		send_signal(signr, info, current, type);
 		signr = 0;
 	}
 

@@ -2684,18 +2690,20 @@ bool get_signal(struct ksignal *ksig)
 		goto relock;
 	}
 
-	/* Has this task already been marked for death? */
-	if (signal_group_exit(signal)) {
-		ksig->info.si_signo = signr = SIGKILL;
-		sigdelset(&current->pending.signal, SIGKILL);
-		trace_signal_deliver(SIGKILL, SEND_SIG_NOINFO,
-				     &sighand->action[SIGKILL - 1]);
-		recalc_sigpending();
-		goto fatal;
-	}
-
 	for (;;) {
 		struct k_sigaction *ka;
+		enum pid_type type;
+
+		/* Has this task already been marked for death? */
+		if ((signal->flags & SIGNAL_GROUP_EXIT) ||
+		    signal->group_exec_task) {
+			ksig->info.si_signo = signr = SIGKILL;
+			sigdelset(&current->pending.signal, SIGKILL);
+			trace_signal_deliver(SIGKILL, SEND_SIG_NOINFO,
+					     &sighand->action[SIGKILL - 1]);
+			recalc_sigpending();
+			goto fatal;
+		}
 
 		if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
 		    do_signal_stop(0))

@@ -2728,16 +2736,18 @@ bool get_signal(struct ksignal *ksig)
 		 * so that the instruction pointer in the signal stack
 		 * frame points to the faulting instruction.
 		 */
+		type = PIDTYPE_PID;
 		signr = dequeue_synchronous_signal(&ksig->info);
 		if (!signr)
-			signr = dequeue_signal(current, &current->blocked, &ksig->info);
+			signr = dequeue_signal(current, &current->blocked,
+					       &ksig->info, &type);
 
 		if (!signr)
 			break; /* will return 0 */
 
 		if (unlikely(current->ptrace) && (signr != SIGKILL) &&
 		    !(sighand->action[signr -1].sa.sa_flags & SA_IMMUTABLE)) {
-			signr = ptrace_signal(signr, &ksig->info);
+			signr = ptrace_signal(signr, &ksig->info, type);
 			if (!signr)
 				continue;
 		}

@@ -2863,13 +2873,13 @@ bool get_signal(struct ksignal *ksig)
 }
 
 /**
- * signal_delivered -
+ * signal_delivered - called after signal delivery to update blocked signals
  * @ksig: kernel signal struct
  * @stepping: nonzero if debugger single-step or block-step in use
  *
  * This function should be called when a signal has successfully been
 * delivered. It updates the blocked signals accordingly (@ksig->ka.sa.sa_mask
- * is always blocked, and the signal itself is blocked unless %SA_NODEFER
+ * is always blocked), and the signal itself is blocked unless %SA_NODEFER
  * is set in @ksig->ka.sa.sa_flags. Tracing is notified.
  */
 static void signal_delivered(struct ksignal *ksig, int stepping)

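
The corrected kernel-doc spells out the mask handling; as a reading aid, here is that rule in isolation (illustrative only, not a copy of the function body, which additionally clears the restore-sigmask flag and notifies tracing):

#include <linux/sched/signal.h>
#include <linux/signal.h>

/* After delivering ksig, block everything in the handler's sa_mask, plus
 * the signal itself unless the handler was installed with SA_NODEFER.
 */
static void example_block_after_delivery(struct ksignal *ksig)
{
        sigset_t blocked;

        sigorsets(&blocked, &current->blocked, &ksig->ka.sa.sa_mask);
        if (!(ksig->ka.sa.sa_flags & SA_NODEFER))
                sigaddset(&blocked, ksig->sig);
        set_current_blocked(&blocked);
}
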
@@ -2942,7 +2952,7 @@ void exit_signals(struct task_struct *tsk)
 	 */
 	cgroup_threadgroup_change_begin(tsk);
 
-	if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
+	if (thread_group_empty(tsk) || (tsk->signal->flags & SIGNAL_GROUP_EXIT)) {
 		tsk->flags |= PF_EXITING;
 		cgroup_threadgroup_change_end(tsk);
 		return;

@@ -3562,6 +3572,7 @@ static int do_sigtimedwait(const sigset_t *which, kernel_siginfo_t *info,
 	ktime_t *to = NULL, timeout = KTIME_MAX;
 	struct task_struct *tsk = current;
 	sigset_t mask = *which;
+	enum pid_type type;
 	int sig, ret = 0;
 
 	if (ts) {

@@ -3578,7 +3589,7 @@ static int do_sigtimedwait(const sigset_t *which, kernel_siginfo_t *info,
 	signotset(&mask);
 
 	spin_lock_irq(&tsk->sighand->siglock);
-	sig = dequeue_signal(tsk, &mask, info);
+	sig = dequeue_signal(tsk, &mask, info, &type);
 	if (!sig && timeout) {
 		/*
 		 * None ready, temporarily unblock those we're interested

@@ -3597,7 +3608,7 @@ static int do_sigtimedwait(const sigset_t *which, kernel_siginfo_t *info,
 		spin_lock_irq(&tsk->sighand->siglock);
 		__set_task_blocked(tsk, &tsk->real_blocked);
 		sigemptyset(&tsk->real_blocked);
-		sig = dequeue_signal(tsk, &mask, info);
+		sig = dequeue_signal(tsk, &mask, info, &type);
 	}
 	spin_unlock_irq(&tsk->sighand->siglock);
 

@@ -38,11 +38,10 @@ void bacct_add_tsk(struct user_namespace *user_ns,
 	stats->ac_btime = clamp_t(time64_t, btime, 0, U32_MAX);
 	stats->ac_btime64 = btime;
 
-	if (thread_group_leader(tsk)) {
+	if (tsk->flags & PF_EXITING)
 		stats->ac_exitcode = tsk->exit_code;
-		if (tsk->flags & PF_FORKNOEXEC)
-			stats->ac_flag |= AFORK;
-	}
+	if (thread_group_leader(tsk) && (tsk->flags & PF_FORKNOEXEC))
+		stats->ac_flag |= AFORK;
 	if (tsk->flags & PF_SUPERPRIV)
 		stats->ac_flag |= ASU;
 	if (tsk->flags & PF_DUMPCORE)

@@ -17,7 +17,7 @@
 void __noreturn kunit_try_catch_throw(struct kunit_try_catch *try_catch)
 {
 	try_catch->try_result = -EFAULT;
-	complete_and_exit(try_catch->try_completion, -EFAULT);
+	kthread_complete_and_exit(try_catch->try_completion, -EFAULT);
 }
 EXPORT_SYMBOL_GPL(kunit_try_catch_throw);
 

@@ -27,7 +27,7 @@ static int kunit_generic_run_threadfn_adapter(void *data)
 
 	try_catch->try(try_catch->context);
 
-	complete_and_exit(try_catch->try_completion, 0);
+	kthread_complete_and_exit(try_catch->try_completion, 0);
 }
 
 static unsigned long kunit_test_timeout(void)

@@ -2935,7 +2935,6 @@ EXPORT_SYMBOL(vm_munmap);
 SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
 {
 	addr = untagged_addr(addr);
-	profile_munmap(addr);
 	return __vm_munmap(addr, len, true);
 }
 

@@ -793,7 +793,7 @@ static inline bool __task_will_free_mem(struct task_struct *task)
 	 * coredump_task_exit(), so the oom killer cannot assume that
 	 * the process will promptly exit and release memory.
 	 */
-	if (sig->flags & SIGNAL_GROUP_COREDUMP)
+	if (sig->core_state)
 		return false;
 
 	if (sig->flags & SIGNAL_GROUP_EXIT)

@@ -535,7 +535,7 @@ static int bnep_session(void *arg)
 
 	up_write(&bnep_session_sem);
 	free_netdev(dev);
-	module_put_and_exit(0);
+	module_put_and_kthread_exit(0);
 	return 0;
 }
 

@@ -323,7 +323,7 @@ static int cmtp_session(void *arg)
 	up_write(&cmtp_session_sem);
 
 	kfree(session);
-	module_put_and_exit(0);
+	module_put_and_kthread_exit(0);
 	return 0;
 }
 

@@ -1305,7 +1305,7 @@ static int hidp_session_thread(void *arg)
 	l2cap_unregister_user(session->conn, &session->user);
 	hidp_session_put(session);
 
-	module_put_and_exit(0);
+	module_put_and_kthread_exit(0);
 	return 0;
 }
 

@@ -168,14 +168,16 @@ static bool __dead_end_function(struct objtool_file *file, struct symbol *func,
 		"panic",
 		"do_exit",
 		"do_task_dead",
-		"__module_put_and_exit",
-		"complete_and_exit",
+		"kthread_exit",
+		"make_task_dead",
+		"__module_put_and_kthread_exit",
+		"kthread_complete_and_exit",
 		"__reiserfs_panic",
 		"lbug_with_loc",
 		"fortify_panic",
 		"usercopy_abort",
 		"machine_real_restart",
-		"rewind_stack_do_exit",
+		"rewind_stack_and_make_dead",
 		"kunit_try_catch_throw",
 		"xen_start_kernel",
 		"cpu_bringup_and_idle",