arch-cleanup-2020-10-22
-----BEGIN PGP SIGNATURE-----

iQJEBAABCAAuFiEEwPw5LcreJtl1+l5K99NY+ylx4KYFAl+SOXIQHGF4Ym9lQGtl
cm5lbC5kawAKCRD301j7KXHgptrcD/93VUDmRAn73ChKNd0TtXUicJlAlNLVjvfs
VFTXWBDnlJnGkZT7ElkDD9b8dsz8l4xGf/QZ5dzhC/th2OsfObQkSTfe0lv5cCQO
mX7CRSrDpjaHtW+WGPDa0oQsGgIfpqUz2IOg9NKbZZ1LJ2uzYfdOcf3oyRgwZJ9B
I3sh1vP6OzjZVVCMmtMTM+sYZEsDoNwhZwpkpiwMmj8tYtOPgKCYKpqCiXrGU0x2
ML5FtDIwiwU+O3zYYdCBWqvCb2Db0iA9Aov2whEBz/V2jnmrN5RMA/90UOh1E2zG
br4wM1Wt3hNrtj5qSxZGlF/HEMYJVB8Z2SgMjYu4vQz09qRVVqpGdT/dNvLAHQWg
w4xNCj071kVZDQdfwnqeWSKYUau9Xskvi8xhTT+WX8a5CsbVrM9vGslnS5XNeZ6p
h2D3Q+TAYTvT756icTl0qsYVP7PrPY7DdmQYu0q+Lc3jdGI+jyxO2h9OFBRLZ3p6
zFX2N8wkvvCCzP2DwVnnhIi/GovpSh7ksHnb039F36Y/IhZPqV1bGqdNQVdanv6I
8fcIDM6ltRQ7dO2Br5f1tKUZE9Pm6x60b/uRVjhfVh65uTEKyGRhcm5j9ztzvQfI
cCBg4rbVRNKolxuDEkjsAFXVoiiEEsb7pLf4pMO+Dr62wxFG589tQNySySneUIVZ
J9ILnGAAeQ==
=aVWo
-----END PGP SIGNATURE-----

Merge tag 'arch-cleanup-2020-10-22' of git://git.kernel.dk/linux-block

Pull arch task_work cleanups from Jens Axboe:
 "Two cleanups that don't fit other categories:

   - Finally get the task_work_add() cleanup done properly, so we don't
     have random 0/1/false/true/TWA_SIGNAL confusing use cases. Updates
     all callers, and also fixes up the documentation for
     task_work_add().

   - While working on some TIF related changes for 5.11, this
     TIF_NOTIFY_RESUME cleanup fell out of that. Remove some arch
     duplication for how that is handled"

* tag 'arch-cleanup-2020-10-22' of git://git.kernel.dk/linux-block:
  task_work: cleanup notification modes
  tracehook: clear TIF_NOTIFY_RESUME in tracehook_notify_resume()
commit 4a22709e21
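Before the per-file hunks, a caller's-eye sketch of what the first cleanup settles on may help (an editor's illustration, not code from this merge: example_fn/queue_example are hypothetical names, while init_task_work(), task_work_add() and the TWA_* modes are the API introduced below):

	#include <linux/task_work.h>

	static struct callback_head example_work;

	static void example_fn(struct callback_head *head)
	{
		/* runs from task_work_run() in the target task's context */
	}

	static int queue_example(struct task_struct *task)
	{
		init_task_work(&example_work, example_fn);
		/*
		 * TWA_NONE:   queue only, no notification
		 * TWA_RESUME: run on the next return to user mode
		 * TWA_SIGNAL: interrupt the task as a signal would
		 */
		return task_work_add(task, &example_work, TWA_RESUME);
	}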
@@ -531,7 +531,6 @@ do_work_pending(struct pt_regs *regs, unsigned long thread_flags,
 				do_signal(regs, r0, r19);
 				r0 = 0;
 			} else {
-				clear_thread_flag(TIF_NOTIFY_RESUME);
 				tracehook_notify_resume(regs);
 			}
 		}

@@ -394,6 +394,6 @@ void do_notify_resume(struct pt_regs *regs)
 	 * ASM glue gaurantees that this is only called when returning to
 	 * user mode
 	 */
-	if (test_and_clear_thread_flag(TIF_NOTIFY_RESUME))
+	if (test_thread_flag(TIF_NOTIFY_RESUME))
 		tracehook_notify_resume(regs);
 }

@@ -669,7 +669,6 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
 			} else if (thread_flags & _TIF_UPROBE) {
 				uprobe_notify_resume(regs);
 			} else {
-				clear_thread_flag(TIF_NOTIFY_RESUME);
 				tracehook_notify_resume(regs);
 				rseq_handle_notify_resume(NULL, regs);
 			}

@@ -946,7 +946,6 @@ asmlinkage void do_notify_resume(struct pt_regs *regs,
 			do_signal(regs);
 
 		if (thread_flags & _TIF_NOTIFY_RESUME) {
-			clear_thread_flag(TIF_NOTIFY_RESUME);
 			tracehook_notify_resume(regs);
 			rseq_handle_notify_resume(NULL, regs);
 		}

@@ -316,8 +316,6 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, u32 thread_info_flags,
 	if (thread_info_flags & (1 << TIF_SIGPENDING))
 		do_signal(regs, syscall);
 
-	if (thread_info_flags & (1 << TIF_NOTIFY_RESUME)) {
-		clear_thread_flag(TIF_NOTIFY_RESUME);
+	if (thread_info_flags & (1 << TIF_NOTIFY_RESUME))
 		tracehook_notify_resume(regs);
-	}
 }

@@ -261,7 +261,6 @@ asmlinkage void do_notify_resume(struct pt_regs *regs,
 		do_signal(regs);
 
 	if (thread_info_flags & _TIF_NOTIFY_RESUME) {
-		clear_thread_flag(TIF_NOTIFY_RESUME);
 		tracehook_notify_resume(regs);
 		rseq_handle_notify_resume(NULL, regs);
 	}

@@ -282,8 +282,6 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, u32 thread_info_flags)
 	if (thread_info_flags & _TIF_SIGPENDING)
 		do_signal(regs);
 
-	if (thread_info_flags & _TIF_NOTIFY_RESUME) {
-		clear_thread_flag(TIF_NOTIFY_RESUME);
+	if (thread_info_flags & _TIF_NOTIFY_RESUME)
 		tracehook_notify_resume(regs);
-	}
 }

@@ -180,7 +180,6 @@ int do_work_pending(struct pt_regs *regs, u32 thread_info_flags)
 	}
 
 	if (thread_info_flags & _TIF_NOTIFY_RESUME) {
-		clear_thread_flag(TIF_NOTIFY_RESUME);
 		tracehook_notify_resume(regs);
 		return 1;
 	}

@@ -176,7 +176,7 @@ do_notify_resume_user(sigset_t *unused, struct sigscratch *scr, long in_syscall)
 		ia64_do_signal(scr, in_syscall);
 	}
 
-	if (test_and_clear_thread_flag(TIF_NOTIFY_RESUME)) {
+	if (test_thread_flag(TIF_NOTIFY_RESUME)) {
 		local_irq_enable();	/* force interrupt enable */
 		tracehook_notify_resume(&scr->pt);
 	}

@@ -1136,6 +1136,6 @@ void do_notify_resume(struct pt_regs *regs)
 	if (test_thread_flag(TIF_SIGPENDING))
 		do_signal(regs);
 
-	if (test_and_clear_thread_flag(TIF_NOTIFY_RESUME))
+	if (test_thread_flag(TIF_NOTIFY_RESUME))
 		tracehook_notify_resume(regs);
 }

@@ -316,6 +316,6 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, int in_syscall)
 	if (test_thread_flag(TIF_SIGPENDING))
 		do_signal(regs, in_syscall);
 
-	if (test_and_clear_thread_flag(TIF_NOTIFY_RESUME))
+	if (test_thread_flag(TIF_NOTIFY_RESUME))
 		tracehook_notify_resume(regs);
 }

@@ -907,7 +907,6 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, void *unused,
 		do_signal(regs);
 
 	if (thread_info_flags & _TIF_NOTIFY_RESUME) {
-		clear_thread_flag(TIF_NOTIFY_RESUME);
 		tracehook_notify_resume(regs);
 		rseq_handle_notify_resume(NULL, regs);
 	}

@@ -379,8 +379,6 @@ do_notify_resume(struct pt_regs *regs, unsigned int thread_flags)
 	if (thread_flags & _TIF_SIGPENDING)
 		do_signal(regs);
 
-	if (thread_flags & _TIF_NOTIFY_RESUME) {
-		clear_thread_flag(TIF_NOTIFY_RESUME);
+	if (thread_flags & _TIF_NOTIFY_RESUME)
 		tracehook_notify_resume(regs);
-	}
 }

@@ -317,7 +317,7 @@ asmlinkage int do_notify_resume(struct pt_regs *regs)
 			 */
 			return restart;
 		}
-	} else if (test_and_clear_thread_flag(TIF_NOTIFY_RESUME))
+	} else if (test_thread_flag(TIF_NOTIFY_RESUME))
 		tracehook_notify_resume(regs);
 
 	return 0;

@@ -311,7 +311,6 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
 			}
 			syscall = 0;
 		} else {
-			clear_thread_flag(TIF_NOTIFY_RESUME);
 			tracehook_notify_resume(regs);
 		}
 	}

@@ -606,8 +606,6 @@ void do_notify_resume(struct pt_regs *regs, long in_syscall)
 	if (test_thread_flag(TIF_SIGPENDING))
 		do_signal(regs, in_syscall);
 
-	if (test_thread_flag(TIF_NOTIFY_RESUME)) {
-		clear_thread_flag(TIF_NOTIFY_RESUME);
+	if (test_thread_flag(TIF_NOTIFY_RESUME))
 		tracehook_notify_resume(regs);
-	}
 }

@@ -324,7 +324,6 @@ void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags)
 	}
 
 	if (thread_info_flags & _TIF_NOTIFY_RESUME) {
-		clear_thread_flag(TIF_NOTIFY_RESUME);
 		tracehook_notify_resume(regs);
 		rseq_handle_notify_resume(NULL, regs);
 	}

@@ -313,8 +313,6 @@ asmlinkage __visible void do_notify_resume(struct pt_regs *regs,
 	if (thread_info_flags & _TIF_SIGPENDING)
 		do_signal(regs);
 
-	if (thread_info_flags & _TIF_NOTIFY_RESUME) {
-		clear_thread_flag(TIF_NOTIFY_RESUME);
+	if (thread_info_flags & _TIF_NOTIFY_RESUME)
 		tracehook_notify_resume(regs);
-	}
 }

@@ -535,7 +535,6 @@ void do_signal(struct pt_regs *regs)
 
 void do_notify_resume(struct pt_regs *regs)
 {
-	clear_thread_flag(TIF_NOTIFY_RESUME);
 	tracehook_notify_resume(regs);
 	rseq_handle_notify_resume(NULL, regs);
 }

@@ -502,8 +502,6 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, unsigned int save_r0,
 	if (thread_info_flags & _TIF_SIGPENDING)
 		do_signal(regs, save_r0);
 
-	if (thread_info_flags & _TIF_NOTIFY_RESUME) {
-		clear_thread_flag(TIF_NOTIFY_RESUME);
+	if (thread_info_flags & _TIF_NOTIFY_RESUME)
 		tracehook_notify_resume(regs);
-	}
 }

@@ -523,11 +523,9 @@ void do_notify_resume(struct pt_regs *regs, unsigned long orig_i0,
 {
 	if (thread_info_flags & _TIF_SIGPENDING)
 		do_signal(regs, orig_i0);
-	if (thread_info_flags & _TIF_NOTIFY_RESUME) {
-		clear_thread_flag(TIF_NOTIFY_RESUME);
+	if (thread_info_flags & _TIF_NOTIFY_RESUME)
 		tracehook_notify_resume(regs);
-	}
 }
 
 asmlinkage int do_sys_sigstack(struct sigstack __user *ssptr,
 			       struct sigstack __user *ossptr,

@@ -551,10 +551,8 @@ void do_notify_resume(struct pt_regs *regs, unsigned long orig_i0, unsigned long
 		uprobe_notify_resume(regs);
 	if (thread_info_flags & _TIF_SIGPENDING)
 		do_signal(regs, orig_i0);
-	if (thread_info_flags & _TIF_NOTIFY_RESUME) {
-		clear_thread_flag(TIF_NOTIFY_RESUME);
+	if (thread_info_flags & _TIF_NOTIFY_RESUME)
 		tracehook_notify_resume(regs);
-	}
 	user_enter();
 }
 
@@ -101,7 +101,7 @@ void interrupt_end(void)
 		schedule();
 	if (test_thread_flag(TIF_SIGPENDING))
 		do_signal(regs);
-	if (test_and_clear_thread_flag(TIF_NOTIFY_RESUME))
+	if (test_thread_flag(TIF_NOTIFY_RESUME))
 		tracehook_notify_resume(regs);
 }
 
@@ -1277,7 +1277,7 @@ static void queue_task_work(struct mce *m, int kill_it)
 	else
 		current->mce_kill_me.func = kill_me_maybe;
 
-	task_work_add(current, &current->mce_kill_me, true);
+	task_work_add(current, &current->mce_kill_me, TWA_RESUME);
 }
 
 /*

@@ -561,7 +561,7 @@ static int __rdtgroup_move_task(struct task_struct *tsk,
 	 * callback has been invoked.
 	 */
 	atomic_inc(&rdtgrp->waitcount);
-	ret = task_work_add(tsk, &callback->work, true);
+	ret = task_work_add(tsk, &callback->work, TWA_RESUME);
 	if (ret) {
 		/*
 		 * Task is exiting. Drop the refcount and free the callback.

@@ -501,6 +501,6 @@ void do_notify_resume(struct pt_regs *regs)
 	if (test_thread_flag(TIF_SIGPENDING))
 		do_signal(regs);
 
-	if (test_and_clear_thread_flag(TIF_NOTIFY_RESUME))
+	if (test_thread_flag(TIF_NOTIFY_RESUME))
 		tracehook_notify_resume(regs);
 }

@@ -942,7 +942,7 @@ static void ghes_proc_in_irq(struct irq_work *irq_work)
 			estatus_node->task_work.func = ghes_kick_task_work;
 			estatus_node->task_work_cpu = smp_processor_id();
 			ret = task_work_add(current, &estatus_node->task_work,
-					    true);
+					    TWA_RESUME);
 			if (ret)
 				estatus_node->task_work.func = NULL;
 		}

@@ -2229,7 +2229,7 @@ static void binder_deferred_fd_close(int fd)
 	__close_fd_get_file(fd, &twcb->file);
 	if (twcb->file) {
 		filp_close(twcb->file, current->files);
-		task_work_add(current, &twcb->twork, true);
+		task_work_add(current, &twcb->twork, TWA_RESUME);
 	} else {
 		kfree(twcb);
 	}

@@ -339,7 +339,7 @@ void fput_many(struct file *file, unsigned int refs)
 
 		if (likely(!in_interrupt() && !(task->flags & PF_KTHREAD))) {
 			init_task_work(&file->f_u.fu_rcuhead, ____fput);
-			if (!task_work_add(task, &file->f_u.fu_rcuhead, true))
+			if (!task_work_add(task, &file->f_u.fu_rcuhead, TWA_RESUME))
 				return;
 			/*
 			 * After this task has run exit_task_work(),

@@ -1976,7 +1976,8 @@ static int io_req_task_work_add(struct io_kiocb *req, bool twa_signal_ok)
 {
 	struct task_struct *tsk = req->task;
 	struct io_ring_ctx *ctx = req->ctx;
-	int ret, notify;
+	enum task_work_notify_mode notify;
+	int ret;
 
 	if (tsk->flags & PF_EXITING)
 		return -ESRCH;

@@ -1987,7 +1988,7 @@ static int io_req_task_work_add(struct io_kiocb *req, bool twa_signal_ok)
 	 * processing task_work. There's no reliable way to tell if TWA_RESUME
 	 * will do the job.
 	 */
-	notify = 0;
+	notify = TWA_NONE;
 	if (!(ctx->flags & IORING_SETUP_SQPOLL) && twa_signal_ok)
 		notify = TWA_SIGNAL;
 
@@ -2056,7 +2057,7 @@ static void io_req_task_queue(struct io_kiocb *req)
 
 		init_task_work(&req->task_work, io_req_task_cancel);
 		tsk = io_wq_get_task(req->ctx->io_wq);
-		task_work_add(tsk, &req->task_work, 0);
+		task_work_add(tsk, &req->task_work, TWA_NONE);
 		wake_up_process(tsk);
 	}
 }

@@ -2177,7 +2178,7 @@ static void io_free_req_deferred(struct io_kiocb *req)
 		struct task_struct *tsk;
 
 		tsk = io_wq_get_task(req->ctx->io_wq);
-		task_work_add(tsk, &req->task_work, 0);
+		task_work_add(tsk, &req->task_work, TWA_NONE);
 		wake_up_process(tsk);
 	}
 }

@@ -3291,7 +3292,7 @@ static int io_async_buf_func(struct wait_queue_entry *wait, unsigned mode,
 		/* queue just for cancelation */
 		init_task_work(&req->task_work, io_req_task_cancel);
 		tsk = io_wq_get_task(req->ctx->io_wq);
-		task_work_add(tsk, &req->task_work, 0);
+		task_work_add(tsk, &req->task_work, TWA_NONE);
 		wake_up_process(tsk);
 	}
 	return 1;

@@ -4857,7 +4858,7 @@ static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll,
 
 		WRITE_ONCE(poll->canceled, true);
 		tsk = io_wq_get_task(req->ctx->io_wq);
-		task_work_add(tsk, &req->task_work, 0);
+		task_work_add(tsk, &req->task_work, TWA_NONE);
 		wake_up_process(tsk);
 	}
 	return 1;

@@ -1191,7 +1191,7 @@ static void mntput_no_expire(struct mount *mnt)
 		struct task_struct *task = current;
 		if (likely(!(task->flags & PF_KTHREAD))) {
 			init_task_work(&mnt->mnt_rcu, __cleanup_mnt);
-			if (!task_work_add(task, &mnt->mnt_rcu, true))
+			if (!task_work_add(task, &mnt->mnt_rcu, TWA_RESUME))
 				return;
 		}
 		if (llist_add(&mnt->mnt_llist, &delayed_mntput_list))

@@ -13,9 +13,14 @@ init_task_work(struct callback_head *twork, task_work_func_t func)
 	twork->func = func;
 }
 
-#define TWA_RESUME 1
-#define TWA_SIGNAL 2
-int task_work_add(struct task_struct *task, struct callback_head *twork, int);
+enum task_work_notify_mode {
+	TWA_NONE,
+	TWA_RESUME,
+	TWA_SIGNAL,
+};
+
+int task_work_add(struct task_struct *task, struct callback_head *twork,
+		  enum task_work_notify_mode mode);
 
 struct callback_head *task_work_cancel(struct task_struct *, task_work_func_t);
 void task_work_run(void);

@@ -178,9 +178,9 @@ static inline void set_notify_resume(struct task_struct *task)
  */
 static inline void tracehook_notify_resume(struct pt_regs *regs)
 {
+	clear_thread_flag(TIF_NOTIFY_RESUME);
 	/*
-	 * The caller just cleared TIF_NOTIFY_RESUME. This barrier
-	 * pairs with task_work_add()->set_notify_resume() after
+	 * This barrier pairs with task_work_add()->set_notify_resume() after
 	 * hlist_add_head(task->task_works);
 	 */
 	smp_mb__after_atomic();

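With clearing moved into tracehook_notify_resume() itself, an architecture's exit-to-user path reduces to a bare flag test, as the arch hunks above show. A condensed sketch for a hypothetical architecture (an editor's illustration, not a hunk from this merge):

	void do_notify_resume(struct pt_regs *regs)
	{
		if (test_thread_flag(TIF_SIGPENDING))
			do_signal(regs);

		/* no clear_thread_flag(TIF_NOTIFY_RESUME) in arch code any
		 * more; tracehook_notify_resume() clears the flag itself */
		if (test_thread_flag(TIF_NOTIFY_RESUME))
			tracehook_notify_resume(regs);
	}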
@@ -161,7 +161,6 @@ static unsigned long exit_to_user_mode_loop(struct pt_regs *regs,
 			arch_do_signal(regs);
 
 		if (ti_work & _TIF_NOTIFY_RESUME) {
-			clear_thread_flag(TIF_NOTIFY_RESUME);
 			tracehook_notify_resume(regs);
 			rseq_handle_notify_resume(NULL, regs);
 		}

@@ -16,10 +16,8 @@ static int xfer_to_guest_mode_work(struct kvm_vcpu *vcpu, unsigned long ti_work)
 		if (ti_work & _TIF_NEED_RESCHED)
 			schedule();
 
-		if (ti_work & _TIF_NOTIFY_RESUME) {
-			clear_thread_flag(TIF_NOTIFY_RESUME);
+		if (ti_work & _TIF_NOTIFY_RESUME)
 			tracehook_notify_resume(NULL);
-		}
 
 		ret = arch_xfer_to_guest_mode_handle_work(vcpu, ti_work);
 		if (ret)

@@ -1823,7 +1823,7 @@ void uprobe_copy_process(struct task_struct *t, unsigned long flags)
 
 	t->utask->dup_xol_addr = area->vaddr;
 	init_task_work(&t->utask->dup_xol_work, dup_xol_work);
-	task_work_add(t, &t->utask->dup_xol_work, true);
+	task_work_add(t, &t->utask->dup_xol_work, TWA_RESUME);
 }
 
 /*

@@ -1162,7 +1162,7 @@ static int irq_thread(void *data)
 		handler_fn = irq_thread_fn;
 
 	init_task_work(&on_exit_work, irq_thread_dtor);
-	task_work_add(current, &on_exit_work, false);
+	task_work_add(current, &on_exit_work, TWA_NONE);
 
 	irq_thread_check_affinity(desc, action);
 
@@ -2928,7 +2928,7 @@ static void task_tick_numa(struct rq *rq, struct task_struct *curr)
 		curr->node_stamp += period;
 
 		if (!time_before(jiffies, curr->mm->numa_next_scan))
-			task_work_add(curr, work, true);
+			task_work_add(curr, work, TWA_RESUME);
 	}
 }
 
@@ -9,23 +9,28 @@ static struct callback_head work_exited; /* all we need is ->next == NULL */
  * task_work_add - ask the @task to execute @work->func()
  * @task: the task which should run the callback
  * @work: the callback to run
- * @notify: send the notification if true
+ * @notify: how to notify the targeted task
  *
- * Queue @work for task_work_run() below and notify the @task if @notify.
- * Fails if the @task is exiting/exited and thus it can't process this @work.
- * Otherwise @work->func() will be called when the @task returns from kernel
- * mode or exits.
+ * Queue @work for task_work_run() below and notify the @task if @notify
+ * is @TWA_RESUME or @TWA_SIGNAL. @TWA_SIGNAL works like signals, in that the
+ * it will interrupt the targeted task and run the task_work. @TWA_RESUME
+ * work is run only when the task exits the kernel and returns to user mode,
+ * or before entering guest mode. Fails if the @task is exiting/exited and thus
+ * it can't process this @work. Otherwise @work->func() will be called when the
+ * @task goes through one of the aforementioned transitions, or exits.
  *
- * This is like the signal handler which runs in kernel mode, but it doesn't
- * try to wake up the @task.
+ * If the targeted task is exiting, then an error is returned and the work item
+ * is not queued. It's up to the caller to arrange for an alternative mechanism
+ * in that case.
  *
- * Note: there is no ordering guarantee on works queued here.
+ * Note: there is no ordering guarantee on works queued here. The task_work
+ * list is LIFO.
  *
  * RETURNS:
  * 0 if succeeds or -ESRCH.
  */
-int
-task_work_add(struct task_struct *task, struct callback_head *work, int notify)
+int task_work_add(struct task_struct *task, struct callback_head *work,
+		  enum task_work_notify_mode notify)
 {
 	struct callback_head *head;
 	unsigned long flags;

@@ -38,6 +43,8 @@ task_work_add(struct task_struct *task, struct callback_head *work, int notify)
 	} while (cmpxchg(&task->task_works, head, work) != head);
 
 	switch (notify) {
+	case TWA_NONE:
+		break;
 	case TWA_RESUME:
 		set_notify_resume(task);
 		break;

@@ -54,6 +61,9 @@ task_work_add(struct task_struct *task, struct callback_head *work, int notify)
 			unlock_task_sighand(task, &flags);
 		}
 		break;
+	default:
+		WARN_ON_ONCE(1);
+		break;
 	}
 
 	return 0;

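The updated kerneldoc above stresses the failure mode: once the target task is exiting, task_work_add() returns -ESRCH and the caller keeps ownership of the work item, as the ghes and resctrl hunks handle. A sketch of that contract (an editor's illustration; example_dispatch and example_cleanup are hypothetical):

	static void example_cleanup(struct callback_head *work)
	{
		/* free the item or fall back to another mechanism */
	}

	static void example_dispatch(struct task_struct *tsk,
				     struct callback_head *work)
	{
		/* TWA_SIGNAL interrupts the target as a signal would */
		if (task_work_add(tsk, work, TWA_SIGNAL))
			example_cleanup(work);	/* -ESRCH: task is exiting */
	}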
@@ -1693,7 +1693,7 @@ long keyctl_session_to_parent(void)
 
 	/* the replacement session keyring is applied just prior to userspace
 	 * restarting */
-	ret = task_work_add(parent, newwork, true);
+	ret = task_work_add(parent, newwork, TWA_RESUME);
 	if (!ret)
 		newwork = NULL;
 unlock:

@@ -99,7 +99,7 @@ static void report_access(const char *access, struct task_struct *target,
 	info->access = access;
 	info->target = target;
 	info->agent = agent;
-	if (task_work_add(current, &info->work, true) == 0)
+	if (task_work_add(current, &info->work, TWA_RESUME) == 0)
 		return; /* success */
 
 	WARN(1, "report_access called from exiting task");