/*
 * workqueue.h --- work queue handling for Linux.
 */

#ifndef _LINUX_WORKQUEUE_H
#define _LINUX_WORKQUEUE_H

#include <linux/timer.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <asm/atomic.h>

struct workqueue_struct;

struct work_struct;
typedef void (*work_func_t)(struct work_struct *work);

/*
 * The first word is the work queue pointer and the flags rolled into
 * one
 */
#define work_data_bits(work) ((unsigned long *)(&(work)->data))

struct work_struct {
	atomic_long_t data;
#define WORK_STRUCT_PENDING 0		/* T if work item pending execution */
#define WORK_STRUCT_FLAG_MASK (3UL)
#define WORK_STRUCT_WQ_DATA_MASK (~WORK_STRUCT_FLAG_MASK)
	struct list_head entry;
	work_func_t func;
};

#define WORK_DATA_INIT()	ATOMIC_LONG_INIT(0)

struct delayed_work {
	struct work_struct work;
	struct timer_list timer;
};

struct execute_work {
	struct work_struct work;
};

#define __WORK_INITIALIZER(n, f) {				\
	.data = WORK_DATA_INIT(),				\
	.entry = { &(n).entry, &(n).entry },			\
	.func = (f),						\
	}

#define __DELAYED_WORK_INITIALIZER(n, f) {			\
	.work = __WORK_INITIALIZER((n).work, (f)),		\
	.timer = TIMER_INITIALIZER(NULL, 0, 0),			\
	}

#define DECLARE_WORK(n, f)					\
	struct work_struct n = __WORK_INITIALIZER(n, f)

#define DECLARE_DELAYED_WORK(n, f)				\
	struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f)
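
/*
 * Illustrative sketch (not part of this header): declaring work items
 * statically.  The handler runs in process context from a workqueue
 * thread; my_handler, my_work, my_dwork and do_something() are
 * hypothetical names.
 *
 *	static void my_handler(struct work_struct *work)
 *	{
 *		do_something();
 *	}
 *
 *	static DECLARE_WORK(my_work, my_handler);
 *	static DECLARE_DELAYED_WORK(my_dwork, my_handler);
 *
 *	schedule_work(&my_work);
 *	schedule_delayed_work(&my_dwork, HZ);
 */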

/*
 * initialize a work item's function pointer
 */
#define PREPARE_WORK(_work, _func)				\
	do {							\
		(_work)->func = (_func);			\
	} while (0)

#define PREPARE_DELAYED_WORK(_work, _func)			\
	PREPARE_WORK(&(_work)->work, (_func))

/*
 * initialize all of a work item in one go
 *
 * NOTE! No point in using "atomic_long_set()": using a direct
 * assignment of the work data initializer allows the compiler
 * to generate better code.
 */
#define INIT_WORK(_work, _func)					\
	do {							\
		(_work)->data = (atomic_long_t) WORK_DATA_INIT();	\
		INIT_LIST_HEAD(&(_work)->entry);		\
		PREPARE_WORK((_work), (_func));			\
	} while (0)

#define INIT_DELAYED_WORK(_work, _func)				\
	do {							\
		INIT_WORK(&(_work)->work, (_func));		\
		init_timer(&(_work)->timer);			\
	} while (0)

#define INIT_DELAYED_WORK_DEFERRABLE(_work, _func)		\
	do {							\
		INIT_WORK(&(_work)->work, (_func));		\
		init_timer_deferrable(&(_work)->timer);		\
	} while (0)
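
/*
 * Illustrative sketch (not part of this header): initializing a work item
 * embedded in a larger object at runtime, then recovering that object with
 * container_of() inside the handler.  struct my_dev, my_dev_work(),
 * process_events() and my_wq are hypothetical.
 *
 *	struct my_dev {
 *		struct work_struct work;
 *		int pending_events;
 *	};
 *
 *	static void my_dev_work(struct work_struct *work)
 *	{
 *		struct my_dev *dev = container_of(work, struct my_dev, work);
 *
 *		process_events(dev);
 *	}
 *
 *	INIT_WORK(&dev->work, my_dev_work);
 *	queue_work(my_wq, &dev->work);
 */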

/**
 * work_pending - Find out whether a work item is currently pending
 * @work: The work item in question
 */
#define work_pending(work)					\
	test_bit(WORK_STRUCT_PENDING, work_data_bits(work))

/**
 * delayed_work_pending - Find out whether a delayable work item is currently
 * pending
 * @work: The work item in question
 */
#define delayed_work_pending(w)					\
	work_pending(&(w)->work)

/**
 * work_clear_pending - for internal use only, mark a work item as not pending
 * @work: The work item in question
 */
#define work_clear_pending(work)				\
	clear_bit(WORK_STRUCT_PENDING, work_data_bits(work))

extern struct workqueue_struct *__create_workqueue(const char *name,
						    int singlethread,
						    int freezeable);
#define create_workqueue(name) __create_workqueue((name), 0, 0)
#define create_freezeable_workqueue(name) __create_workqueue((name), 0, 1)
#define create_singlethread_workqueue(name) __create_workqueue((name), 1, 0)

extern void destroy_workqueue(struct workqueue_struct *wq);

extern int FASTCALL(queue_work(struct workqueue_struct *wq, struct work_struct *work));
extern int FASTCALL(queue_delayed_work(struct workqueue_struct *wq, struct delayed_work *work, unsigned long delay));
extern int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *work, unsigned long delay);
extern void FASTCALL(flush_workqueue(struct workqueue_struct *wq));
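
/*
 * Illustrative sketch (not part of this header): typical life cycle of a
 * private workqueue.  my_wq, my_work and my_dwork are hypothetical.
 *
 *	struct workqueue_struct *my_wq;
 *
 *	my_wq = create_singlethread_workqueue("my_wq");
 *	if (!my_wq)
 *		return -ENOMEM;
 *
 *	queue_work(my_wq, &my_work);
 *	queue_delayed_work(my_wq, &my_dwork, HZ);
 *
 *	flush_workqueue(my_wq);
 *	destroy_workqueue(my_wq);
 */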

/*
 * flush_work() flushes a *single* work item, unlike flush_scheduled_work(),
 * which blocks behind _all_ presently-queued works.  That distinction
 * matters when the caller holds a lock: if any queued work happens to want
 * the same lock, flushing everything can deadlock.  One example is the phy
 * layer, which flushes work while holding rtnl_lock(); if a linkwatch event
 * is queued, its callback also takes rtnl_lock() and the flush deadlocks.
 *
 * flush_work() non-blockingly dequeues the work item it is asked to flush,
 * then waits for its handler to complete.  Each cpu_workqueue_struct keeps
 * its currently running work in ->current_work; when flush_work() sees
 * ->current_work == work, it inserts a barrier at the head of ->worklist
 * (i.e. right after that work) and waits for its completion, so the caller
 * is woken before any other "regular" work gets a chance to run on that
 * CPU.  If a CPU goes away while we wait, take_over_work() moves the queued
 * barrier to another CPU, where it eventually fires and wakes the waiter.
 */
extern void flush_work(struct workqueue_struct *wq, struct work_struct *work);
extern void flush_work_keventd(struct work_struct *work);
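
/*
 * Illustrative sketch (not part of this header): flushing just the one work
 * item the caller cares about, rather than everything on the queue, which
 * avoids deadlocks when the caller holds a lock that some unrelated queued
 * work might also take.  my_wq and my_work are hypothetical; for works on
 * the shared keventd queue, flush_work_keventd() is the equivalent.
 *
 *	flush_work(my_wq, &my_work);
 */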

extern int FASTCALL(schedule_work(struct work_struct *work));
extern int FASTCALL(schedule_delayed_work(struct delayed_work *work, unsigned long delay));

extern int schedule_delayed_work_on(int cpu, struct delayed_work *work, unsigned long delay);
extern int schedule_on_each_cpu(work_func_t func);
extern void flush_scheduled_work(void);
extern int current_is_keventd(void);
extern int keventd_up(void);

extern void init_workqueues(void);
int execute_in_process_context(work_func_t fn, struct execute_work *);
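
/*
 * Illustrative sketch (not part of this header): schedule_on_each_cpu()
 * runs a handler once on every CPU and waits for completion, while
 * execute_in_process_context() is for callers that may be in interrupt
 * context and need the callback run from process context.
 * drain_local_caches(), my_release() and my_ew are hypothetical.
 *
 *	schedule_on_each_cpu(drain_local_caches);
 *
 *	static struct execute_work my_ew;
 *	execute_in_process_context(my_release, &my_ew);
 */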

/*
 * Kill off a pending schedule_delayed_work(). Note that the work callback
 * function may still be running on return from cancel_delayed_work(), unless
 * it returns 1 and the work doesn't re-arm itself. Run flush_workqueue() or
 * flush_work() or cancel_work_sync() to wait on it.
 */

static inline int cancel_delayed_work(struct delayed_work *work)
{
	int ret;

	ret = del_timer(&work->timer);
	if (ret)
		work_clear_pending(&work->work);
	return ret;
}
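
/*
 * Illustrative sketch (not part of this header): tearing down a delayed
 * work item.  cancel_delayed_work() only stops the timer; if it returns 0
 * the work may already be queued or running, so wait for it as described
 * above.  my_wq and my_dwork are hypothetical.
 *
 *	if (!cancel_delayed_work(&my_dwork))
 *		flush_workqueue(my_wq);
 */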

extern void cancel_rearming_delayed_work(struct delayed_work *work);

/* Obsolete. Use cancel_rearming_delayed_work() */
static inline
void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq,
				       struct delayed_work *work)
{
	cancel_rearming_delayed_work(work);
}

#endif