mirror of https://gitee.com/openkylin/linux.git
workqueue: use mod_delayed_work() instead of __cancel + queue
Now that mod_delayed_work() is safe to call from IRQ handlers,
__cancel_delayed_work() followed by queue_delayed_work() can be
replaced with mod_delayed_work(). Most conversions are
straightforward except for the following:

* net/core/link_watch.c: linkwatch_schedule_work() was doing quite an
  elaborate dance around its delayed_work. Collapse it so that
  linkwatch_work is queued for immediate execution if LW_URGENT is set
  and the existing timer is kept otherwise.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Tomi Valkeinen <tomi.valkeinen@ti.com>
parent e0aecdd874
commit e7c2f96744
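The conversion the commit applies is mechanical in most places. As a rough, illustrative sketch (not part of the commit itself; my_wq, my_dwork and my_delay are placeholder names), the old and new patterns look like this:

	/* Before: cancel a possibly pending timer, then queue again. */
	__cancel_delayed_work(&my_dwork);
	queue_delayed_work(my_wq, &my_dwork, my_delay);

	/* After: one call. mod_delayed_work() re-arms the timer to the new
	 * delay if the work is already pending, or queues it otherwise, and
	 * (as the commit message notes) it is now safe from IRQ handlers.
	 */
	mod_delayed_work(my_wq, &my_dwork, my_delay);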
@@ -319,10 +319,8 @@ EXPORT_SYMBOL(__blk_run_queue);
  */
 void blk_run_queue_async(struct request_queue *q)
 {
-	if (likely(!blk_queue_stopped(q))) {
-		__cancel_delayed_work(&q->delay_work);
-		queue_delayed_work(kblockd_workqueue, &q->delay_work, 0);
-	}
+	if (likely(!blk_queue_stopped(q)))
+		mod_delayed_work(kblockd_workqueue, &q->delay_work, 0);
 }
 EXPORT_SYMBOL(blk_run_queue_async);
 
@@ -929,12 +929,7 @@ throtl_schedule_delayed_work(struct throtl_data *td, unsigned long delay)
 
 	/* schedule work if limits changed even if no bio is queued */
 	if (total_nr_queued(td) || td->limits_changed) {
-		/*
-		 * We might have a work scheduled to be executed in future.
-		 * Cancel that and schedule a new one.
-		 */
-		__cancel_delayed_work(dwork);
-		queue_delayed_work(kthrotld_workqueue, dwork, delay);
+		mod_delayed_work(kthrotld_workqueue, dwork, delay);
 		throtl_log(td, "schedule work. delay=%lu jiffies=%lu",
 				delay, jiffies);
 	}
@@ -672,7 +672,6 @@ static void __reschedule_timeout(int drive, const char *message)
 
 	if (drive == current_reqD)
 		drive = current_drive;
-	__cancel_delayed_work(&fd_timeout);
 
 	if (drive < 0 || drive >= N_DRIVE) {
 		delay = 20UL * HZ;
@@ -680,7 +679,7 @@ static void __reschedule_timeout(int drive, const char *message)
 	} else
 		delay = UDP->timeout;
 
-	queue_delayed_work(floppy_wq, &fd_timeout, delay);
+	mod_delayed_work(floppy_wq, &fd_timeout, delay);
 	if (UDP->flags & FD_DEBUG)
 		DPRINT("reschedule timeout %s\n", message);
 	timeout_message = message;
@@ -2013,12 +2013,10 @@ static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv)
 		if (time_after(mad_agent_priv->timeout,
 			       mad_send_wr->timeout)) {
 			mad_agent_priv->timeout = mad_send_wr->timeout;
-			__cancel_delayed_work(&mad_agent_priv->timed_work);
 			delay = mad_send_wr->timeout - jiffies;
 			if ((long)delay <= 0)
 				delay = 1;
-			queue_delayed_work(mad_agent_priv->qp_info->
-					   port_priv->wq,
-					   &mad_agent_priv->timed_work, delay);
+			mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
+					 &mad_agent_priv->timed_work, delay);
 		}
 	}
@@ -2052,12 +2050,10 @@ static void wait_for_response(struct ib_mad_send_wr_private *mad_send_wr)
 	list_add(&mad_send_wr->agent_list, list_item);
 
 	/* Reschedule a work item if we have a shorter timeout */
-	if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list) {
-		__cancel_delayed_work(&mad_agent_priv->timed_work);
-		queue_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
-				   &mad_agent_priv->timed_work, delay);
-	}
+	if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list)
+		mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
+				 &mad_agent_priv->timed_work, delay);
 }
 
 void ib_reset_mad_timeout(struct ib_mad_send_wr_private *mad_send_wr,
 			  int timeout_ms)
@@ -156,8 +156,7 @@ static irqreturn_t qt2160_irq(int irq, void *_qt2160)
 
 	spin_lock_irqsave(&qt2160->lock, flags);
 
-	__cancel_delayed_work(&qt2160->dwork);
-	schedule_delayed_work(&qt2160->dwork, 0);
+	mod_delayed_work(system_wq, &qt2160->dwork, 0);
 
 	spin_unlock_irqrestore(&qt2160->lock, flags);
 
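The two input-driver hunks (qt2160 above, synaptics_i2c below) call the new helper with interrupts disabled, which is exactly the property the commit message relies on: mod_delayed_work() is safe to call from IRQ handlers. A minimal sketch of that usage, with a hypothetical dev structure standing in for the driver state:

	spin_lock_irqsave(&dev->lock, flags);
	/* Re-arm the work for immediate execution, whether or not it was
	 * already pending; no separate cancel step is needed. */
	mod_delayed_work(system_wq, &dev->dwork, 0);
	spin_unlock_irqrestore(&dev->lock, flags);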
@@ -376,12 +376,7 @@ static void synaptics_i2c_reschedule_work(struct synaptics_i2c *touch,
 
 	spin_lock_irqsave(&touch->lock, flags);
 
-	/*
-	 * If work is already scheduled then subsequent schedules will not
-	 * change the scheduled time that's why we have to cancel it first.
-	 */
-	__cancel_delayed_work(&touch->dwork);
-	schedule_delayed_work(&touch->dwork, delay);
+	mod_delayed_work(system_wq, &touch->dwork, delay);
 
 	spin_unlock_irqrestore(&touch->lock, flags);
 }
@@ -120,22 +120,13 @@ static void linkwatch_schedule_work(int urgent)
 		delay = 0;
 
 	/*
-	 * This is true if we've scheduled it immeditately or if we don't
-	 * need an immediate execution and it's already pending.
+	 * If urgent, schedule immediate execution; otherwise, don't
+	 * override the existing timer.
 	 */
-	if (schedule_delayed_work(&linkwatch_work, delay) == !delay)
-		return;
-
-	/* Don't bother if there is nothing urgent. */
-	if (!test_bit(LW_URGENT, &linkwatch_flags))
-		return;
-
-	/* It's already running which is good enough. */
-	if (!__cancel_delayed_work(&linkwatch_work))
-		return;
-
-	/* Otherwise we reschedule it again for immediate execution. */
-	schedule_delayed_work(&linkwatch_work, 0);
+	if (test_bit(LW_URGENT, &linkwatch_flags))
+		mod_delayed_work(system_wq, &linkwatch_work, 0);
+	else
+		schedule_delayed_work(&linkwatch_work, delay);
 }
 
 
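As the commit message points out, the link_watch change is the only non-mechanical one. The design choice it encodes: schedule_delayed_work() does nothing when the work is already pending (the existing timer is kept), while mod_delayed_work() always re-arms the timer. So the urgent path forces immediate execution and the non-urgent path deliberately keeps whatever timer is already running. Pieced together from the hunk above (the earlier part of the function, which computes delay, is unchanged and not shown), the tail of linkwatch_schedule_work() now reduces to:

	if (test_bit(LW_URGENT, &linkwatch_flags))
		mod_delayed_work(system_wq, &linkwatch_work, 0);
	else
		schedule_delayed_work(&linkwatch_work, delay);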