block: add missing blk_queue_dead() checks
blk_insert_cloned_request(), blk_execute_rq_nowait() and blk_flush_plug_list() either didn't check whether the queue was dead or did it without holding queue_lock. Update them so that dead state is checked while holding queue_lock.

AFAICS, this plugs all holes (requeue doesn't matter as the request is transitioning atomically from in_flight to queued).

Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 481a7d6479
commit 8ba61435d7
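The fix in every hunk below is the same discipline: blk_queue_dead() may only be tested while queue_lock is held, since blk_cleanup_queue() can mark the queue dead the moment the lock is dropped. A minimal sketch of the pattern, assembled from the hunks below; the helper name insert_if_alive() is hypothetical and not part of the patch:

	/* Hypothetical helper (not in the patch): insert @rq only if @q is alive. */
	static int insert_if_alive(struct request_queue *q, struct request *rq,
				   int where)
	{
		unsigned long flags;

		spin_lock_irqsave(q->queue_lock, flags);
		if (unlikely(blk_queue_dead(q))) {
			/* blk_cleanup_queue() got here first; don't touch @q */
			spin_unlock_irqrestore(q->queue_lock, flags);
			return -ENODEV;
		}
		/* dead check and insertion happen under one lock hold */
		__elv_add_request(q, rq, where);
		spin_unlock_irqrestore(q->queue_lock, flags);
		return 0;
	}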
block/blk-core.c

@@ -1731,6 +1731,10 @@ int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
 		return -EIO;
 
 	spin_lock_irqsave(q->queue_lock, flags);
+	if (unlikely(blk_queue_dead(q))) {
+		spin_unlock_irqrestore(q->queue_lock, flags);
+		return -ENODEV;
+	}
 
 	/*
 	 * Submitting request must be dequeued before calling this function
@@ -2704,6 +2708,14 @@ static void queue_unplugged(struct request_queue *q, unsigned int depth,
 {
 	trace_block_unplug(q, depth, !from_schedule);
 
+	/*
+	 * Don't mess with dead queue.
+	 */
+	if (unlikely(blk_queue_dead(q))) {
+		spin_unlock(q->queue_lock);
+		return;
+	}
+
 	/*
 	 * If we are punting this to kblockd, then we can safely drop
 	 * the queue_lock before waking kblockd (which needs to take
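Note the bare spin_unlock() in the early return above: queue_unplugged() is entered with queue_lock held and is expected to release it before returning (it normally drops the lock after kicking the queue, per the comment that follows), so the dead-queue branch must unlock before bailing out.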
@@ -2780,6 +2792,15 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 			depth = 0;
 			spin_lock(q->queue_lock);
 		}
+
+		/*
+		 * Short-circuit if @q is dead
+		 */
+		if (unlikely(blk_queue_dead(q))) {
+			__blk_end_request_all(rq, -ENODEV);
+			continue;
+		}
+
 		/*
 		 * rq is already accounted, so use raw insert
 		 */
block/blk-exec.c

@@ -50,7 +50,11 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
 {
 	int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
 
+	WARN_ON(irqs_disabled());
+	spin_lock_irq(q->queue_lock);
+
 	if (unlikely(blk_queue_dead(q))) {
+		spin_unlock_irq(q->queue_lock);
 		rq->errors = -ENXIO;
 		if (rq->end_io)
 			rq->end_io(rq, rq->errors);
@@ -59,8 +63,6 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
 
 	rq->rq_disk = bd_disk;
 	rq->end_io = done;
-	WARN_ON(irqs_disabled());
-	spin_lock_irq(q->queue_lock);
 	__elv_add_request(q, rq, where);
 	__blk_run_queue(q);
 	/* the queue is stopped so it won't be run */
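Taken together, the two blk-exec.c hunks are a reordering rather than new logic: WARN_ON(irqs_disabled()) and spin_lock_irq() move up so that the blk_queue_dead() test runs with queue_lock held, and the dead branch now unlocks before completing the request with -ENXIO. A condensed before/after sketch of just the ordering (comments are mine, not from the patch):

	/* Before (racy): @q can be killed after the check, before the lock. */
	if (unlikely(blk_queue_dead(q)))
		goto fail;			/* checked without queue_lock */
	spin_lock_irq(q->queue_lock);
	__elv_add_request(q, rq, where);

	/* After: check and insert share one critical section. */
	spin_lock_irq(q->queue_lock);
	if (unlikely(blk_queue_dead(q))) {
		spin_unlock_irq(q->queue_lock);
		rq->errors = -ENXIO;		/* complete @rq as failed */
		if (rq->end_io)
			rq->end_io(rq, rq->errors);
		return;
	}
	__elv_add_request(q, rq, where);
	__blk_run_queue(q);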