blk-mq: Avoid that requeueing starts stopped queues
Since blk_mq_requeue_work() starts stopped queues, and since execution of this function can be scheduled after a queue has been stopped, it is not possible to stop queues without using an additional state variable to track whether or not the queue has been stopped. Hence modify blk_mq_requeue_work() such that it does not start stopped queues. My conclusion after a review of the blk_mq_stop_hw_queues() and blk_mq_{delay_,}kick_requeue_list() callers is as follows:

* In the dm driver, starting and stopping queues should only happen if __dm_suspend() or __dm_resume() is called and not if the requeue list is processed.

* In the SCSI core, queue stopping and starting should only be performed by the scsi_internal_device_block() and scsi_internal_device_unblock() functions, but not by any other function. Although the blk_mq_stop_hw_queue() call in scsi_queue_rq() may help to reduce CPU load if a LLD queue is full, figuring out whether or not a queue should be restarted when requeueing a command would require introducing additional locking in scsi_mq_requeue_cmd() to avoid a race with scsi_internal_device_block(). Avoid this complexity by removing the blk_mq_stop_hw_queue() call from scsi_queue_rq().

* In the NVMe core, only the functions that call blk_mq_start_stopped_hw_queues() explicitly should start stopped queues.

* A blk_mq_start_stopped_hw_queues() call must be added in the xen-blkfront driver in its blkif_recover() function.

Signed-off-by: Bart Van Assche <bart.vanassche@sandisk.com>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: Roger Pau Monné <roger.pau@citrix.com>
Cc: Mike Snitzer <snitzer@redhat.com>
Cc: James Bottomley <jejb@linux.vnet.ibm.com>
Cc: Martin K. Petersen <martin.petersen@oracle.com>
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@fb.com>
parent 2253efc850
commit 52d7f1b5c2
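To make the new contract explicit, below is a minimal, hypothetical driver-side recovery sketch (the helper name example_requeue_and_restart and its surroundings are illustrative and not part of the patch). It condenses what the xen-blkfront hunk further down does: because blk_mq_requeue_work() no longer starts stopped queues, a driver that stopped its hardware queues must restart them itself before kicking the requeue list.

	#include <linux/blkdev.h>
	#include <linux/blk-mq.h>

	/*
	 * Hypothetical recovery helper illustrating the behaviour after this
	 * patch: processing the requeue list no longer restarts stopped hw
	 * queues, so the driver has to do that explicitly (as blkif_recover()
	 * now does in the xen-blkfront hunk below).
	 */
	static void example_requeue_and_restart(struct request_queue *q,
						struct request *rq)
	{
		/*
		 * Assumes the hw queues were stopped earlier, e.g. via
		 * blk_mq_stop_hw_queues(q).
		 */

		/* Put the failed request back on the requeue list. */
		blk_mq_requeue_request(rq);

		/*
		 * Explicitly restart the stopped hw queues;
		 * blk_mq_requeue_work() will no longer do this as a side
		 * effect.
		 */
		blk_mq_start_stopped_hw_queues(q, true);

		/* Schedule the requeue work to re-insert and run the requests. */
		blk_mq_kick_requeue_list(q);
	}

With blk_mq_requeue_work() calling blk_mq_run_hw_queues(q, false) instead of blk_mq_start_hw_queues(q), a queue that a driver has deliberately stopped now stays stopped until the driver restarts it.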
@@ -501,11 +501,7 @@ static void blk_mq_requeue_work(struct work_struct *work)
 		blk_mq_insert_request(rq, false, false, false);
 	}
 
-	/*
-	 * Use the start variant of queue running here, so that running
-	 * the requeue work will kick stopped queues.
-	 */
-	blk_mq_start_hw_queues(q);
+	blk_mq_run_hw_queues(q, false);
 }
 
 void blk_mq_add_to_requeue_list(struct request *rq, bool at_head)
@@ -2045,6 +2045,7 @@ static int blkif_recover(struct blkfront_info *info)
 		BUG_ON(req->nr_phys_segments > segs);
 		blk_mq_requeue_request(req);
 	}
+	blk_mq_start_stopped_hw_queues(info->rq, true);
 	blk_mq_kick_requeue_list(info->rq);
 
 	while ((bio = bio_list_pop(&info->bio_list)) != NULL) {
@@ -338,12 +338,7 @@ static void dm_old_requeue_request(struct request *rq)
 
 static void __dm_mq_kick_requeue_list(struct request_queue *q, unsigned long msecs)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(q->queue_lock, flags);
-	if (!blk_queue_stopped(q))
-		blk_mq_delay_kick_requeue_list(q, msecs);
-	spin_unlock_irqrestore(q->queue_lock, flags);
+	blk_mq_delay_kick_requeue_list(q, msecs);
 }
 
 void dm_mq_kick_requeue_list(struct mapped_device *md)
@@ -1952,7 +1952,6 @@ static int scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
 out:
 	switch (ret) {
 	case BLK_MQ_RQ_QUEUE_BUSY:
-		blk_mq_stop_hw_queue(hctx);
 		if (atomic_read(&sdev->device_busy) == 0 &&
 		    !scsi_device_blocked(sdev))
 			blk_mq_delay_queue(hctx, SCSI_QUEUE_DELAY);