blk-mq: support concurrent queue quiesce/unquiesce
blk_mq_quiesce_queue() is used fairly widely now, but so far we don't support concurrent/nested quiesce. The biggest issue is that unquiesce can happen unexpectedly when quiesce/unquiesce are run concurrently from more than one context.

This patch introduces q->quiesce_depth to deal with concurrent quiesce, and we only unquiesce the queue when the last/outermost context unquiesces it.

Several kernel panics have been reported[1][2][3] when running quiesce stress tests, and this patch has been verified against those reports.

[1] https://lore.kernel.org/linux-block/9b21c797-e505-3821-4f5b-df7bf9380328@huawei.com/T/#m1fc52431fad7f33b1ffc3f12c4450e4238540787
[2] https://lore.kernel.org/linux-block/9b21c797-e505-3821-4f5b-df7bf9380328@huawei.com/T/#m10ad90afeb9c8cc318334190a7c24c8b5c5e0722
[3] https://listman.redhat.com/archives/dm-devel/2021-September/msg00189.html

Signed-off-by: Ming Lei <ming.lei@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Link: https://lore.kernel.org/r/20211014081710.1871747-7-ming.lei@redhat.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 1d35d519d8
commit e70feb8b3e
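To make the new semantics concrete, here is a minimal userspace sketch of the counting scheme added by the diff below (struct fake_queue, quiesce()/unquiesce() and the pthread locking are illustrative assumptions, not the kernel code): the quiesced state is entered by the first quiescer and left only when the last unquiesce drops the depth back to zero.

	/*
	 * Illustrative userspace model of quiesce_depth counting.
	 * fake_queue, quiesce() and unquiesce() are made-up names;
	 * only the counting logic mirrors the patch.
	 */
	#include <pthread.h>
	#include <stdbool.h>
	#include <stdio.h>

	struct fake_queue {
		pthread_mutex_t lock;
		int quiesce_depth;
		bool quiesced;
	};

	static void quiesce(struct fake_queue *q)
	{
		pthread_mutex_lock(&q->lock);
		if (!q->quiesce_depth++)
			q->quiesced = true;	/* first quiescer sets the flag */
		pthread_mutex_unlock(&q->lock);
	}

	static void unquiesce(struct fake_queue *q)
	{
		bool run_queue = false;

		pthread_mutex_lock(&q->lock);
		if (q->quiesce_depth <= 0)
			fprintf(stderr, "unbalanced unquiesce\n");
		else if (!--q->quiesce_depth) {
			q->quiesced = false;	/* last quiescer clears the flag */
			run_queue = true;
		}
		pthread_mutex_unlock(&q->lock);

		if (run_queue)
			printf("dispatching requests held during quiesce\n");
	}

	int main(void)
	{
		struct fake_queue q = { .lock = PTHREAD_MUTEX_INITIALIZER };

		quiesce(&q);				/* context A */
		quiesce(&q);				/* context B, concurrently */
		unquiesce(&q);				/* B done: queue stays quiesced */
		printf("quiesced: %d\n", q.quiesced);	/* prints 1 */
		unquiesce(&q);				/* A done: queue resumes */
		printf("quiesced: %d\n", q.quiesced);	/* prints 0 */
		return 0;
	}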
@@ -241,7 +241,12 @@ EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);
  */
 void blk_mq_quiesce_queue_nowait(struct request_queue *q)
 {
-	blk_queue_flag_set(QUEUE_FLAG_QUIESCED, q);
+	unsigned long flags;
+
+	spin_lock_irqsave(&q->queue_lock, flags);
+	if (!q->quiesce_depth++)
+		blk_queue_flag_set(QUEUE_FLAG_QUIESCED, q);
+	spin_unlock_irqrestore(&q->queue_lock, flags);
 }
 EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue_nowait);
 
@@ -282,10 +287,21 @@ EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue);
  */
 void blk_mq_unquiesce_queue(struct request_queue *q)
 {
-	blk_queue_flag_clear(QUEUE_FLAG_QUIESCED, q);
+	unsigned long flags;
+	bool run_queue = false;
+
+	spin_lock_irqsave(&q->queue_lock, flags);
+	if (WARN_ON_ONCE(q->quiesce_depth <= 0)) {
+		;
+	} else if (!--q->quiesce_depth) {
+		blk_queue_flag_clear(QUEUE_FLAG_QUIESCED, q);
+		run_queue = true;
+	}
+	spin_unlock_irqrestore(&q->queue_lock, flags);
 
 	/* dispatch requests which are inserted during quiescing */
-	blk_mq_run_hw_queues(q, true);
+	if (run_queue)
+		blk_mq_run_hw_queues(q, true);
 }
 EXPORT_SYMBOL_GPL(blk_mq_unquiesce_queue);
 
@@ -315,6 +315,8 @@ struct request_queue {
 	 */
 	struct mutex		mq_freeze_lock;
 
+	int			quiesce_depth;
+
 	struct blk_mq_tag_set	*tag_set;
 	struct list_head	tag_set_list;
 	struct bio_set		bio_split;
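And a hedged caller-side view of the resulting behaviour (blk_mq_quiesce_queue() and blk_mq_unquiesce_queue() are the real exported helpers; the two contexts are hypothetical examples, not code from this patch):

	/* context A (e.g. error handling) */
	blk_mq_quiesce_queue(q);	/* depth 0 -> 1: QUIESCED set, dispatch drained */
	/* context B (e.g. a queue update), running concurrently */
	blk_mq_quiesce_queue(q);	/* depth 1 -> 2: flag already set */
	blk_mq_unquiesce_queue(q);	/* depth 2 -> 1: queue stays quiesced */
	/* context A finishes last */
	blk_mq_unquiesce_queue(q);	/* depth 1 -> 0: flag cleared, hw queues rerun */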