blk-mq: fix passthrough plugging
First, we can't add a request to the plug list from
blk_mq_request_bypass_insert(), which may itself be called while the
plug list is being flushed; doing so leads to nested plugging.

Second, if a polled passthrough request is inserted via
blk_execute_rq(), it must not be added to the plug list either, since
I/O polling requires the request to be issued to the driver.

Fix both by moving the plugging decision into blk_execute_rq_nowait().
Cc: Christoph Hellwig <hch@lst.de>
Fixes: 1c2d2fff6d ("block: wire-up support for passthrough plugging")
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Link: https://lore.kernel.org/r/20220512140010.1458645-1-ming.lei@redhat.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit a327c341dc
parent f569add471

 block/blk-mq.c | 114
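To make the first problem described in the commit message concrete, here is a minimal user-space sketch, not kernel code: toy_plug, toy_request, add_to_plug, flush_plug and bypass_insert are invented names used only to illustrate why a request inserted while the plug list is being drained must bypass the plug instead of being re-plugged. The actual patch follows below.

/*
 * Toy user-space model (not kernel code) of the hazard described above:
 * if draining the plug list could re-add requests to the same plug,
 * the flush would nest instead of making progress.  All names here are
 * invented for illustration.
 */
#include <stdio.h>

struct toy_request {
        struct toy_request *next;
        int id;
};

struct toy_plug {
        struct toy_request *head;       /* singly linked plug list */
};

/* Stand-in for issuing a request to the driver. */
static void issue_rq(struct toy_request *rq)
{
        printf("issued request %d\n", rq->id);
}

/*
 * Insert path that may run while the plug is being flushed: it must
 * not touch the plug, only hand the request on for dispatch.
 */
static void bypass_insert(struct toy_request *rq)
{
        issue_rq(rq);
}

/* Plugged path: just queue the request for a later flush. */
static void add_to_plug(struct toy_plug *plug, struct toy_request *rq)
{
        rq->next = plug->head;
        plug->head = rq;
}

/* Drain the plug; anything inserted from here must use bypass_insert(). */
static void flush_plug(struct toy_plug *plug)
{
        while (plug->head) {
                struct toy_request *rq = plug->head;

                plug->head = rq->next;
                bypass_insert(rq);
        }
}

int main(void)
{
        struct toy_plug plug = { 0 };
        struct toy_request a = { .next = NULL, .id = 1 };
        struct toy_request b = { .next = NULL, .id = 2 };

        add_to_plug(&plug, &a);
        add_to_plug(&plug, &b);
        flush_plug(&plug);      /* issues request 2, then request 1 */
        return 0;
}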
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1169,6 +1169,62 @@ static void blk_end_sync_rq(struct request *rq, blk_status_t error)
 	complete(waiting);
 }
 
+/*
+ * Allow 2x BLK_MAX_REQUEST_COUNT requests on plug queue for multiple
+ * queues. This is important for md arrays to benefit from merging
+ * requests.
+ */
+static inline unsigned short blk_plug_max_rq_count(struct blk_plug *plug)
+{
+	if (plug->multiple_queues)
+		return BLK_MAX_REQUEST_COUNT * 2;
+	return BLK_MAX_REQUEST_COUNT;
+}
+
+static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq)
+{
+	struct request *last = rq_list_peek(&plug->mq_list);
+
+	if (!plug->rq_count) {
+		trace_block_plug(rq->q);
+	} else if (plug->rq_count >= blk_plug_max_rq_count(plug) ||
+		   (!blk_queue_nomerges(rq->q) &&
+		    blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE)) {
+		blk_mq_flush_plug_list(plug, false);
+		trace_block_plug(rq->q);
+	}
+
+	if (!plug->multiple_queues && last && last->q != rq->q)
+		plug->multiple_queues = true;
+	if (!plug->has_elevator && (rq->rq_flags & RQF_ELV))
+		plug->has_elevator = true;
+	rq->rq_next = NULL;
+	rq_list_add(&plug->mq_list, rq);
+	plug->rq_count++;
+}
+
+static void __blk_execute_rq_nowait(struct request *rq, bool at_head,
+		rq_end_io_fn *done, bool use_plug)
+{
+	WARN_ON(irqs_disabled());
+	WARN_ON(!blk_rq_is_passthrough(rq));
+
+	rq->end_io = done;
+
+	blk_account_io_start(rq);
+
+	if (use_plug && current->plug) {
+		blk_add_rq_to_plug(current->plug, rq);
+		return;
+	}
+	/*
+	 * don't check dying flag for MQ because the request won't
+	 * be reused after dying flag is set
+	 */
+	blk_mq_sched_insert_request(rq, at_head, true, false);
+}
+
+
 /**
  * blk_execute_rq_nowait - insert a request to I/O scheduler for execution
  * @rq:	request to insert
@@ -1184,18 +1240,8 @@ static void blk_end_sync_rq(struct request *rq, blk_status_t error)
  */
 void blk_execute_rq_nowait(struct request *rq, bool at_head, rq_end_io_fn *done)
 {
-	WARN_ON(irqs_disabled());
-	WARN_ON(!blk_rq_is_passthrough(rq));
-
-	rq->end_io = done;
-
-	blk_account_io_start(rq);
-
-	/*
-	 * don't check dying flag for MQ because the request won't
-	 * be reused after dying flag is set
-	 */
-	blk_mq_sched_insert_request(rq, at_head, true, false);
+	__blk_execute_rq_nowait(rq, at_head, done, true);
 }
 EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);
 
@@ -1233,8 +1279,13 @@ blk_status_t blk_execute_rq(struct request *rq, bool at_head)
 	DECLARE_COMPLETION_ONSTACK(wait);
 	unsigned long hang_check;
 
+	/*
+	 * iopoll requires request to be submitted to driver, so can't
+	 * use plug
+	 */
 	rq->end_io_data = &wait;
-	blk_execute_rq_nowait(rq, at_head, blk_end_sync_rq);
+	__blk_execute_rq_nowait(rq, at_head, blk_end_sync_rq,
+			!blk_rq_is_poll(rq));
 
 	/* Prevent hang_check timer from firing at us during very long I/O */
 	hang_check = sysctl_hung_task_timeout_secs;
@@ -2340,40 +2391,6 @@ void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
 	blk_mq_hctx_mark_pending(hctx, ctx);
 }
 
-/*
- * Allow 2x BLK_MAX_REQUEST_COUNT requests on plug queue for multiple
- * queues. This is important for md arrays to benefit from merging
- * requests.
- */
-static inline unsigned short blk_plug_max_rq_count(struct blk_plug *plug)
-{
-	if (plug->multiple_queues)
-		return BLK_MAX_REQUEST_COUNT * 2;
-	return BLK_MAX_REQUEST_COUNT;
-}
-
-static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq)
-{
-	struct request *last = rq_list_peek(&plug->mq_list);
-
-	if (!plug->rq_count) {
-		trace_block_plug(rq->q);
-	} else if (plug->rq_count >= blk_plug_max_rq_count(plug) ||
-		   (!blk_queue_nomerges(rq->q) &&
-		    blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE)) {
-		blk_mq_flush_plug_list(plug, false);
-		trace_block_plug(rq->q);
-	}
-
-	if (!plug->multiple_queues && last && last->q != rq->q)
-		plug->multiple_queues = true;
-	if (!plug->has_elevator && (rq->rq_flags & RQF_ELV))
-		plug->has_elevator = true;
-	rq->rq_next = NULL;
-	rq_list_add(&plug->mq_list, rq);
-	plug->rq_count++;
-}
-
 /**
  * blk_mq_request_bypass_insert - Insert a request at dispatch list.
  * @rq: Pointer to request to be inserted.
@@ -2387,12 +2404,7 @@ void blk_mq_request_bypass_insert(struct request *rq, bool at_head,
 		bool run_queue)
 {
 	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
-	struct blk_plug *plug = current->plug;
 
-	if (plug) {
-		blk_add_rq_to_plug(plug, rq);
-		return;
-	}
 	spin_lock(&hctx->lock);
 	if (at_head)
 		list_add(&rq->queuelist, &hctx->dispatch);