blk-mq: avoid infinite recursion with the FUA flag
We should not insert requests into the flush state machine from
blk_mq_insert_request. All incoming flush requests come through
blk_{m,s}q_make_request and are handled there, while blk_execute_rq_nowait
should only be called for BLOCK_PC requests. All other callers deal with
requests that already went through the flush state machine and shouldn't
be reinserted into it.

Reported-by: Robert Elliott <Elliott@hp.com>
Debugged-by: Ming Lei <ming.lei@canonical.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@fb.com>
commit a57a178a49
parent 683d0e1262
block/blk-exec.c
@@ -56,6 +56,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
 	bool is_pm_resume;
 
 	WARN_ON(irqs_disabled());
+	WARN_ON(rq->cmd_type == REQ_TYPE_FS);
 
 	rq->rq_disk = bd_disk;
 	rq->end_io = done;
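For context, the kernel's WARN_ON() logs a warning with a stack trace when its condition is true and then lets execution continue, so the added check flags any filesystem request that reaches this passthrough-only entry point without stopping the machine. Below is a minimal user-space sketch of the contract being asserted; the request struct, the WARN_ON stand-in, and execute_rq_nowait() are simplified illustrations, not the kernel definitions:

#include <stdio.h>

/* Simplified stand-ins for the kernel types involved. */
enum cmd_type { REQ_TYPE_FS, REQ_TYPE_BLOCK_PC };

struct request {
	enum cmd_type cmd_type;
};

/* User-space analogue of the kernel's WARN_ON(): report, then continue. */
#define WARN_ON(cond) \
	do { if (cond) fprintf(stderr, "WARNING: %s\n", #cond); } while (0)

/* The contract: this entry point is for passthrough (BLOCK_PC) requests
 * only; filesystem requests must go through the make_request path. */
static void execute_rq_nowait(struct request *rq)
{
	WARN_ON(rq->cmd_type == REQ_TYPE_FS);
	/* ... queue the request for execution ... */
}

int main(void)
{
	struct request pc = { .cmd_type = REQ_TYPE_BLOCK_PC };
	struct request fs = { .cmd_type = REQ_TYPE_FS };

	execute_rq_nowait(&pc);	/* OK: passthrough request */
	execute_rq_nowait(&fs);	/* prints a warning: contract violated */
	return 0;
}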
block/blk-mq.c
@@ -963,14 +963,9 @@ void blk_mq_insert_request(struct request *rq, bool at_head, bool run_queue,
 
 	hctx = q->mq_ops->map_queue(q, ctx->cpu);
 
-	if (rq->cmd_flags & (REQ_FLUSH | REQ_FUA) &&
-	    !(rq->cmd_flags & (REQ_FLUSH_SEQ))) {
-		blk_insert_flush(rq);
-	} else {
-		spin_lock(&ctx->lock);
-		__blk_mq_insert_request(hctx, rq, at_head);
-		spin_unlock(&ctx->lock);
-	}
+	spin_lock(&ctx->lock);
+	__blk_mq_insert_request(hctx, rq, at_head);
+	spin_unlock(&ctx->lock);
 
 	if (run_queue)
 		blk_mq_run_hw_queue(hctx, async);
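The branch removed above is where the recursion named in the commit title could start. One plausible shape of the cycle, sketched as self-contained user-space C (the flag values, the depth counter, and the _old suffix are illustrative additions, not kernel code): a data-only FUA request is routed into the flush machinery, which has no pre/post flush to sequence on a FUA-capable device and hands the request straight back to the insert path without ever setting REQ_FLUSH_SEQ, so the guard never fires and the two functions call each other until the stack overflows. The sketch caps the depth so it terminates:

#include <stdio.h>

#define REQ_FUA		(1u << 0)
#define REQ_FLUSH	(1u << 1)
#define REQ_FLUSH_SEQ	(1u << 2)

struct request {
	unsigned int cmd_flags;
	int depth;		/* instrumentation for this sketch only */
};

static void blk_mq_insert_request_old(struct request *rq);

/* Stand-in for the flush machinery: a data-only FUA request on a
 * FUA-capable device needs no sequencing, so it is handed straight back
 * to the insert path -- crucially, without REQ_FLUSH_SEQ set. */
static void blk_insert_flush(struct request *rq)
{
	blk_mq_insert_request_old(rq);
}

/* The pre-patch shape: FLUSH/FUA requests not already part of a flush
 * sequence are routed into the flush machinery... which routes them
 * right back here. */
static void blk_mq_insert_request_old(struct request *rq)
{
	if (++rq->depth > 5) {	/* cap what the kernel could not */
		printf("depth %d: would overflow the kernel stack\n", rq->depth);
		return;
	}
	if ((rq->cmd_flags & (REQ_FLUSH | REQ_FUA)) &&
	    !(rq->cmd_flags & REQ_FLUSH_SEQ)) {
		blk_insert_flush(rq);
		return;
	}
	/* ...locked insert onto the software queue, as in the hunk above... */
}

int main(void)
{
	struct request rq = { .cmd_flags = REQ_FUA, .depth = 0 };

	blk_mq_insert_request_old(&rq);
	return 0;
}

With the branch gone, blk_mq_insert_request() always performs the plain locked insert, and the flush state machine is entered exactly once, from the make_request path, so the cycle cannot form.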