diff --git a/block/blk-core.c b/block/blk-core.c
index 1e97f9973523..78683ea61c93 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1066,6 +1066,20 @@ generic_make_request_checks(struct bio *bio)
 	return false;
 }
 
+static blk_qc_t do_make_request(struct bio *bio)
+{
+	struct request_queue *q = bio->bi_disk->queue;
+	blk_qc_t ret = BLK_QC_T_NONE;
+
+	if (blk_crypto_bio_prep(&bio)) {
+		if (!q->make_request_fn)
+			return blk_mq_make_request(q, bio);
+		ret = q->make_request_fn(q, bio);
+	}
+	blk_queue_exit(q);
+	return ret;
+}
+
 /**
  * generic_make_request - re-submit a bio to the block device layer for I/O
  * @bio: The bio describing the location in memory and on the device.
@@ -1131,14 +1145,7 @@ blk_qc_t generic_make_request(struct bio *bio)
 			/* Create a fresh bio_list for all subordinate requests */
 			bio_list_on_stack[1] = bio_list_on_stack[0];
 			bio_list_init(&bio_list_on_stack[0]);
-			if (blk_crypto_bio_prep(&bio)) {
-				if (q->make_request_fn)
-					ret = q->make_request_fn(q, bio);
-				else
-					ret = blk_mq_make_request(q, bio);
-			}
-
-			blk_queue_exit(q);
+			ret = do_make_request(bio);
 
 			/* sort new bios into those for a lower level
 			 * and those for the same level
@@ -1175,7 +1182,6 @@ EXPORT_SYMBOL(generic_make_request);
 blk_qc_t direct_make_request(struct bio *bio)
 {
 	struct request_queue *q = bio->bi_disk->queue;
-	blk_qc_t ret = BLK_QC_T_NONE;
 
 	if (WARN_ON_ONCE(q->make_request_fn)) {
 		bio_io_error(bio);
@@ -1185,10 +1191,11 @@ blk_qc_t direct_make_request(struct bio *bio)
 		return BLK_QC_T_NONE;
 	if (unlikely(bio_queue_enter(bio)))
 		return BLK_QC_T_NONE;
-	if (blk_crypto_bio_prep(&bio))
-		ret = blk_mq_make_request(q, bio);
-	blk_queue_exit(q);
-	return ret;
+	if (!blk_crypto_bio_prep(&bio)) {
+		blk_queue_exit(q);
+		return BLK_QC_T_NONE;
+	}
+	return blk_mq_make_request(q, bio);
 }
 EXPORT_SYMBOL_GPL(direct_make_request);
 
diff --git a/block/blk-mq.c b/block/blk-mq.c
index b1c12de8926e..cac11945f602 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2028,26 +2028,24 @@ blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 	__blk_queue_split(q, &bio, &nr_segs);
 
 	if (!bio_integrity_prep(bio))
-		return BLK_QC_T_NONE;
+		goto queue_exit;
 
 	if (!is_flush_fua && !blk_queue_nomerges(q) &&
 	    blk_attempt_plug_merge(q, bio, nr_segs, &same_queue_rq))
-		return BLK_QC_T_NONE;
+		goto queue_exit;
 
 	if (blk_mq_sched_bio_merge(q, bio, nr_segs))
-		return BLK_QC_T_NONE;
+		goto queue_exit;
 
 	rq_qos_throttle(q, bio);
 
 	data.cmd_flags = bio->bi_opf;
-	blk_queue_enter_live(q);
 	rq = blk_mq_get_request(q, bio, &data);
 	if (unlikely(!rq)) {
-		blk_queue_exit(q);
 		rq_qos_cleanup(q, bio);
 		if (bio->bi_opf & REQ_NOWAIT)
 			bio_wouldblock_error(bio);
-		return BLK_QC_T_NONE;
+		goto queue_exit;
 	}
 
 	trace_block_getrq(q, bio, bio->bi_opf);
@@ -2134,6 +2132,9 @@ blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 	}
 
 	return cookie;
+queue_exit:
+	blk_queue_exit(q);
+	return BLK_QC_T_NONE;
 }
 EXPORT_SYMBOL_GPL(blk_mq_make_request); /* only for request based dm */
 
diff --git a/block/blk.h b/block/blk.h
index fc00537026a0..9e6ed5f11823 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -64,17 +64,6 @@ void blk_free_flush_queue(struct blk_flush_queue *q);
 
 void blk_freeze_queue(struct request_queue *q);
 
-static inline void blk_queue_enter_live(struct request_queue *q)
-{
-	/*
-	 * Given that running in generic_make_request() context
-	 * guarantees that a live reference against q_usage_counter has
-	 * been established, further references under that same context
-	 * need not check that the queue has been frozen (marked dead).
-	 */
-	percpu_ref_get(&q->q_usage_counter);
-}
-
 static inline bool biovec_phys_mergeable(struct request_queue *q,
 		struct bio_vec *vec1, struct bio_vec *vec2)
 {
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 8921cd79422c..f215b8666448 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1791,8 +1791,17 @@ static blk_qc_t dm_make_request(struct request_queue *q, struct bio *bio)
 	int srcu_idx;
 	struct dm_table *map;
 
-	if (dm_get_md_type(md) == DM_TYPE_REQUEST_BASED)
+	if (dm_get_md_type(md) == DM_TYPE_REQUEST_BASED) {
+		/*
+		 * We are called with a live reference on q_usage_counter, but
+		 * that one will be released as soon as we return.  Grab an
+		 * extra one as blk_mq_make_request expects to be able to
+		 * consume a reference (which lives until the request is freed
+		 * in case a request is allocated).
+		 */
+		percpu_ref_get(&q->q_usage_counter);
 		return blk_mq_make_request(q, bio);
+	}
 
 	map = dm_get_live_table(md, &srcu_idx);
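
After this change, blk_mq_make_request() always consumes the q_usage_counter
reference its caller passes in: on success the reference is handed off to the
request and dropped when the request is freed, and every early-exit path drops
it through the new queue_exit label. The userspace sketch below models that
contract with a plain atomic counter standing in for q_usage_counter; all the
names here (struct queue, queue_enter, make_request, ...) are illustrative
stand-ins, not kernel APIs.

/*
 * Illustrative userspace model of the reference contract above; not
 * kernel code. make_request() always consumes the caller's reference:
 * on the happy path it is inherited by the request and released when
 * the request is freed, on every early-exit path it is released via
 * the queue_exit label, mirroring blk_mq_make_request after this patch.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct queue {
	atomic_int usage_counter;	/* stands in for q_usage_counter */
};

struct request {
	struct queue *q;		/* owns one queue reference */
};

static void queue_enter(struct queue *q)
{
	atomic_fetch_add(&q->usage_counter, 1);
}

static void queue_exit(struct queue *q)
{
	atomic_fetch_sub(&q->usage_counter, 1);
}

/* The request inherits the reference the submitter passed in. */
static struct request *get_request(struct queue *q, bool fail)
{
	struct request *rq;

	if (fail)
		return NULL;
	rq = malloc(sizeof(*rq));
	if (rq)
		rq->q = q;
	return rq;
}

static void free_request(struct request *rq)
{
	queue_exit(rq->q);	/* reference held since submission */
	free(rq);
}

/* Always consumes the caller's reference, like blk_mq_make_request. */
static void make_request(struct queue *q, bool early_exit, bool alloc_fails)
{
	struct request *rq;

	if (early_exit)
		goto queue_exit;	/* merge, integrity failure, ... */

	rq = get_request(q, alloc_fails);
	if (!rq)
		goto queue_exit;

	free_request(rq);		/* normally done at I/O completion */
	return;
queue_exit:
	queue_exit(q);
}

int main(void)
{
	struct queue q;

	atomic_init(&q.usage_counter, 0);

	/* Every path balances: enter once, make_request consumes once. */
	queue_enter(&q); make_request(&q, false, false);	/* happy path */
	queue_enter(&q); make_request(&q, true, false);		/* early exit */
	queue_enter(&q); make_request(&q, false, true);		/* alloc fail */

	printf("usage_counter = %d (expect 0)\n",
	       atomic_load(&q.usage_counter));
	return 0;
}

Because every path out of make_request() releases exactly one reference, the
blk-mq branch of do_make_request() can return straight from
blk_mq_make_request() without a trailing blk_queue_exit().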
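
dm_make_request() sits between the two conventions: generic_make_request()
drops its own reference after the driver's make_request_fn returns, while
blk_mq_make_request() now expects to consume one, hence the percpu_ref_get()
in the dm.c hunk above. A hypothetical shim, reusing the model from the
previous sketch, shows the same bridging pattern:

/*
 * Hypothetical bridge in the style of dm_make_request above, reusing
 * struct queue, queue_enter() and make_request() from the previous
 * sketch: the caller keeps (and later drops) its own reference, but
 * make_request() consumes one, so an extra reference is taken first.
 */
static void shim_make_request(struct queue *q, bool early_exit,
			      bool alloc_fails)
{
	queue_enter(q);		/* extra ref consumed by make_request() */
	make_request(q, early_exit, alloc_fails);
	/* the caller's own reference is still dropped after we return */
}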