blk-mq: cleanup and rename __blk_mq_alloc_request
The newly added loop for the cached requests in __blk_mq_alloc_request
is a little too convoluted for my taste, so unwind it a bit. Also
rename the function to __blk_mq_alloc_requests now that it can allocate
more than a single request.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Link: https://lore.kernel.org/r/20211012104045.658051-2-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit b90cfaed37
parent 47c122e35d
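Editor's note: the gist of the unwinding below is a control-flow inversion.
Instead of nesting the whole success path inside "if (tag != BLK_MQ_NO_TAG)"
and looping via continue, the patched code handles the failure case first and
lets the success path fall straight through. A standalone sketch of that
before/after shape, using hypothetical stand-ins (get_tag, NO_TAG, tags_left,
nowait) rather than the kernel code:

/*
 * Sketch only: get_tag(), NO_TAG, tags_left and nowait are hypothetical
 * stand-ins for blk_mq_get_tag(), BLK_MQ_NO_TAG, data->nr_tags and
 * BLK_MQ_REQ_NOWAIT.
 */
#include <stdbool.h>
#include <stdio.h>

#define NO_TAG (-1)

static int tags_left;
static bool nowait;

static int get_tag(void)
{
	return tags_left > 0 ? tags_left-- : NO_TAG;
}

/* Before: success path nested inside the loop, re-entered via continue. */
static int alloc_nested(void)
{
	do {
		int tag = get_tag();
		if (tag != NO_TAG) {
			if (tag == 1)	/* last requested tag */
				return tag;
			continue;	/* cache it, go around again */
		}
		if (nowait)
			break;
		/* sleep and retry would go here */
	} while (1);
	return NO_TAG;
}

/* After: failure case dismissed first, success path falls through. */
static int alloc_unwound(void)
{
	do {
		int tag = get_tag();
		if (tag == NO_TAG) {
			if (nowait)
				break;
			continue;	/* sleep and retry would go here */
		}
		if (tag == 1)		/* last requested tag */
			return tag;
		/* cache it, go around again */
	} while (1);
	return NO_TAG;
}

int main(void)
{
	tags_left = 3;
	printf("nested:  %d\n", alloc_nested());
	tags_left = 3;
	printf("unwound: %d\n", alloc_unwound());
	return 0;
}

Both variants behave identically; the unwound form just keeps the common
allocation path at one indentation level, which is what the patch does for
the real function.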
block/blk-mq.c

@@ -354,7 +354,7 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
 	return rq;
 }
 
-static struct request *__blk_mq_alloc_request(struct blk_mq_alloc_data *data)
+static struct request *__blk_mq_alloc_requests(struct blk_mq_alloc_data *data)
 {
 	struct request_queue *q = data->q;
 	struct elevator_queue *e = q->elevator;
@@ -395,36 +395,36 @@ static struct request *__blk_mq_alloc_request(struct blk_mq_alloc_data *data)
 	 */
 	do {
 		tag = blk_mq_get_tag(data);
-		if (tag != BLK_MQ_NO_TAG) {
-			rq = blk_mq_rq_ctx_init(data, tag, alloc_time_ns);
-			if (!--data->nr_tags)
-				return rq;
-			if (e || data->hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
-				return rq;
-			rq->rq_next = *data->cached_rq;
-			*data->cached_rq = rq;
-			data->flags |= BLK_MQ_REQ_NOWAIT;
-			continue;
+		if (tag == BLK_MQ_NO_TAG) {
+			if (data->flags & BLK_MQ_REQ_NOWAIT)
+				break;
+			/*
+			 * Give up the CPU and sleep for a random short time to
+			 * ensure that thread using a realtime scheduling class
+			 * are migrated off the CPU, and thus off the hctx that
+			 * is going away.
+			 */
+			msleep(3);
+			goto retry;
 		}
-		if (data->flags & BLK_MQ_REQ_NOWAIT)
-			break;
 
-		/*
-		 * Give up the CPU and sleep for a random short time to ensure
-		 * that thread using a realtime scheduling class are migrated
-		 * off the CPU, and thus off the hctx that is going away.
-		 */
-		msleep(3);
-		goto retry;
+		rq = blk_mq_rq_ctx_init(data, tag, alloc_time_ns);
+		if (!--data->nr_tags || e ||
+		    (data->hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED))
+			return rq;
+
+		/* link into the cached list */
+		rq->rq_next = *data->cached_rq;
+		*data->cached_rq = rq;
+		data->flags |= BLK_MQ_REQ_NOWAIT;
 	} while (1);
 
-	if (data->cached_rq) {
-		rq = *data->cached_rq;
-		*data->cached_rq = rq->rq_next;
-		return rq;
-	}
+	if (!data->cached_rq)
+		return NULL;
 
-	return NULL;
+	rq = *data->cached_rq;
+	*data->cached_rq = rq->rq_next;
+	return rq;
 }
 
 struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
@@ -443,7 +443,7 @@ struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
 	if (ret)
 		return ERR_PTR(ret);
 
-	rq = __blk_mq_alloc_request(&data);
+	rq = __blk_mq_alloc_requests(&data);
 	if (!rq)
 		goto out_queue_exit;
 	rq->__data_len = 0;
@@ -2258,7 +2258,7 @@ blk_qc_t blk_mq_submit_bio(struct bio *bio)
 		plug->nr_ios = 1;
 		data.cached_rq = &plug->cached_rq;
 	}
-	rq = __blk_mq_alloc_request(&data);
+	rq = __blk_mq_alloc_requests(&data);
 	if (unlikely(!rq)) {
 		rq_qos_cleanup(q, bio);
 		if (bio->bi_opf & REQ_NOWAIT)
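Editor's note: the tail of the rewritten function is a plain head pop from an
intrusive singly linked list. data->cached_rq points at the plug's list head,
and requests are threaded together through rq->rq_next, so caching is a head
push inside the loop and consuming is a head pop after it. A minimal sketch of
those two operations, with a hypothetical struct req standing in for
struct request:

#include <stdio.h>

struct req {
	int tag;
	struct req *rq_next;	/* intrusive link, like rq->rq_next */
};

/* Push at the head, mirroring the "link into the cached list" lines. */
static void cached_push(struct req **cached, struct req *rq)
{
	rq->rq_next = *cached;
	*cached = rq;
}

/* Pop the head, mirroring the new tail of __blk_mq_alloc_requests(). */
static struct req *cached_pop(struct req **cached)
{
	struct req *rq = *cached;

	if (!rq)
		return NULL;
	*cached = rq->rq_next;
	return rq;
}

int main(void)
{
	struct req a = { .tag = 1 }, b = { .tag = 2 };
	struct req *cache = NULL;
	struct req *rq;

	cached_push(&cache, &a);
	cached_push(&cache, &b);
	while ((rq = cached_pop(&cache)))
		printf("popped tag %d\n", rq->tag);	/* 2, then 1 */
	return 0;
}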