blk-mq: cleanup blk_mq_get_driver_tag()
We never pass 'wait' as true to blk_mq_get_driver_tag(), and hence
we never change '**hctx' either. The last use of these parameters
went away with the flush cleanup, commit 0c2a6fe4dc.

So clean up the usage and remove the two extra parameters.
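To illustrate the call-site simplification (a sketch, not part of the
patch itself): a caller that still needs the hardware queue can derive
it from the request's queue map, exactly as blk_mq_get_driver_tag()
itself does internally:

	/* before: the extra arguments were always NULL/false */
	blk_mq_get_driver_tag(rq, NULL, false);

	/* after: */
	blk_mq_get_driver_tag(rq);

	/* if the hctx is needed, look it up from the queue mapping: */
	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(rq->q, rq->mq_ctx->cpu);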
Cc: Bart Van Assche <bart.vanassche@wdc.com>
Cc: Christoph Hellwig <hch@lst.de>
Tested-by: Andrew Jones <drjones@redhat.com>
Reviewed-by: Omar Sandoval <osandov@fb.com>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
diff --git a/block/blk-mq.c b/block/blk-mq.c
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -964,17 +964,14 @@ static inline unsigned int queued_to_index(unsigned int queued)
 	return min(BLK_MQ_MAX_DISPATCH_ORDER - 1, ilog2(queued) + 1);
 }
 
-bool blk_mq_get_driver_tag(struct request *rq, struct blk_mq_hw_ctx **hctx,
-			   bool wait)
+bool blk_mq_get_driver_tag(struct request *rq)
 {
 	struct blk_mq_alloc_data data = {
 		.q = rq->q,
 		.hctx = blk_mq_map_queue(rq->q, rq->mq_ctx->cpu),
-		.flags = wait ? 0 : BLK_MQ_REQ_NOWAIT,
+		.flags = BLK_MQ_REQ_NOWAIT,
 	};
 
-	might_sleep_if(wait);
-
 	if (rq->tag != -1)
 		goto done;
 
@@ -991,8 +988,6 @@ bool blk_mq_get_driver_tag(struct request *rq, struct blk_mq_hw_ctx **hctx,
 	}
 
 done:
-	if (hctx)
-		*hctx = data.hctx;
 	return rq->tag != -1;
 }
 
@@ -1034,7 +1029,7 @@ static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx **hctx,
 		 * Don't clear RESTART here, someone else could have set it.
 		 * At most this will cost an extra queue run.
 		 */
-		return blk_mq_get_driver_tag(rq, hctx, false);
+		return blk_mq_get_driver_tag(rq);
 	}
 
 	wait = &this_hctx->dispatch_wait;
@@ -1055,7 +1050,7 @@ static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx **hctx,
 	 * allocation failure and adding the hardware queue to the wait
 	 * queue.
 	 */
-	ret = blk_mq_get_driver_tag(rq, hctx, false);
+	ret = blk_mq_get_driver_tag(rq);
 	if (!ret) {
 		spin_unlock(&this_hctx->lock);
 		return false;
@@ -1105,7 +1100,7 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
 		if (!got_budget && !blk_mq_get_dispatch_budget(hctx))
 			break;
 
-		if (!blk_mq_get_driver_tag(rq, NULL, false)) {
+		if (!blk_mq_get_driver_tag(rq)) {
 			/*
 			 * The initial allocation attempt failed, so we need to
 			 * rerun the hardware queue when a tag is freed. The
@@ -1137,7 +1132,7 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
 			bd.last = true;
 		else {
 			nxt = list_first_entry(list, struct request, queuelist);
-			bd.last = !blk_mq_get_driver_tag(nxt, NULL, false);
+			bd.last = !blk_mq_get_driver_tag(nxt);
 		}
 
 		ret = q->mq_ops->queue_rq(hctx, &bd);
@@ -1700,7 +1695,7 @@ static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
 	if (!blk_mq_get_dispatch_budget(hctx))
 		goto insert;
 
-	if (!blk_mq_get_driver_tag(rq, NULL, false)) {
+	if (!blk_mq_get_driver_tag(rq)) {
 		blk_mq_put_dispatch_budget(hctx);
 		goto insert;
 	}
diff --git a/block/blk-mq.h b/block/blk-mq.h
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -36,8 +36,7 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
 void blk_mq_wake_waiters(struct request_queue *q);
 bool blk_mq_dispatch_rq_list(struct request_queue *, struct list_head *, bool);
 void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
-bool blk_mq_get_driver_tag(struct request *rq, struct blk_mq_hw_ctx **hctx,
-			   bool wait);
+bool blk_mq_get_driver_tag(struct request *rq);
 struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
 					struct blk_mq_ctx *start);
 