block: move __elv_next_request to blk-core.c
No need to have this helper inline in a header. Also drop the __ prefix.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 7cb04004fa
commit 9c9883744d
block/blk-core.c
@@ -2517,6 +2517,45 @@ void blk_account_io_start(struct request *rq, bool new_io)
 	part_stat_unlock();
 }
 
+static struct request *elv_next_request(struct request_queue *q)
+{
+	struct request *rq;
+	struct blk_flush_queue *fq = blk_get_flush_queue(q, NULL);
+
+	WARN_ON_ONCE(q->mq_ops);
+
+	while (1) {
+		if (!list_empty(&q->queue_head)) {
+			rq = list_entry_rq(q->queue_head.next);
+			return rq;
+		}
+
+		/*
+		 * Flush request is running and flush request isn't queueable
+		 * in the drive, we can hold the queue till flush request is
+		 * finished. Even we don't do this, driver can't dispatch next
+		 * requests and will requeue them. And this can improve
+		 * throughput too. For example, we have request flush1, write1,
+		 * flush 2. flush1 is dispatched, then queue is hold, write1
+		 * isn't inserted to queue. After flush1 is finished, flush2
+		 * will be dispatched. Since disk cache is already clean,
+		 * flush2 will be finished very soon, so looks like flush2 is
+		 * folded to flush1.
+		 * Since the queue is hold, a flag is set to indicate the queue
+		 * should be restarted later. Please see flush_end_io() for
+		 * details.
+		 */
+		if (fq->flush_pending_idx != fq->flush_running_idx &&
+				!queue_flush_queueable(q)) {
+			fq->flush_queue_delayed = 1;
+			return NULL;
+		}
+		if (unlikely(blk_queue_bypass(q)) ||
+		    !q->elevator->type->ops.sq.elevator_dispatch_fn(q, 0))
+			return NULL;
+	}
+}
+
 /**
  * blk_peek_request - peek at the top of a request queue
  * @q: request queue to peek at
@@ -2538,8 +2577,7 @@ struct request *blk_peek_request(struct request_queue *q)
 	lockdep_assert_held(q->queue_lock);
 	WARN_ON_ONCE(q->mq_ops);
 
-	while ((rq = __elv_next_request(q)) != NULL) {
-
+	while ((rq = elv_next_request(q)) != NULL) {
 		rq = blk_pm_peek_request(q, rq);
 		if (!rq)
 			break;
block/blk.h
@@ -148,45 +148,6 @@ static inline void blk_clear_rq_complete(struct request *rq)
 
 void blk_insert_flush(struct request *rq);
 
-static inline struct request *__elv_next_request(struct request_queue *q)
-{
-	struct request *rq;
-	struct blk_flush_queue *fq = blk_get_flush_queue(q, NULL);
-
-	WARN_ON_ONCE(q->mq_ops);
-
-	while (1) {
-		if (!list_empty(&q->queue_head)) {
-			rq = list_entry_rq(q->queue_head.next);
-			return rq;
-		}
-
-		/*
-		 * Flush request is running and flush request isn't queueable
-		 * in the drive, we can hold the queue till flush request is
-		 * finished. Even we don't do this, driver can't dispatch next
-		 * requests and will requeue them. And this can improve
-		 * throughput too. For example, we have request flush1, write1,
-		 * flush 2. flush1 is dispatched, then queue is hold, write1
-		 * isn't inserted to queue. After flush1 is finished, flush2
-		 * will be dispatched. Since disk cache is already clean,
-		 * flush2 will be finished very soon, so looks like flush2 is
-		 * folded to flush1.
-		 * Since the queue is hold, a flag is set to indicate the queue
-		 * should be restarted later. Please see flush_end_io() for
-		 * details.
-		 */
-		if (fq->flush_pending_idx != fq->flush_running_idx &&
-				!queue_flush_queueable(q)) {
-			fq->flush_queue_delayed = 1;
-			return NULL;
-		}
-		if (unlikely(blk_queue_bypass(q)) ||
-		    !q->elevator->type->ops.sq.elevator_dispatch_fn(q, 0))
-			return NULL;
-	}
-}
-
 static inline void elv_activate_rq(struct request_queue *q, struct request *rq)
 {
 	struct elevator_queue *e = q->elevator;