block: remove the unused lock argument to rq_qos_throttle
Unused now that the legacy request path is gone.

Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit d53375608e
parent 373e4af34e
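For readers skimming the diff below: after this change the rq-qos throttle chain is walked with just the queue and the bio. The following is a standalone userspace sketch of that dispatch shape, not kernel code; the struct definitions are simplified stand-ins for the kernel's rq_qos/rq_qos_ops types and exist only to show the narrowed callback signature.

/*
 * Sketch only: models rq_qos_throttle() dispatch after this commit.
 * Types are simplified stand-ins, not the kernel definitions.
 */
#include <stdio.h>

struct bio;                             /* opaque in this sketch */
struct rq_qos;

struct rq_qos_ops {
        /* ->throttle() no longer takes a spinlock_t * argument */
        void (*throttle)(struct rq_qos *, struct bio *);
};

struct rq_qos {
        const struct rq_qos_ops *ops;
        struct rq_qos *next;
};

struct request_queue {
        struct rq_qos *rq_qos;          /* singly linked chain of policies */
};

/* Walk the chain and let each policy throttle the bio if it implements it. */
static void rq_qos_throttle(struct request_queue *q, struct bio *bio)
{
        struct rq_qos *rqos;

        for (rqos = q->rq_qos; rqos; rqos = rqos->next) {
                if (rqos->ops->throttle)
                        rqos->ops->throttle(rqos, bio);
        }
}

static void wbt_throttle(struct rq_qos *rqos, struct bio *bio)
{
        (void)rqos;
        (void)bio;
        puts("wbt: may sleep here directly, no queue_lock to drop and retake");
}

int main(void)
{
        static const struct rq_qos_ops wbt_ops = { .throttle = wbt_throttle };
        struct rq_qos wbt = { .ops = &wbt_ops, .next = NULL };
        struct request_queue q = { .rq_qos = &wbt };

        rq_qos_throttle(&q, NULL);      /* NULL bio is fine for the sketch */
        return 0;
}

The real kernel path differs only in that each ->throttle() implementation may now sleep via io_schedule() unconditionally, since no caller holds the legacy queue_lock anymore.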
--- a/block/blk-iolatency.c
+++ b/block/blk-iolatency.c
@@ -276,10 +276,8 @@ static inline bool iolatency_may_queue(struct iolatency_grp *iolat,
 
 static void __blkcg_iolatency_throttle(struct rq_qos *rqos,
                                        struct iolatency_grp *iolat,
-                                       spinlock_t *lock, bool issue_as_root,
+                                       bool issue_as_root,
                                        bool use_memdelay)
-        __releases(lock)
-        __acquires(lock)
 {
         struct rq_wait *rqw = &iolat->rq_wait;
         unsigned use_delay = atomic_read(&lat_to_blkg(iolat)->use_delay);
@@ -311,14 +309,7 @@ static void __blkcg_iolatency_throttle(struct rq_qos *rqos,
                 if (iolatency_may_queue(iolat, &wait, first_block))
                         break;
                 first_block = false;
-
-                if (lock) {
-                        spin_unlock_irq(lock);
-                        io_schedule();
-                        spin_lock_irq(lock);
-                } else {
-                        io_schedule();
-                }
+                io_schedule();
         } while (1);
 
         finish_wait(&rqw->wait, &wait);
@@ -478,8 +469,7 @@ static void check_scale_change(struct iolatency_grp *iolat)
                 scale_change(iolat, direction > 0);
 }
 
-static void blkcg_iolatency_throttle(struct rq_qos *rqos, struct bio *bio,
-                                     spinlock_t *lock)
+static void blkcg_iolatency_throttle(struct rq_qos *rqos, struct bio *bio)
 {
         struct blk_iolatency *blkiolat = BLKIOLATENCY(rqos);
         struct blkcg *blkcg;
@@ -495,13 +485,11 @@ static void blkcg_iolatency_throttle(struct rq_qos *rqos, struct bio *bio,
         bio_associate_blkcg(bio, &blkcg->css);
         blkg = blkg_lookup(blkcg, q);
         if (unlikely(!blkg)) {
-                if (!lock)
-                        spin_lock_irq(q->queue_lock);
+                spin_lock_irq(q->queue_lock);
                 blkg = blkg_lookup_create(blkcg, q);
                 if (IS_ERR(blkg))
                         blkg = NULL;
-                if (!lock)
-                        spin_unlock_irq(q->queue_lock);
+                spin_unlock_irq(q->queue_lock);
         }
         if (!blkg)
                 goto out;
@@ -518,7 +506,7 @@ static void blkcg_iolatency_throttle(struct rq_qos *rqos, struct bio *bio,
         }
 
         check_scale_change(iolat);
-        __blkcg_iolatency_throttle(rqos, iolat, lock, issue_as_root,
+        __blkcg_iolatency_throttle(rqos, iolat, issue_as_root,
                                    (bio->bi_opf & REQ_SWAP) == REQ_SWAP);
         blkg = blkg->parent;
 }
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1886,7 +1886,7 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
         if (blk_mq_sched_bio_merge(q, bio))
                 return BLK_QC_T_NONE;
 
-        rq_qos_throttle(q, bio, NULL);
+        rq_qos_throttle(q, bio);
 
         rq = blk_mq_get_request(q, bio, &data);
         if (unlikely(!rq)) {
--- a/block/blk-rq-qos.c
+++ b/block/blk-rq-qos.c
@@ -67,14 +67,13 @@ void rq_qos_requeue(struct request_queue *q, struct request *rq)
         }
 }
 
-void rq_qos_throttle(struct request_queue *q, struct bio *bio,
-                     spinlock_t *lock)
+void rq_qos_throttle(struct request_queue *q, struct bio *bio)
 {
         struct rq_qos *rqos;
 
         for(rqos = q->rq_qos; rqos; rqos = rqos->next) {
                 if (rqos->ops->throttle)
-                        rqos->ops->throttle(rqos, bio, lock);
+                        rqos->ops->throttle(rqos, bio);
         }
 }
 
--- a/block/blk-rq-qos.h
+++ b/block/blk-rq-qos.h
@@ -25,7 +25,7 @@ struct rq_qos {
 };
 
 struct rq_qos_ops {
-        void (*throttle)(struct rq_qos *, struct bio *, spinlock_t *);
+        void (*throttle)(struct rq_qos *, struct bio *);
         void (*track)(struct rq_qos *, struct request *, struct bio *);
         void (*issue)(struct rq_qos *, struct request *);
         void (*requeue)(struct rq_qos *, struct request *);
@@ -103,7 +103,7 @@ void rq_qos_done(struct request_queue *, struct request *);
 void rq_qos_issue(struct request_queue *, struct request *);
 void rq_qos_requeue(struct request_queue *, struct request *);
 void rq_qos_done_bio(struct request_queue *q, struct bio *bio);
-void rq_qos_throttle(struct request_queue *, struct bio *, spinlock_t *);
+void rq_qos_throttle(struct request_queue *, struct bio *);
 void rq_qos_track(struct request_queue *q, struct request *, struct bio *);
 void rq_qos_exit(struct request_queue *);
 #endif
--- a/block/blk-wbt.c
+++ b/block/blk-wbt.c
@@ -521,9 +521,7 @@ static int wbt_wake_function(struct wait_queue_entry *curr, unsigned int mode,
  * the timer to kick off queuing again.
  */
 static void __wbt_wait(struct rq_wb *rwb, enum wbt_flags wb_acct,
-                       unsigned long rw, spinlock_t *lock)
-        __releases(lock)
-        __acquires(lock)
+                       unsigned long rw)
 {
         struct rq_wait *rqw = get_rq_wait(rwb, wb_acct);
         struct wbt_wait_data data = {
@@ -561,13 +559,7 @@ static void __wbt_wait(struct rq_wb *rwb, enum wbt_flags wb_acct,
                         break;
                 }
 
-                if (lock) {
-                        spin_unlock_irq(lock);
-                        io_schedule();
-                        spin_lock_irq(lock);
-                } else
-                        io_schedule();
-
+                io_schedule();
                 has_sleeper = false;
         } while (1);
 
@@ -624,7 +616,7 @@ static void wbt_cleanup(struct rq_qos *rqos, struct bio *bio)
  * in an irq held spinlock, if it holds one when calling this function.
  * If we do sleep, we'll release and re-grab it.
  */
-static void wbt_wait(struct rq_qos *rqos, struct bio *bio, spinlock_t *lock)
+static void wbt_wait(struct rq_qos *rqos, struct bio *bio)
 {
         struct rq_wb *rwb = RQWB(rqos);
         enum wbt_flags flags;
@@ -636,7 +628,7 @@ static void wbt_wait(struct rq_qos *rqos, struct bio *bio, spinlock_t *lock)
                 return;
         }
 
-        __wbt_wait(rwb, flags, bio->bi_opf, lock);
+        __wbt_wait(rwb, flags, bio->bi_opf);
 
         if (!blk_stat_is_active(rwb->cb))
                 rwb_arm_timer(rwb);