block: add queue_is_mq() helper
Various spots check for q->mq_ops being non-NULL; provide a helper to do this instead. Where the ->mq_ops != NULL check is redundant, remove it.

Since mq == rq-based now that the legacy path is gone, get rid of queue_is_rq_based() and just use queue_is_mq() everywhere.

Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
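For reference, the helper itself is a one-line wrapper around the ->mq_ops pointer test; it takes the place of queue_is_rq_based() in the header hunk at the end of this diff. Call sites simply swap the open-coded pointer check for the helper, as in blk_freeze_queue_start() below:

static inline bool queue_is_mq(struct request_queue *q)
{
	return q->mq_ops;
}

	/* call sites: "if (q->mq_ops)" becomes */
	if (queue_is_mq(q))
		blk_mq_run_hw_queues(q, false);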
parent dabcefab45
commit 344e9ffcbd
@@ -1324,7 +1324,7 @@ int blkcg_activate_policy(struct request_queue *q,
 	if (blkcg_policy_enabled(q, pol))
 		return 0;
 
-	if (q->mq_ops)
+	if (queue_is_mq(q))
 		blk_mq_freeze_queue(q);
 pd_prealloc:
 	if (!pd_prealloc) {

@@ -1363,7 +1363,7 @@ int blkcg_activate_policy(struct request_queue *q,
 
 	spin_unlock_irq(&q->queue_lock);
 out_bypass_end:
-	if (q->mq_ops)
+	if (queue_is_mq(q))
 		blk_mq_unfreeze_queue(q);
 	if (pd_prealloc)
 		pol->pd_free_fn(pd_prealloc);

@@ -1387,7 +1387,7 @@ void blkcg_deactivate_policy(struct request_queue *q,
 	if (!blkcg_policy_enabled(q, pol))
 		return;
 
-	if (q->mq_ops)
+	if (queue_is_mq(q))
 		blk_mq_freeze_queue(q);
 
 	spin_lock_irq(&q->queue_lock);

@@ -1405,7 +1405,7 @@ void blkcg_deactivate_policy(struct request_queue *q,
 
 	spin_unlock_irq(&q->queue_lock);
 
-	if (q->mq_ops)
+	if (queue_is_mq(q))
 		blk_mq_unfreeze_queue(q);
 }
 EXPORT_SYMBOL_GPL(blkcg_deactivate_policy);

@@ -232,7 +232,7 @@ void blk_sync_queue(struct request_queue *q)
 	del_timer_sync(&q->timeout);
 	cancel_work_sync(&q->timeout_work);
 
-	if (q->mq_ops) {
+	if (queue_is_mq(q)) {
 		struct blk_mq_hw_ctx *hctx;
 		int i;
 

@@ -281,7 +281,7 @@ void blk_set_queue_dying(struct request_queue *q)
 	 */
 	blk_freeze_queue_start(q);
 
-	if (q->mq_ops)
+	if (queue_is_mq(q))
 		blk_mq_wake_waiters(q);
 
 	/* Make blk_queue_enter() reexamine the DYING flag. */

@@ -356,7 +356,7 @@ void blk_cleanup_queue(struct request_queue *q)
 	 * blk_freeze_queue() should be enough for cases of passthrough
 	 * request.
 	 */
-	if (q->mq_ops && blk_queue_init_done(q))
+	if (queue_is_mq(q) && blk_queue_init_done(q))
 		blk_mq_quiesce_queue(q);
 
 	/* for synchronous bio-based driver finish in-flight integrity i/o */

@@ -374,7 +374,7 @@ void blk_cleanup_queue(struct request_queue *q)
 
 	blk_exit_queue(q);
 
-	if (q->mq_ops)
+	if (queue_is_mq(q))
 		blk_mq_free_queue(q);
 
 	percpu_ref_exit(&q->q_usage_counter);

@@ -982,7 +982,7 @@ generic_make_request_checks(struct bio *bio)
 	 * For a REQ_NOWAIT based request, return -EOPNOTSUPP
 	 * if queue is not a request based queue.
 	 */
-	if ((bio->bi_opf & REQ_NOWAIT) && !queue_is_rq_based(q))
+	if ((bio->bi_opf & REQ_NOWAIT) && !queue_is_mq(q))
 		goto not_supported;
 
 	if (should_fail_bio(bio))

@@ -1657,7 +1657,7 @@ EXPORT_SYMBOL_GPL(rq_flush_dcache_pages);
  */
 int blk_lld_busy(struct request_queue *q)
 {
-	if (q->mq_ops && q->mq_ops->busy)
+	if (queue_is_mq(q) && q->mq_ops->busy)
 		return q->mq_ops->busy(q);
 
 	return 0;

@@ -273,8 +273,7 @@ static void blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq,
 	 * assigned to empty flushes, and we deadlock if we are expecting
 	 * other requests to make progress. Don't defer for that case.
 	 */
-	if (!list_empty(&fq->flush_data_in_flight) &&
-	    !(q->mq_ops && q->elevator) &&
+	if (!list_empty(&fq->flush_data_in_flight) && q->elevator &&
 	    time_before(jiffies,
 			fq->flush_pending_since + FLUSH_PENDING_TIMEOUT))
 		return;

@@ -150,7 +150,7 @@ void blk_freeze_queue_start(struct request_queue *q)
 	freeze_depth = atomic_inc_return(&q->mq_freeze_depth);
 	if (freeze_depth == 1) {
 		percpu_ref_kill(&q->q_usage_counter);
-		if (q->mq_ops)
+		if (queue_is_mq(q))
 			blk_mq_run_hw_queues(q, false);
 	}
 }

@@ -68,7 +68,7 @@ queue_requests_store(struct request_queue *q, const char *page, size_t count)
 	unsigned long nr;
 	int ret, err;
 
-	if (!q->mq_ops)
+	if (!queue_is_mq(q))
 		return -EINVAL;
 
 	ret = queue_var_store(&nr, page, count);

@@ -835,12 +835,12 @@ static void __blk_release_queue(struct work_struct *work)
 
 	blk_queue_free_zone_bitmaps(q);
 
-	if (q->mq_ops)
+	if (queue_is_mq(q))
 		blk_mq_release(q);
 
 	blk_trace_shutdown(q);
 
-	if (q->mq_ops)
+	if (queue_is_mq(q))
 		blk_mq_debugfs_unregister(q);
 
 	bioset_exit(&q->bio_split);

@@ -914,7 +914,7 @@ int blk_register_queue(struct gendisk *disk)
 		goto unlock;
 	}
 
-	if (q->mq_ops) {
+	if (queue_is_mq(q)) {
 		__blk_mq_register_dev(dev, q);
 		blk_mq_debugfs_register(q);
 	}

@@ -925,7 +925,7 @@ int blk_register_queue(struct gendisk *disk)
 
 	blk_throtl_register_queue(q);
 
-	if ((q->mq_ops && q->elevator)) {
+	if (q->elevator) {
 		ret = elv_register_queue(q);
 		if (ret) {
 			mutex_unlock(&q->sysfs_lock);

@@ -974,7 +974,7 @@ void blk_unregister_queue(struct gendisk *disk)
 	 * Remove the sysfs attributes before unregistering the queue data
 	 * structures that can be modified through sysfs.
 	 */
-	if (q->mq_ops)
+	if (queue_is_mq(q))
 		blk_mq_unregister_dev(disk_to_dev(disk), q);
 	mutex_unlock(&q->sysfs_lock);
 

@@ -983,7 +983,7 @@ void blk_unregister_queue(struct gendisk *disk)
 	blk_trace_remove_sysfs(disk_to_dev(disk));
 
 	mutex_lock(&q->sysfs_lock);
-	if (q->mq_ops && q->elevator)
+	if (q->elevator)
 		elv_unregister_queue(q);
 	mutex_unlock(&q->sysfs_lock);
 

@@ -2456,7 +2456,7 @@ void blk_throtl_register_queue(struct request_queue *q)
 	td->throtl_slice = DFL_THROTL_SLICE_HD;
 #endif
 
-	td->track_bio_latency = !queue_is_rq_based(q);
+	td->track_bio_latency = !queue_is_mq(q);
 	if (!td->track_bio_latency)
 		blk_stat_enable_accounting(q);
 }

@@ -701,7 +701,7 @@ void wbt_enable_default(struct request_queue *q)
 	if (!test_bit(QUEUE_FLAG_REGISTERED, &q->queue_flags))
 		return;
 
-	if (q->mq_ops && IS_ENABLED(CONFIG_BLK_WBT_MQ))
+	if (queue_is_mq(q) && IS_ENABLED(CONFIG_BLK_WBT_MQ))
 		wbt_init(q);
 }
 EXPORT_SYMBOL_GPL(wbt_enable_default);

@@ -421,7 +421,7 @@ int blk_revalidate_disk_zones(struct gendisk *disk)
 	 * BIO based queues do not use a scheduler so only q->nr_zones
 	 * needs to be updated so that the sysfs exposed value is correct.
 	 */
-	if (!queue_is_rq_based(q)) {
+	if (!queue_is_mq(q)) {
 		q->nr_zones = nr_zones;
 		return 0;
 	}

@@ -471,7 +471,7 @@ int bsg_register_queue(struct request_queue *q, struct device *parent,
 	/*
 	 * we need a proper transport to send commands, not a stacked device
 	 */
-	if (!queue_is_rq_based(q))
+	if (!queue_is_mq(q))
 		return 0;
 
 	bcd = &q->bsg_dev;

@@ -667,7 +667,7 @@ static int __elevator_change(struct request_queue *q, const char *name)
 	/*
 	 * Special case for mq, turn off scheduling
 	 */
-	if (q->mq_ops && !strncmp(name, "none", 4))
+	if (!strncmp(name, "none", 4))
 		return elevator_switch(q, NULL);
 
 	strlcpy(elevator_name, name, sizeof(elevator_name));

@@ -685,8 +685,7 @@ static int __elevator_change(struct request_queue *q, const char *name)
 
 static inline bool elv_support_iosched(struct request_queue *q)
 {
-	if (q->mq_ops && q->tag_set && (q->tag_set->flags &
-				BLK_MQ_F_NO_SCHED))
+	if (q->tag_set && (q->tag_set->flags & BLK_MQ_F_NO_SCHED))
 		return false;
 	return true;
 }

@@ -696,7 +695,7 @@ ssize_t elv_iosched_store(struct request_queue *q, const char *name,
 {
 	int ret;
 
-	if (!q->mq_ops || !elv_support_iosched(q))
+	if (!queue_is_mq(q) || !elv_support_iosched(q))
 		return count;
 
 	ret = __elevator_change(q, name);

@@ -713,7 +712,7 @@ ssize_t elv_iosched_show(struct request_queue *q, char *name)
 	struct elevator_type *__e;
 	int len = 0;
 
-	if (!queue_is_rq_based(q))
+	if (!queue_is_mq(q))
 		return sprintf(name, "none\n");
 
 	if (!q->elevator)

@@ -732,7 +731,7 @@ ssize_t elv_iosched_show(struct request_queue *q, char *name)
 	}
 	spin_unlock(&elv_list_lock);
 
-	if (q->mq_ops && q->elevator)
+	if (q->elevator)
 		len += sprintf(name+len, "none");
 
 	len += sprintf(len+name, "\n");

@@ -47,7 +47,7 @@ static void disk_release_events(struct gendisk *disk);
 
 void part_inc_in_flight(struct request_queue *q, struct hd_struct *part, int rw)
 {
-	if (q->mq_ops)
+	if (queue_is_mq(q))
 		return;
 
 	atomic_inc(&part->in_flight[rw]);

@@ -57,7 +57,7 @@ void part_inc_in_flight(struct request_queue *q, struct hd_struct *part, int rw)
 
 void part_dec_in_flight(struct request_queue *q, struct hd_struct *part, int rw)
 {
-	if (q->mq_ops)
+	if (queue_is_mq(q))
 		return;
 
 	atomic_dec(&part->in_flight[rw]);

@@ -68,7 +68,7 @@ void part_dec_in_flight(struct request_queue *q, struct hd_struct *part, int rw)
 void part_in_flight(struct request_queue *q, struct hd_struct *part,
 		    unsigned int inflight[2])
 {
-	if (q->mq_ops) {
+	if (queue_is_mq(q)) {
 		blk_mq_in_flight(q, part, inflight);
 		return;
 	}

@@ -85,7 +85,7 @@ void part_in_flight(struct request_queue *q, struct hd_struct *part,
 void part_in_flight_rw(struct request_queue *q, struct hd_struct *part,
 		       unsigned int inflight[2])
 {
-	if (q->mq_ops) {
+	if (queue_is_mq(q)) {
 		blk_mq_in_flight_rw(q, part, inflight);
 		return;
 	}

@@ -43,7 +43,7 @@ static unsigned dm_get_blk_mq_queue_depth(void)
 
 int dm_request_based(struct mapped_device *md)
 {
-	return queue_is_rq_based(md->queue);
+	return queue_is_mq(md->queue);
 }
 
 void dm_start_queue(struct request_queue *q)

@@ -919,12 +919,12 @@ static int device_is_rq_based(struct dm_target *ti, struct dm_dev *dev,
 	struct request_queue *q = bdev_get_queue(dev->bdev);
 	struct verify_rq_based_data *v = data;
 
-	if (q->mq_ops)
+	if (queue_is_mq(q))
 		v->mq_count++;
 	else
 		v->sq_count++;
 
-	return queue_is_rq_based(q);
+	return queue_is_mq(q);
 }
 
 static int dm_table_determine_type(struct dm_table *t)

@@ -656,11 +656,7 @@ static inline bool blk_account_rq(struct request *rq)
 
 #define rq_data_dir(rq)	(op_is_write(req_op(rq)) ? WRITE : READ)
 
-/*
- * Driver can handle struct request, if it either has an old style
- * request_fn defined, or is blk-mq based.
- */
-static inline bool queue_is_rq_based(struct request_queue *q)
+static inline bool queue_is_mq(struct request_queue *q)
 {
 	return q->mq_ops;
 }