blk-mq: fix sysfs registration/unregistration race
There is a race between CPU hotplug handling and adding/deleting a
gendisk for blk-mq, where both try to register and unregister the same
sysfs entries.

null_add_dev
    --> blk_mq_init_queue
        --> blk_mq_init_allocated_queue
            --> add to 'all_q_list' (*)
    --> add_disk
        --> blk_register_queue
            --> blk_mq_register_disk (++)

null_del_dev
    --> del_gendisk
        --> blk_unregister_queue
            --> blk_mq_unregister_disk (--)
    --> blk_cleanup_queue
        --> blk_mq_free_queue
            --> del from 'all_q_list' (*)

blk_mq_queue_reinit
    --> blk_mq_sysfs_unregister (-)
    --> blk_mq_sysfs_register (+)

While the request queue is on 'all_q_list' (*), blk_mq_queue_reinit()
can be called for the queue at any time by the CPU hotplug callback.
But blk_mq_sysfs_unregister (-) and blk_mq_sysfs_register (+) in
blk_mq_queue_reinit must not be called before blk_mq_register_disk (++)
or after blk_mq_unregister_disk (--) has finished, because
'/sys/block/*/mq/' does not exist then.

There is already a BLK_MQ_F_SYSFS_UP flag in hctx->flags that can be
used to track this sysfs state, but it only fixes the issue partially.

To fix it completely, we need a per-queue flag instead of a per-hctx
flag, with appropriate locking.  This introduces q->mq_sysfs_init_done,
which is properly protected by all_q_mutex.

We also need to ensure that blk_mq_map_swqueue() is called with
all_q_mutex held.  Since hctx->nr_ctx is reset temporarily and then
updated in blk_mq_map_swqueue(), blk_mq_register_hctx() must not see
the temporary hctx->nr_ctx value during CPU hotplug handling or while
adding/deleting a gendisk.

Signed-off-by: Akinobu Mita <akinobu.mita@gmail.com>
Reviewed-by: Ming Lei <tom.leiming@gmail.com>
Cc: Ming Lei <tom.leiming@gmail.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@fb.com>
commit 4593fdbe7a
parent 1356aae083
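The locking scheme the commit relies on can be modeled outside the kernel.
Below is a minimal userspace sketch (pthreads, NOT kernel code): the struct
and flag names mirror the kernel's for readability, everything else is
invented for illustration.  It shows how a per-queue init-done flag, written
only under all_q_mutex (which blk_mq_disable_hotplug()/blk_mq_enable_hotplug()
wrap in this era), lets the hotplug-driven reinit path skip a queue whose
sysfs directory does not exist yet or anymore.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t all_q_mutex = PTHREAD_MUTEX_INITIALIZER;

struct queue {
	bool mq_sysfs_init_done;	/* models q->mq_sysfs_init_done */
};

/* Models blk_mq_register_disk(): set the flag only once the sysfs
 * entries exist, and only while holding the lock. */
static void register_disk(struct queue *q)
{
	pthread_mutex_lock(&all_q_mutex);	/* blk_mq_disable_hotplug() */
	/* ... create '/sys/block/<disk>/mq/' entries here ... */
	q->mq_sysfs_init_done = true;
	pthread_mutex_unlock(&all_q_mutex);	/* blk_mq_enable_hotplug() */
}

/* Models blk_mq_unregister_disk(): clear the flag before dropping the
 * lock, so a later reinit sees the entries as already gone. */
static void unregister_disk(struct queue *q)
{
	pthread_mutex_lock(&all_q_mutex);
	/* ... remove the sysfs entries here ... */
	q->mq_sysfs_init_done = false;
	pthread_mutex_unlock(&all_q_mutex);
}

/* Models blk_mq_queue_reinit(): called with all_q_mutex already held,
 * so reading the flag is race-free. */
static void queue_reinit(struct queue *q)
{
	if (!q->mq_sysfs_init_done)
		return;		/* sysfs dir absent: nothing to redo */
	/* ... unregister and re-register the sysfs entries ... */
}

int main(void)
{
	struct queue q = { .mq_sysfs_init_done = false };

	pthread_mutex_lock(&all_q_mutex);
	queue_reinit(&q);	/* before register_disk(): skipped */
	pthread_mutex_unlock(&all_q_mutex);

	register_disk(&q);

	pthread_mutex_lock(&all_q_mutex);
	queue_reinit(&q);	/* after register_disk(): proceeds */
	pthread_mutex_unlock(&all_q_mutex);

	unregister_disk(&q);
	puts("no window where reinit touches missing sysfs entries");
	return 0;
}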
--- a/block/blk-mq-sysfs.c
+++ b/block/blk-mq-sysfs.c
@@ -343,7 +343,7 @@ static void blk_mq_unregister_hctx(struct blk_mq_hw_ctx *hctx)
 	struct blk_mq_ctx *ctx;
 	int i;
 
-	if (!hctx->nr_ctx || !(hctx->flags & BLK_MQ_F_SYSFS_UP))
+	if (!hctx->nr_ctx)
 		return;
 
 	hctx_for_each_ctx(hctx, ctx, i)
@@ -358,7 +358,7 @@ static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx)
 	struct blk_mq_ctx *ctx;
 	int i, ret;
 
-	if (!hctx->nr_ctx || !(hctx->flags & BLK_MQ_F_SYSFS_UP))
+	if (!hctx->nr_ctx)
 		return 0;
 
 	ret = kobject_add(&hctx->kobj, &q->mq_kobj, "%u", hctx->queue_num);
@@ -381,6 +381,8 @@ void blk_mq_unregister_disk(struct gendisk *disk)
 	struct blk_mq_ctx *ctx;
 	int i, j;
 
+	blk_mq_disable_hotplug();
+
 	queue_for_each_hw_ctx(q, hctx, i) {
 		blk_mq_unregister_hctx(hctx);
 
@@ -395,6 +397,9 @@ void blk_mq_unregister_disk(struct gendisk *disk)
 	kobject_put(&q->mq_kobj);
 
 	kobject_put(&disk_to_dev(disk)->kobj);
+
+	q->mq_sysfs_init_done = false;
+	blk_mq_enable_hotplug();
 }
 
 static void blk_mq_sysfs_init(struct request_queue *q)
@@ -425,27 +430,30 @@ int blk_mq_register_disk(struct gendisk *disk)
 	struct blk_mq_hw_ctx *hctx;
 	int ret, i;
 
+	blk_mq_disable_hotplug();
+
 	blk_mq_sysfs_init(q);
 
 	ret = kobject_add(&q->mq_kobj, kobject_get(&dev->kobj), "%s", "mq");
 	if (ret < 0)
-		return ret;
+		goto out;
 
 	kobject_uevent(&q->mq_kobj, KOBJ_ADD);
 
 	queue_for_each_hw_ctx(q, hctx, i) {
-		hctx->flags |= BLK_MQ_F_SYSFS_UP;
 		ret = blk_mq_register_hctx(hctx);
 		if (ret)
 			break;
 	}
 
-	if (ret) {
+	if (ret)
 		blk_mq_unregister_disk(disk);
-		return ret;
-	}
+	else
+		q->mq_sysfs_init_done = true;
+out:
+	blk_mq_enable_hotplug();
 
-	return 0;
+	return ret;
 }
 EXPORT_SYMBOL_GPL(blk_mq_register_disk);
 
@@ -454,6 +462,9 @@ void blk_mq_sysfs_unregister(struct request_queue *q)
 	struct blk_mq_hw_ctx *hctx;
 	int i;
 
+	if (!q->mq_sysfs_init_done)
+		return;
+
 	queue_for_each_hw_ctx(q, hctx, i)
 		blk_mq_unregister_hctx(hctx);
 }
@@ -463,6 +474,9 @@ int blk_mq_sysfs_register(struct request_queue *q)
 	struct blk_mq_hw_ctx *hctx;
 	int i, ret = 0;
 
+	if (!q->mq_sysfs_init_done)
+		return ret;
+
 	queue_for_each_hw_ctx(q, hctx, i) {
 		ret = blk_mq_register_hctx(hctx);
 		if (ret)
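For context, the caller these two early returns protect: blk_mq_queue_reinit()
runs from the CPU hotplug path with all_q_mutex held, so the new checks are
serialized against the flag updates in blk_mq_register_disk() and
blk_mq_unregister_disk().  A paraphrased sketch of its shape in this era
(simplified, not the verbatim kernel function; the real one also rebuilds the
cpu-to-queue map):

static void blk_mq_queue_reinit(struct request_queue *q)
{
	/* Caller holds all_q_mutex, so mq_sysfs_init_done is stable. */
	blk_mq_sysfs_unregister(q);	/* no-op until sysfs is set up */

	/* ... update the sw queue mapping for the new CPU topology ... */
	blk_mq_map_swqueue(q);

	blk_mq_sysfs_register(q);	/* likewise gated by the flag */
}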
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2035,13 +2035,13 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
 		goto err_hctxs;
 
 	mutex_lock(&all_q_mutex);
-	list_add_tail(&q->all_q_node, &all_q_list);
-	mutex_unlock(&all_q_mutex);
 
+	list_add_tail(&q->all_q_node, &all_q_list);
 	blk_mq_add_queue_tag_set(set, q);
-
 	blk_mq_map_swqueue(q);
 
+	mutex_unlock(&all_q_mutex);
+
 	return q;
 
 err_hctxs:
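Why widen the critical section here?  The CPU hotplug notifier walks every
queue on all_q_list under all_q_mutex, so performing blk_mq_map_swqueue() for
a new queue inside the same lock guarantees the notifier never sees the queue
between the list add and its first ctx-to-hctx mapping, i.e. while
hctx->nr_ctx is transiently zero.  A heavily simplified, paraphrased sketch
of that notifier (the real callback also freezes and unfreezes each queue
around the reinit):

static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
				      unsigned long action, void *hcpu)
{
	struct request_queue *q;

	if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
		return NOTIFY_OK;

	mutex_lock(&all_q_mutex);
	/* Any queue visible here has completed blk_mq_map_swqueue(),
	 * because queue setup now holds all_q_mutex across it. */
	list_for_each_entry(q, &all_q_list, all_q_node)
		blk_mq_queue_reinit(q);
	mutex_unlock(&all_q_mutex);

	return NOTIFY_OK;
}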
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -145,7 +145,6 @@ enum {
 	BLK_MQ_F_SHOULD_MERGE	= 1 << 0,
 	BLK_MQ_F_TAG_SHARED	= 1 << 1,
 	BLK_MQ_F_SG_MERGE	= 1 << 2,
-	BLK_MQ_F_SYSFS_UP	= 1 << 3,
 	BLK_MQ_F_DEFER_ISSUE	= 1 << 4,
 	BLK_MQ_F_ALLOC_POLICY_START_BIT = 8,
 	BLK_MQ_F_ALLOC_POLICY_BITS = 1,
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -456,6 +456,8 @@ struct request_queue {
 	struct blk_mq_tag_set	*tag_set;
 	struct list_head	tag_set_list;
 	struct bio_set		*bio_split;
+
+	bool			mq_sysfs_init_done;
 };
 
 #define QUEUE_FLAG_QUEUED	1	/* uses generic tag queueing */