block: disable the elevator in del_gendisk

The elevator is only used for file system requests, which are stopped in
del_gendisk.  Move disabling the elevator and freeing the scheduler tags
to the end of del_gendisk instead of doing that work in disk_release and
blk_cleanup_queue, to avoid a use-after-free on q->tag_set from
disk_release, as the tag_set might not be alive at that point.

Move the rq_qos_exit call as well, as it just depends on the elevator
exit and would otherwise be the only reason to keep the (not exactly
cheap) queue freeze in disk_release.

Fixes: e155b0c238 ("blk-mq: Use shared tags for shared sbitmap support")
Reported-by: syzbot+3e3f419f4a7816471838@syzkaller.appspotmail.com
Signed-off-by: Christoph Hellwig <hch@lst.de>
Tested-by: syzbot+3e3f419f4a7816471838@syzkaller.appspotmail.com
Link: https://lore.kernel.org/r/20220614074827.458955-2-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit 50e34d7881 (parent b96f3cab59)
Author:    Christoph Hellwig, 2022-06-14 09:48:24 +02:00
Committed: Jens Axboe
2 changed files with 11 additions and 41 deletions
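
To make the use-after-free concrete, here is a hedged sketch of the driver
teardown ordering the message describes; "my_dev" and "my_dev_remove" are
hypothetical, while the block layer calls are the real API of this era:

#include <linux/blkdev.h>
#include <linux/blk-mq.h>

struct my_dev {				/* hypothetical driver state */
	struct gendisk		*disk;
	struct blk_mq_tag_set	tag_set;
};

static void my_dev_remove(struct my_dev *dev)
{
	del_gendisk(dev->disk);			/* file system I/O stops here */
	blk_cleanup_queue(dev->disk->queue);
	blk_mq_free_tag_set(&dev->tag_set);	/* q->tag_set is now gone... */
	put_disk(dev->disk);			/* ...but this may drop the last
						 * reference and run disk_release(),
						 * which before this patch still
						 * freed scheduler requests through
						 * q->tag_set: a use-after-free. */
}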

--- a/block/blk-core.c
+++ b/block/blk-core.c

@@ -322,19 +322,6 @@ void blk_cleanup_queue(struct request_queue *q)
 		blk_mq_exit_queue(q);
 	}
 
-	/*
-	 * In theory, request pool of sched_tags belongs to request queue.
-	 * However, the current implementation requires tag_set for freeing
-	 * requests, so free the pool now.
-	 *
-	 * Queue has become frozen, there can't be any in-queue requests, so
-	 * it is safe to free requests now.
-	 */
-	mutex_lock(&q->sysfs_lock);
-	if (q->elevator)
-		blk_mq_sched_free_rqs(q);
-	mutex_unlock(&q->sysfs_lock);
-
 	/* @q is and will stay empty, shutdown and put */
 	blk_put_queue(q);
 }

--- a/block/genhd.c
+++ b/block/genhd.c

@@ -652,6 +652,17 @@ void del_gendisk(struct gendisk *disk)
 	blk_sync_queue(q);
 	blk_flush_integrity();
+	blk_mq_cancel_work_sync(q);
+
+	blk_mq_quiesce_queue(q);
+	if (q->elevator) {
+		mutex_lock(&q->sysfs_lock);
+		elevator_exit(q);
+		mutex_unlock(&q->sysfs_lock);
+	}
+	rq_qos_exit(q);
+	blk_mq_unquiesce_queue(q);
+
 	/*
 	 * Allow using passthrough request again after the queue is torn down.
 	 */
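
The quiesce/unquiesce pair added above is the usual blk-mq fence for this
kind of surgery: blk_mq_quiesce_queue() blocks new dispatches and waits for
ongoing ->queue_rq calls to finish, which is cheaper than a full queue
freeze because it does not have to wait for all outstanding requests to
complete.  A minimal sketch of the pattern, with the mutation step as a
placeholder:

	blk_mq_quiesce_queue(q);	/* fence: no ->queue_rq runs past here */
	/* ... safely tear down dispatch-side state, e.g. the elevator ... */
	blk_mq_unquiesce_queue(q);	/* resume dispatching */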
@@ -1120,31 +1131,6 @@ static const struct attribute_group *disk_attr_groups[] = {
 	NULL
 };
 
-static void disk_release_mq(struct request_queue *q)
-{
-	blk_mq_cancel_work_sync(q);
-
-	/*
-	 * There can't be any non non-passthrough bios in flight here, but
-	 * requests stay around longer, including passthrough ones so we
-	 * still need to freeze the queue here.
-	 */
-	blk_mq_freeze_queue(q);
-
-	/*
-	 * Since the I/O scheduler exit code may access cgroup information,
-	 * perform I/O scheduler exit before disassociating from the block
-	 * cgroup controller.
-	 */
-	if (q->elevator) {
-		mutex_lock(&q->sysfs_lock);
-		elevator_exit(q);
-		mutex_unlock(&q->sysfs_lock);
-	}
-	rq_qos_exit(q);
-	__blk_mq_unfreeze_queue(q, true);
-}
-
 /**
  * disk_release - releases all allocated resources of the gendisk
  * @dev: the device representing this disk
@@ -1166,9 +1152,6 @@ static void disk_release(struct device *dev)
 	might_sleep();
 	WARN_ON_ONCE(disk_live(disk));
 
-	if (queue_is_mq(disk->queue))
-		disk_release_mq(disk->queue);
-
 	blkcg_exit_queue(disk->queue);
 	disk_release_events(disk);
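
Taken together, a hedged summary of the post-patch lifetime (pseudocode
comments, not verbatim kernel code):

	/*
	 * del_gendisk(disk)
	 *   -> file system I/O stopped
	 *   -> elevator_exit() + rq_qos_exit()   (q->tag_set still alive)
	 * blk_cleanup_queue(q)                   (no scheduler tags left to free)
	 * blk_mq_free_tag_set(set)
	 * put_disk(disk) -> disk_release()       (no longer touches q->tag_set,
	 *                                         no queue freeze needed)
	 */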