blk-mq: prepare for implementing hctx table via xarray
A use-after-free on q->queue_hw_ctx is unavoidable when queue_for_each_hw_ctx() races with blk_mq_update_nr_hw_queues(); converting to an xarray fixes the UAF and makes the code cleaner at the same time.

Prepare for converting q->queue_hw_ctx into an xarray. One point is that xa_for_each() only accepts 'unsigned long' as the index, so change the type of the hctx index used by queue_for_each_hw_ctx() to 'unsigned long'.

Reviewed-by: Hannes Reinecke <hare@suse.de>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Link: https://lore.kernel.org/r/20220308073219.91173-6-ming.lei@redhat.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
parent de0328d3a2
commit 4f48120874
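
For context, the direction of the series: once q->queue_hw_ctx is stored in an xarray, the per-queue iterator can be built directly on xa_for_each(), whose index must be 'unsigned long'; that is exactly why every caller's index variable changes type in the diff below. A minimal sketch of that future iterator (the 'hctx_table' field name is an assumption about the follow-up conversion, not something introduced by this patch):

	/*
	 * Illustrative only: with the hctx map kept in an xarray, the
	 * iterator reduces to xa_for_each(), and xa_for_each() requires
	 * an 'unsigned long' index variable.
	 */
	#define queue_for_each_hw_ctx(q, hctx, i)		\
		xa_for_each(&(q)->hctx_table, (i), (hctx))
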
@@ -707,7 +707,7 @@ static void debugfs_create_files(struct dentry *parent, void *data,
 void blk_mq_debugfs_register(struct request_queue *q)
 {
 	struct blk_mq_hw_ctx *hctx;
-	int i;
+	unsigned long i;
 
 	debugfs_create_files(q->debugfs_dir, q, blk_mq_debugfs_queue_attrs);
 
@@ -780,7 +780,7 @@ void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx)
 void blk_mq_debugfs_register_hctxs(struct request_queue *q)
 {
 	struct blk_mq_hw_ctx *hctx;
-	int i;
+	unsigned long i;
 
 	queue_for_each_hw_ctx(q, hctx, i)
 		blk_mq_debugfs_register_hctx(q, hctx);
@@ -789,7 +789,7 @@ void blk_mq_debugfs_register_hctxs(struct request_queue *q)
 void blk_mq_debugfs_unregister_hctxs(struct request_queue *q)
 {
 	struct blk_mq_hw_ctx *hctx;
-	int i;
+	unsigned long i;
 
 	queue_for_each_hw_ctx(q, hctx, i)
 		blk_mq_debugfs_unregister_hctx(hctx);

@@ -515,7 +515,7 @@ static void blk_mq_exit_sched_shared_tags(struct request_queue *queue)
 static void blk_mq_sched_tags_teardown(struct request_queue *q, unsigned int flags)
 {
 	struct blk_mq_hw_ctx *hctx;
-	int i;
+	unsigned long i;
 
 	queue_for_each_hw_ctx(q, hctx, i) {
 		if (hctx->sched_tags) {
@@ -550,9 +550,10 @@ static int blk_mq_init_sched_shared_tags(struct request_queue *queue)
 
 int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
 {
-	unsigned int i, flags = q->tag_set->flags;
+	unsigned int flags = q->tag_set->flags;
 	struct blk_mq_hw_ctx *hctx;
 	struct elevator_queue *eq;
+	unsigned long i;
 	int ret;
 
 	if (!e) {
@@ -618,7 +619,7 @@ int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
 void blk_mq_sched_free_rqs(struct request_queue *q)
 {
 	struct blk_mq_hw_ctx *hctx;
-	int i;
+	unsigned long i;
 
 	if (blk_mq_is_shared_tags(q->tag_set->flags)) {
 		blk_mq_free_rqs(q->tag_set, q->sched_shared_tags,
@@ -635,7 +636,7 @@ void blk_mq_sched_free_rqs(struct request_queue *q)
 void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e)
 {
 	struct blk_mq_hw_ctx *hctx;
-	unsigned int i;
+	unsigned long i;
 	unsigned int flags = 0;
 
 	queue_for_each_hw_ctx(q, hctx, i) {

@@ -206,7 +206,7 @@ static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx)
 void blk_mq_unregister_dev(struct device *dev, struct request_queue *q)
 {
 	struct blk_mq_hw_ctx *hctx;
-	int i;
+	unsigned long i;
 
 	lockdep_assert_held(&q->sysfs_dir_lock);
 
@@ -255,7 +255,8 @@ void blk_mq_sysfs_init(struct request_queue *q)
 int __blk_mq_register_dev(struct device *dev, struct request_queue *q)
 {
 	struct blk_mq_hw_ctx *hctx;
-	int ret, i;
+	unsigned long i, j;
+	int ret;
 
 	WARN_ON_ONCE(!q->kobj.parent);
 	lockdep_assert_held(&q->sysfs_dir_lock);
@@ -278,8 +279,10 @@ int __blk_mq_register_dev(struct device *dev, struct request_queue *q)
 	return ret;
 
 unreg:
-	while (--i >= 0)
-		blk_mq_unregister_hctx(q->queue_hw_ctx[i]);
+	queue_for_each_hw_ctx(q, hctx, j) {
+		if (j < i)
+			blk_mq_unregister_hctx(hctx);
+	}
 
 	kobject_uevent(q->mq_kobj, KOBJ_REMOVE);
 	kobject_del(q->mq_kobj);
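
The unreg unwind above is the only control-flow change in the patch, and it appears to be behaviour-preserving. Since q->queue_hw_ctx[] is about to go away, the error path can no longer step backwards through the array; instead it walks every hctx and skips the ones at or beyond the index 'i' that failed to register. Restated with comments (illustrative only, not extra code in the patch):

	unreg:
		/* only hctxs with index below the failing one were registered */
		queue_for_each_hw_ctx(q, hctx, j) {
			if (j < i)
				blk_mq_unregister_hctx(hctx);
		}
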
@@ -290,7 +293,7 @@ int __blk_mq_register_dev(struct device *dev, struct request_queue *q)
 void blk_mq_sysfs_unregister(struct request_queue *q)
 {
 	struct blk_mq_hw_ctx *hctx;
-	int i;
+	unsigned long i;
 
 	mutex_lock(&q->sysfs_dir_lock);
 	if (!q->mq_sysfs_init_done)
@@ -306,7 +309,8 @@ void blk_mq_sysfs_unregister(struct request_queue *q)
 int blk_mq_sysfs_register(struct request_queue *q)
 {
 	struct blk_mq_hw_ctx *hctx;
-	int i, ret = 0;
+	unsigned long i;
+	int ret = 0;
 
 	mutex_lock(&q->sysfs_dir_lock);
 	if (!q->mq_sysfs_init_done)

@@ -515,7 +515,7 @@ void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_tag_iter_fn *fn,
 		bt_for_each(NULL, q, btags, fn, priv, false);
 	} else {
 		struct blk_mq_hw_ctx *hctx;
-		int i;
+		unsigned long i;
 
 		queue_for_each_hw_ctx(q, hctx, i) {
 			struct blk_mq_tags *tags = hctx->tags;

@@ -312,7 +312,7 @@ EXPORT_SYMBOL_GPL(blk_mq_unquiesce_queue);
 void blk_mq_wake_waiters(struct request_queue *q)
 {
 	struct blk_mq_hw_ctx *hctx;
-	unsigned int i;
+	unsigned long i;
 
 	queue_for_each_hw_ctx(q, hctx, i)
 		if (blk_mq_hw_queue_mapped(hctx))
@@ -1442,7 +1442,7 @@ static void blk_mq_timeout_work(struct work_struct *work)
 		container_of(work, struct request_queue, timeout_work);
 	unsigned long next = 0;
 	struct blk_mq_hw_ctx *hctx;
-	int i;
+	unsigned long i;
 
 	/* A deadlock might occur if a request is stuck requiring a
 	 * timeout at the same time a queue freeze is waiting
@@ -2143,7 +2143,7 @@ static struct blk_mq_hw_ctx *blk_mq_get_sq_hctx(struct request_queue *q)
 void blk_mq_run_hw_queues(struct request_queue *q, bool async)
 {
 	struct blk_mq_hw_ctx *hctx, *sq_hctx;
-	int i;
+	unsigned long i;
 
 	sq_hctx = NULL;
 	if (blk_mq_has_sqsched(q))
@@ -2171,7 +2171,7 @@ EXPORT_SYMBOL(blk_mq_run_hw_queues);
 void blk_mq_delay_run_hw_queues(struct request_queue *q, unsigned long msecs)
 {
 	struct blk_mq_hw_ctx *hctx, *sq_hctx;
-	int i;
+	unsigned long i;
 
 	sq_hctx = NULL;
 	if (blk_mq_has_sqsched(q))
@@ -2209,7 +2209,7 @@ EXPORT_SYMBOL(blk_mq_delay_run_hw_queues);
 bool blk_mq_queue_stopped(struct request_queue *q)
 {
 	struct blk_mq_hw_ctx *hctx;
-	int i;
+	unsigned long i;
 
 	queue_for_each_hw_ctx(q, hctx, i)
 		if (blk_mq_hctx_stopped(hctx))
@@ -2248,7 +2248,7 @@ EXPORT_SYMBOL(blk_mq_stop_hw_queue);
 void blk_mq_stop_hw_queues(struct request_queue *q)
 {
 	struct blk_mq_hw_ctx *hctx;
-	int i;
+	unsigned long i;
 
 	queue_for_each_hw_ctx(q, hctx, i)
 		blk_mq_stop_hw_queue(hctx);
@@ -2266,7 +2266,7 @@ EXPORT_SYMBOL(blk_mq_start_hw_queue);
 void blk_mq_start_hw_queues(struct request_queue *q)
 {
 	struct blk_mq_hw_ctx *hctx;
-	int i;
+	unsigned long i;
 
 	queue_for_each_hw_ctx(q, hctx, i)
 		blk_mq_start_hw_queue(hctx);
@@ -2286,7 +2286,7 @@ EXPORT_SYMBOL_GPL(blk_mq_start_stopped_hw_queue);
 void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async)
 {
 	struct blk_mq_hw_ctx *hctx;
-	int i;
+	unsigned long i;
 
 	queue_for_each_hw_ctx(q, hctx, i)
 		blk_mq_start_stopped_hw_queue(hctx, async);
@@ -3446,7 +3446,7 @@ static void blk_mq_exit_hw_queues(struct request_queue *q,
 		struct blk_mq_tag_set *set, int nr_queue)
 {
 	struct blk_mq_hw_ctx *hctx;
-	unsigned int i;
+	unsigned long i;
 
 	queue_for_each_hw_ctx(q, hctx, i) {
 		if (i == nr_queue)
@@ -3637,7 +3637,8 @@ static void __blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set,
 
 static void blk_mq_map_swqueue(struct request_queue *q)
 {
-	unsigned int i, j, hctx_idx;
+	unsigned int j, hctx_idx;
+	unsigned long i;
 	struct blk_mq_hw_ctx *hctx;
 	struct blk_mq_ctx *ctx;
 	struct blk_mq_tag_set *set = q->tag_set;
@@ -3744,7 +3745,7 @@ static void blk_mq_map_swqueue(struct request_queue *q)
 static void queue_set_hctx_shared(struct request_queue *q, bool shared)
 {
 	struct blk_mq_hw_ctx *hctx;
-	int i;
+	unsigned long i;
 
 	queue_for_each_hw_ctx(q, hctx, i) {
 		if (shared) {
@@ -3844,7 +3845,7 @@ static int blk_mq_alloc_ctxs(struct request_queue *q)
 void blk_mq_release(struct request_queue *q)
 {
 	struct blk_mq_hw_ctx *hctx, *next;
-	int i;
+	unsigned long i;
 
 	queue_for_each_hw_ctx(q, hctx, i)
 		WARN_ON_ONCE(hctx && list_empty(&hctx->hctx_list));
@@ -4362,7 +4363,8 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
 {
 	struct blk_mq_tag_set *set = q->tag_set;
 	struct blk_mq_hw_ctx *hctx;
-	int i, ret;
+	int ret;
+	unsigned long i;
 
 	if (!set)
 		return -EINVAL;
@@ -4738,7 +4740,7 @@ void blk_mq_cancel_work_sync(struct request_queue *q)
 {
 	if (queue_is_mq(q)) {
 		struct blk_mq_hw_ctx *hctx;
-		int i;
+		unsigned long i;
 
 		cancel_delayed_work_sync(&q->requeue_work);
 

@@ -1343,7 +1343,7 @@ static inline void rnbd_init_hw_queue(struct rnbd_clt_dev *dev,
 
 static void rnbd_init_mq_hw_queues(struct rnbd_clt_dev *dev)
 {
-	int i;
+	unsigned long i;
 	struct blk_mq_hw_ctx *hctx;
 	struct rnbd_queue *q;
 