#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/random.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"

/*
 * Wait until a tag becomes available, then release it again right away.
 */
void blk_mq_wait_for_tags(struct blk_mq_tags *tags, struct blk_mq_hw_ctx *hctx,
                          bool reserved)
{
        int tag, zero = 0;

        tag = blk_mq_get_tag(tags, hctx, &zero, __GFP_WAIT, reserved);
        blk_mq_put_tag(tags, tag, &zero);
}

static bool bt_has_free_tags(struct blk_mq_bitmap_tags *bt)
{
        int i;

        for (i = 0; i < bt->map_nr; i++) {
                struct blk_mq_bitmap *bm = &bt->map[i];
                int ret;

                ret = find_first_zero_bit(&bm->word, bm->depth);
                if (ret < bm->depth)
                        return true;
        }

        return false;
}

bool blk_mq_has_free_tags(struct blk_mq_tags *tags)
{
        if (!tags)
                return true;

        return bt_has_free_tags(&tags->bitmap_tags);
}

static int __bt_get_word(struct blk_mq_bitmap *bm, unsigned int last_tag)
{
        int tag, org_last_tag, end;

        org_last_tag = last_tag;
        end = bm->depth;
        do {
restart:
                tag = find_next_zero_bit(&bm->word, end, last_tag);
                if (unlikely(tag >= end)) {
                        /*
                         * We started with an offset, start from 0 to
                         * exhaust the map.
                         */
                        if (org_last_tag && last_tag) {
                                end = last_tag;
                                last_tag = 0;
                                goto restart;
                        }
                        return -1;
                }
                last_tag = tag + 1;
        } while (test_and_set_bit_lock(tag, &bm->word));

        return tag;
}

/*
 * Straightforward bitmap tag implementation, where each bit is a tag
 * (cleared == free, and set == busy). The small twist is using per-cpu
 * last_tag caches, which blk-mq stores in the blk_mq_ctx software queue
 * contexts. This enables us to drastically limit the space searched,
 * without dirtying an extra shared cacheline like we would if we stored
 * the cache value inside the shared blk_mq_bitmap_tags structure. On top
 * of that, each word of tags is in a separate cacheline. This means that
 * multiple users will tend to stick to different cachelines, at least
 * until the map is exhausted.
 */
static int __bt_get(struct blk_mq_bitmap_tags *bt, unsigned int *tag_cache)
{
        unsigned int last_tag, org_last_tag;
        int index, i, tag;

        last_tag = org_last_tag = *tag_cache;
        index = TAG_TO_INDEX(bt, last_tag);

        for (i = 0; i < bt->map_nr; i++) {
                tag = __bt_get_word(&bt->map[index], TAG_TO_BIT(bt, last_tag));
                if (tag != -1) {
                        tag += (index << bt->bits_per_word);
                        goto done;
                }

                last_tag = 0;
                if (++index >= bt->map_nr)
                        index = 0;
        }

        *tag_cache = 0;
        return -1;

        /*
         * Only update the cache from the allocation path, if we ended
         * up using the specific cached tag.
         */
done:
        if (tag == org_last_tag) {
                last_tag = tag + 1;
                if (last_tag >= bt->depth - 1)
                        last_tag = 0;

                *tag_cache = last_tag;
        }

        return tag;
}

static inline void bt_index_inc(unsigned int *index)
{
        *index = (*index + 1) & (BT_WAIT_QUEUES - 1);
}

static struct bt_wait_state *bt_wait_ptr(struct blk_mq_bitmap_tags *bt,
                                         struct blk_mq_hw_ctx *hctx)
{
        struct bt_wait_state *bs;

        if (!hctx)
                return &bt->bs[0];

        bs = &bt->bs[hctx->wait_index];
        bt_index_inc(&hctx->wait_index);
        return bs;
}

static int bt_get(struct blk_mq_bitmap_tags *bt, struct blk_mq_hw_ctx *hctx,
                  unsigned int *last_tag, gfp_t gfp)
{
        struct bt_wait_state *bs;
        DEFINE_WAIT(wait);
        int tag;

        tag = __bt_get(bt, last_tag);
        if (tag != -1)
                return tag;

        if (!(gfp & __GFP_WAIT))
                return -1;

        bs = bt_wait_ptr(bt, hctx);
        do {
                bool was_empty;

                was_empty = list_empty(&wait.task_list);
                prepare_to_wait(&bs->wait, &wait, TASK_UNINTERRUPTIBLE);

                tag = __bt_get(bt, last_tag);
                if (tag != -1)
                        break;

                if (was_empty)
                        atomic_set(&bs->wait_cnt, bt->wake_cnt);

                io_schedule();
        } while (1);

        finish_wait(&bs->wait, &wait);
        return tag;
}

static unsigned int __blk_mq_get_tag(struct blk_mq_tags *tags,
                                     struct blk_mq_hw_ctx *hctx,
                                     unsigned int *last_tag, gfp_t gfp)
{
        int tag;

        tag = bt_get(&tags->bitmap_tags, hctx, last_tag, gfp);
        if (tag >= 0)
                return tag + tags->nr_reserved_tags;

        return BLK_MQ_TAG_FAIL;
}

static unsigned int __blk_mq_get_reserved_tag(struct blk_mq_tags *tags,
                                              gfp_t gfp)
{
        int tag, zero = 0;

        if (unlikely(!tags->nr_reserved_tags)) {
                WARN_ON_ONCE(1);
                return BLK_MQ_TAG_FAIL;
        }

        tag = bt_get(&tags->breserved_tags, NULL, &zero, gfp);
        if (tag < 0)
                return BLK_MQ_TAG_FAIL;

        return tag;
}

unsigned int blk_mq_get_tag(struct blk_mq_tags *tags,
                            struct blk_mq_hw_ctx *hctx, unsigned int *last_tag,
                            gfp_t gfp, bool reserved)
{
        if (!reserved)
                return __blk_mq_get_tag(tags, hctx, last_tag, gfp);

        return __blk_mq_get_reserved_tag(tags, gfp);
}

static struct bt_wait_state *bt_wake_ptr(struct blk_mq_bitmap_tags *bt)
{
        int i, wake_index;

        wake_index = bt->wake_index;
        for (i = 0; i < BT_WAIT_QUEUES; i++) {
                struct bt_wait_state *bs = &bt->bs[wake_index];

                if (waitqueue_active(&bs->wait)) {
                        if (wake_index != bt->wake_index)
                                bt->wake_index = wake_index;

                        return bs;
                }

                bt_index_inc(&wake_index);
        }

        return NULL;
}

static void bt_clear_tag(struct blk_mq_bitmap_tags *bt, unsigned int tag)
{
        const int index = TAG_TO_INDEX(bt, tag);
        struct bt_wait_state *bs;

        /*
         * The unlock memory barrier needs to order the access to the request
         * in the free path against the clearing of the tag bit.
         */
        clear_bit_unlock(TAG_TO_BIT(bt, tag), &bt->map[index].word);

        bs = bt_wake_ptr(bt);
        if (bs && atomic_dec_and_test(&bs->wait_cnt)) {
                atomic_set(&bs->wait_cnt, bt->wake_cnt);
                bt_index_inc(&bt->wake_index);
                wake_up(&bs->wait);
        }
}

static void __blk_mq_put_tag(struct blk_mq_tags *tags, unsigned int tag)
{
        BUG_ON(tag >= tags->nr_tags);

        bt_clear_tag(&tags->bitmap_tags, tag);
}

static void __blk_mq_put_reserved_tag(struct blk_mq_tags *tags,
                                      unsigned int tag)
{
        BUG_ON(tag >= tags->nr_reserved_tags);

        bt_clear_tag(&tags->breserved_tags, tag);
}

void blk_mq_put_tag(struct blk_mq_tags *tags, unsigned int tag,
                    unsigned int *last_tag)
{
        if (tag >= tags->nr_reserved_tags) {
                const int real_tag = tag - tags->nr_reserved_tags;

                __blk_mq_put_tag(tags, real_tag);
                *last_tag = real_tag;
        } else
                __blk_mq_put_reserved_tag(tags, tag);
}

static void bt_for_each_free(struct blk_mq_bitmap_tags *bt,
                             unsigned long *free_map, unsigned int off)
{
        int i;

        for (i = 0; i < bt->map_nr; i++) {
                struct blk_mq_bitmap *bm = &bt->map[i];
                int bit = 0;

                do {
                        bit = find_next_zero_bit(&bm->word,
                                                 bm->depth, bit);
                        if (bit >= bm->depth)
                                break;

                        __set_bit(bit + off, free_map);
                        bit++;
                } while (1);

                off += (1 << bt->bits_per_word);
        }
}

/*
 * Build a bitmap of the currently free tags (reserved tags in the low bits,
 * normal tags offset by nr_reserved_tags) and pass it to @fn.
 */
void blk_mq_tag_busy_iter(struct blk_mq_tags *tags,
                          void (*fn)(void *, unsigned long *), void *data)
{
        unsigned long *tag_map;
        size_t map_size;

        map_size = ALIGN(tags->nr_tags, BITS_PER_LONG) / BITS_PER_LONG;
        tag_map = kzalloc(map_size * sizeof(unsigned long), GFP_ATOMIC);
        if (!tag_map)
                return;

        bt_for_each_free(&tags->bitmap_tags, tag_map, tags->nr_reserved_tags);
        if (tags->nr_reserved_tags)
                bt_for_each_free(&tags->breserved_tags, tag_map, 0);

        fn(data, tag_map);
        kfree(tag_map);
}

static unsigned int bt_unused_tags(struct blk_mq_bitmap_tags *bt)
{
        unsigned int i, used;

        for (i = 0, used = 0; i < bt->map_nr; i++) {
                struct blk_mq_bitmap *bm = &bt->map[i];

                used += bitmap_weight(&bm->word, bm->depth);
        }

        return bt->depth - used;
}

static int bt_alloc(struct blk_mq_bitmap_tags *bt, unsigned int depth,
                    int node, bool reserved)
{
        int i;

        bt->bits_per_word = ilog2(BITS_PER_LONG);

        /*
         * Depth can be zero for reserved tags, that's not a failure
         * condition.
         */
        if (depth) {
                unsigned int nr, i, map_depth, tags_per_word;

                tags_per_word = (1 << bt->bits_per_word);

                /*
                 * If the tag space is small, shrink the number of tags
                 * per word so we spread over a few cachelines, at least.
                 * If less than 4 tags, just forget about it, it's not
                 * going to work optimally anyway.
                 */
                if (depth >= 4) {
                        while (tags_per_word * 4 > depth) {
                                bt->bits_per_word--;
                                tags_per_word = (1 << bt->bits_per_word);
                        }
                }

                nr = ALIGN(depth, tags_per_word) / tags_per_word;
                bt->map = kzalloc_node(nr * sizeof(struct blk_mq_bitmap),
                                       GFP_KERNEL, node);
                if (!bt->map)
                        return -ENOMEM;

                bt->map_nr = nr;
                map_depth = depth;
                for (i = 0; i < nr; i++) {
                        bt->map[i].depth = min(map_depth, tags_per_word);
                        map_depth -= tags_per_word;
                }
        }

        bt->bs = kzalloc(BT_WAIT_QUEUES * sizeof(*bt->bs), GFP_KERNEL);
        if (!bt->bs) {
                kfree(bt->map);
                return -ENOMEM;
        }

        for (i = 0; i < BT_WAIT_QUEUES; i++)
                init_waitqueue_head(&bt->bs[i].wait);

        bt->wake_cnt = BT_WAIT_BATCH;
        if (bt->wake_cnt > depth / 4)
                bt->wake_cnt = max(1U, depth / 4);

        bt->depth = depth;
        return 0;
}

static void bt_free(struct blk_mq_bitmap_tags *bt)
{
        kfree(bt->map);
        kfree(bt->bs);
}

static struct blk_mq_tags *blk_mq_init_bitmap_tags(struct blk_mq_tags *tags,
                                                   int node)
{
        unsigned int depth = tags->nr_tags - tags->nr_reserved_tags;

        if (bt_alloc(&tags->bitmap_tags, depth, node, false))
                goto enomem;
        if (bt_alloc(&tags->breserved_tags, tags->nr_reserved_tags, node, true))
                goto enomem;

        return tags;
enomem:
        bt_free(&tags->bitmap_tags);
        kfree(tags);
        return NULL;
}

struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
                                     unsigned int reserved_tags, int node)
{
        struct blk_mq_tags *tags;

        if (total_tags > BLK_MQ_TAG_MAX) {
                pr_err("blk-mq: tag depth too large\n");
                return NULL;
        }

        tags = kzalloc_node(sizeof(*tags), GFP_KERNEL, node);
        if (!tags)
                return NULL;

        tags->nr_tags = total_tags;
        tags->nr_reserved_tags = reserved_tags;

        return blk_mq_init_bitmap_tags(tags, node);
}

void blk_mq_free_tags(struct blk_mq_tags *tags)
{
        bt_free(&tags->bitmap_tags);
        bt_free(&tags->breserved_tags);
        kfree(tags);
}

void blk_mq_tag_init_last_tag(struct blk_mq_tags *tags, unsigned int *tag)
{
        unsigned int depth = tags->nr_tags - tags->nr_reserved_tags;

        *tag = prandom_u32() % depth;
}

ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page)
{
        char *orig_page = page;
        unsigned int free, res;

        if (!tags)
                return 0;

        page += sprintf(page, "nr_tags=%u, reserved_tags=%u, "
                        "bits_per_word=%u\n", tags->nr_tags,
                        tags->nr_reserved_tags,
                        tags->bitmap_tags.bits_per_word);

        free = bt_unused_tags(&tags->bitmap_tags);
        res = bt_unused_tags(&tags->breserved_tags);

        page += sprintf(page, "nr_free=%u, nr_reserved=%u\n", free, res);

        return page - orig_page;
}
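
/*
 * Worked example of the tag layout above (illustrative numbers only): a tag
 * is split into a word index (tag >> bits_per_word) and a bit within that
 * word, which is how __bt_get() reassembles it with
 * tag = bit + (index << bits_per_word).  With BITS_PER_LONG == 64 and a
 * depth of 256, bt_alloc() keeps bits_per_word at ilog2(64) == 6, so tag 70
 * maps to bit 6 of map[1].  With a depth of 70, the shrink loop drops
 * bits_per_word to 4 (16 tags per word), so the tags spread over
 * ALIGN(70, 16) / 16 == 5 words, and thus 5 cachelines, instead of 2.
 */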
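
/*
 * Note on the wakeup batching above: bt_clear_tag() only wakes a waiting
 * allocator once wait_cnt frees have accumulated on its wait queue, and
 * bt_alloc() clamps wake_cnt to min(BT_WAIT_BATCH, max(1, depth / 4)).
 * Sleepers are spread over BT_WAIT_QUEUES wait queues, chosen round-robin
 * via hctx->wait_index in bt_wait_ptr(), presumably so that a burst of
 * completions does not funnel every waiter through a single waitqueue.
 */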