mirror of https://gitee.com/openkylin/linux.git
bcache: Kill unaligned bvec hack
Bcache has a hack to avoid cloning the biovec if it's all full pages - but with immutable biovecs coming this won't be necessary anymore. For now, we remove the special case and always clone the bvec array so that the immutable biovec patches are simpler.

Signed-off-by: Kent Overstreet <kmo@daterainc.com>
This commit is contained in:
parent
2c30c71bd6
commit
ed9c47bebe
|
@@ -279,7 +279,6 @@ struct bcache_device {
|
|||
unsigned long sectors_dirty_last;
|
||||
long sectors_dirty_derivative;
|
||||
|
||||
mempool_t *unaligned_bvec;
|
||||
struct bio_set *bio_split;
|
||||
|
||||
unsigned data_csum:1;
|
||||
|
|
|
@@ -606,7 +606,6 @@ struct search {
|
|||
unsigned insert_bio_sectors;
|
||||
|
||||
unsigned recoverable:1;
|
||||
unsigned unaligned_bvec:1;
|
||||
unsigned write:1;
|
||||
unsigned read_dirty_data:1;
|
||||
|
||||
|
@@ -614,6 +613,7 @@ struct search {
|
|||
|
||||
struct btree_op op;
|
||||
struct data_insert_op iop;
|
||||
struct bio_vec bv[BIO_MAX_PAGES];
|
||||
};
|
||||
|
||||
static void bch_cache_read_endio(struct bio *bio, int error)
|
||||
|
@@ -759,10 +759,14 @@ static void bio_complete(struct search *s)
|
|||
static void do_bio_hook(struct search *s)
|
||||
{
|
||||
struct bio *bio = &s->bio.bio;
|
||||
memcpy(bio, s->orig_bio, sizeof(struct bio));
|
||||
|
||||
bio_init(bio);
|
||||
bio->bi_io_vec = s->bv;
|
||||
bio->bi_max_vecs = BIO_MAX_PAGES;
|
||||
__bio_clone(bio, s->orig_bio);
|
||||
bio->bi_end_io = request_endio;
|
||||
bio->bi_private = &s->cl;
|
||||
|
||||
atomic_set(&bio->bi_cnt, 3);
|
||||
}
|
||||
|
||||
|
@@ -774,9 +778,6 @@ static void search_free(struct closure *cl)
|
|||
if (s->iop.bio)
|
||||
bio_put(s->iop.bio);
|
||||
|
||||
if (s->unaligned_bvec)
|
||||
mempool_free(s->bio.bio.bi_io_vec, s->d->unaligned_bvec);
|
||||
|
||||
closure_debug_destroy(cl);
|
||||
mempool_free(s, s->d->c->search);
|
||||
}
|
||||
|
@@ -784,7 +785,6 @@ static void search_free(struct closure *cl)
|
|||
static struct search *search_alloc(struct bio *bio, struct bcache_device *d)
|
||||
{
|
||||
struct search *s;
|
||||
struct bio_vec *bv;
|
||||
|
||||
s = mempool_alloc(d->c->search, GFP_NOIO);
|
||||
memset(s, 0, offsetof(struct search, iop.insert_keys));
|
||||
|
@@ -803,15 +803,6 @@ static struct search *search_alloc(struct bio *bio, struct bcache_device *d)
|
|||
s->start_time = jiffies;
|
||||
do_bio_hook(s);
|
||||
|
||||
if (bio->bi_size != bio_segments(bio) * PAGE_SIZE) {
|
||||
bv = mempool_alloc(d->unaligned_bvec, GFP_NOIO);
|
||||
memcpy(bv, bio_iovec(bio),
|
||||
sizeof(struct bio_vec) * bio_segments(bio));
|
||||
|
||||
s->bio.bio.bi_io_vec = bv;
|
||||
s->unaligned_bvec = 1;
|
||||
}
|
||||
|
||||
return s;
|
||||
}
|
||||
|
||||
|
@@ -850,26 +841,13 @@ static void cached_dev_read_error(struct closure *cl)
|
|||
{
|
||||
struct search *s = container_of(cl, struct search, cl);
|
||||
struct bio *bio = &s->bio.bio;
|
||||
struct bio_vec *bv;
|
||||
int i;
|
||||
|
||||
if (s->recoverable) {
|
||||
/* Retry from the backing device: */
|
||||
trace_bcache_read_retry(s->orig_bio);
|
||||
|
||||
s->iop.error = 0;
|
||||
bv = s->bio.bio.bi_io_vec;
|
||||
do_bio_hook(s);
|
||||
s->bio.bio.bi_io_vec = bv;
|
||||
|
||||
if (!s->unaligned_bvec)
|
||||
bio_for_each_segment(bv, s->orig_bio, i)
|
||||
bv->bv_offset = 0, bv->bv_len = PAGE_SIZE;
|
||||
else
|
||||
memcpy(s->bio.bio.bi_io_vec,
|
||||
bio_iovec(s->orig_bio),
|
||||
sizeof(struct bio_vec) *
|
||||
bio_segments(s->orig_bio));
|
||||
|
||||
/* XXX: invalidate cache */
|
||||
|
||||
|
@@ -905,8 +883,7 @@ static void cached_dev_read_done(struct closure *cl)
|
|||
s->cache_miss = NULL;
|
||||
}
|
||||
|
||||
if (verify(dc, &s->bio.bio) && s->recoverable &&
|
||||
!s->unaligned_bvec && !s->read_dirty_data)
|
||||
if (verify(dc, &s->bio.bio) && s->recoverable && !s->read_dirty_data)
|
||||
bch_data_verify(dc, s->orig_bio);
|
||||
|
||||
bio_complete(s);
|
||||
|
|
|
@@ -739,8 +739,6 @@ static void bcache_device_free(struct bcache_device *d)
|
|||
}
|
||||
|
||||
bio_split_pool_free(&d->bio_split_hook);
|
||||
if (d->unaligned_bvec)
|
||||
mempool_destroy(d->unaligned_bvec);
|
||||
if (d->bio_split)
|
||||
bioset_free(d->bio_split);
|
||||
if (is_vmalloc_addr(d->full_dirty_stripes))
|
||||
|
@@ -793,8 +791,6 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size,
|
|||
return minor;
|
||||
|
||||
if (!(d->bio_split = bioset_create(4, offsetof(struct bbio, bio))) ||
|
||||
!(d->unaligned_bvec = mempool_create_kmalloc_pool(1,
|
||||
sizeof(struct bio_vec) * BIO_MAX_PAGES)) ||
|
||||
bio_split_pool_init(&d->bio_split_hook) ||
|
||||
!(d->disk = alloc_disk(1))) {
|
||||
ida_simple_remove(&bcache_minor, minor);
|
||||
|
|
Loading…
Reference in New Issue