mirror of https://gitee.com/openkylin/linux.git
Introduce a field per_bio_data_size in struct dm_target. Targets can set this field in the constructor. If a target sets this field to a non-zero value, "per_bio_data_size" bytes of auxiliary data are allocated for each bio submitted to the target. These data can be used for any purpose by the target and help us improve performance by removing some per-target mempools. Per-bio data is accessed with dm_per_bio_data. The argument data_size must be the same as the value per_bio_data_size in dm_target. If the target has a pointer to per_bio_data, it can get a pointer to the bio with dm_bio_from_per_bio_data() function (data_size must be the same as the value passed to dm_per_bio_data). Signed-off-by: Mikulas Patocka <mpatocka@redhat.com> Signed-off-by: Alasdair G Kergon <agk@redhat.com>
parent 70d6c400ac
commit c0820cf5ad
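As an illustration of the interface this patch adds, here is a minimal sketch of a hypothetical bio-based target using it. The names (struct example_pb, example_ctr, example_map) are invented for this note and do not appear in the patch; the surrounding target_type registration is omitted, and the exact map hook signature at this point in the tree may differ slightly.

/* Hypothetical per-bio state for an example target (not in the patch). */
struct example_pb {
	sector_t orig_sector;		/* remembered at map time */
};

static int example_ctr(struct dm_target *ti, unsigned argc, char **argv)
{
	/*
	 * Request sizeof(struct example_pb) bytes of auxiliary data in
	 * front of every bio cloned for this target.
	 */
	ti->per_bio_data_size = sizeof(struct example_pb);
	return 0;
}

static int example_map(struct dm_target *ti, struct bio *bio,
		       union map_info *map_context)
{
	/* data_size must equal ti->per_bio_data_size. */
	struct example_pb *pb = dm_per_bio_data(bio, sizeof(struct example_pb));

	pb->orig_sector = bio->bi_sector;

	/*
	 * Later, given only "pb" (e.g. from a deferred work item), the bio
	 * can be recovered with the inverse helper:
	 *
	 *	struct bio *b = dm_bio_from_per_bio_data(pb, sizeof(struct example_pb));
	 */
	return DM_MAPIO_REMAPPED;
}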
@@ -967,13 +967,22 @@ bool dm_table_request_based(struct dm_table *t)
 int dm_table_alloc_md_mempools(struct dm_table *t)
 {
 	unsigned type = dm_table_get_type(t);
+	unsigned per_bio_data_size = 0;
+	struct dm_target *tgt;
+	unsigned i;
 
 	if (unlikely(type == DM_TYPE_NONE)) {
 		DMWARN("no table type is set, can't allocate mempools");
 		return -EINVAL;
 	}
 
-	t->mempools = dm_alloc_md_mempools(type, t->integrity_supported);
+	if (type == DM_TYPE_BIO_BASED)
+		for (i = 0; i < t->num_targets; i++) {
+			tgt = t->targets + i;
+			per_bio_data_size = max(per_bio_data_size, tgt->per_bio_data_size);
+		}
+
+	t->mempools = dm_alloc_md_mempools(type, t->integrity_supported, per_bio_data_size);
 	if (!t->mempools)
 		return -ENOMEM;
 
@@ -62,18 +62,6 @@ struct dm_io {
 	spinlock_t endio_lock;
 };
 
-/*
- * For bio-based dm.
- * One of these is allocated per target within a bio. Hopefully
- * this will be simplified out one day.
- */
-struct dm_target_io {
-	struct dm_io *io;
-	struct dm_target *ti;
-	union map_info info;
-	struct bio clone;
-};
-
 /*
  * For request-based dm.
  * One of these is allocated per request.
@@ -1980,13 +1968,20 @@ static void free_dev(struct mapped_device *md)
 
 static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
 {
-	struct dm_md_mempools *p;
+	struct dm_md_mempools *p = dm_table_get_md_mempools(t);
 
-	if (md->io_pool && (md->tio_pool || dm_table_get_type(t) == DM_TYPE_BIO_BASED) && md->bs)
-		/* the md already has necessary mempools */
+	if (md->io_pool && (md->tio_pool || dm_table_get_type(t) == DM_TYPE_BIO_BASED) && md->bs) {
+		/*
+		 * The md already has necessary mempools. Reload just the
+		 * bioset because front_pad may have changed because
+		 * a different table was loaded.
+		 */
+		bioset_free(md->bs);
+		md->bs = p->bs;
+		p->bs = NULL;
 		goto out;
+	}
 
-	p = dm_table_get_md_mempools(t);
 	BUG_ON(!p || md->io_pool || md->tio_pool || md->bs);
 
 	md->io_pool = p->io_pool;
@@ -2745,7 +2740,7 @@ int dm_noflush_suspending(struct dm_target *ti)
 }
 EXPORT_SYMBOL_GPL(dm_noflush_suspending);
 
-struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity)
+struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity, unsigned per_bio_data_size)
 {
 	struct dm_md_mempools *pools = kmalloc(sizeof(*pools), GFP_KERNEL);
 	unsigned int pool_size = (type == DM_TYPE_BIO_BASED) ? 16 : MIN_IOS;
@@ -2753,6 +2748,8 @@ struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity)
 	if (!pools)
 		return NULL;
 
+	per_bio_data_size = roundup(per_bio_data_size, __alignof__(struct dm_target_io));
+
 	pools->io_pool = (type == DM_TYPE_BIO_BASED) ?
 			 mempool_create_slab_pool(MIN_IOS, _io_cache) :
 			 mempool_create_slab_pool(MIN_IOS, _rq_bio_info_cache);
@@ -2768,7 +2765,7 @@ struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity)
 
 	pools->bs = (type == DM_TYPE_BIO_BASED) ?
 		bioset_create(pool_size,
-			      offsetof(struct dm_target_io, clone)) :
+			      per_bio_data_size + offsetof(struct dm_target_io, clone)) :
 		bioset_create(pool_size,
 			      offsetof(struct dm_rq_clone_bio_info, clone));
 	if (!pools->bs)
@@ -159,7 +159,7 @@ void dm_kcopyd_exit(void);
 /*
  * Mempool operations
  */
-struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity);
+struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity, unsigned per_bio_data_size);
 void dm_free_md_mempools(struct dm_md_mempools *pools);
 
 #endif
@@ -210,6 +210,12 @@ struct dm_target {
 	 */
 	unsigned num_write_same_requests;
 
+	/*
+	 * The minimum number of extra bytes allocated in each bio for the
+	 * target to use. dm_per_bio_data returns the data location.
+	 */
+	unsigned per_bio_data_size;
+
 	/* target specific data */
 	void *private;
 
@@ -246,6 +252,30 @@ struct dm_target_callbacks {
 	int (*congested_fn) (struct dm_target_callbacks *, int);
 };
 
+/*
+ * For bio-based dm.
+ * One of these is allocated for each bio.
+ * This structure shouldn't be touched directly by target drivers.
+ * It is here so that we can inline dm_per_bio_data and
+ * dm_bio_from_per_bio_data
+ */
+struct dm_target_io {
+	struct dm_io *io;
+	struct dm_target *ti;
+	union map_info info;
+	struct bio clone;
+};
+
+static inline void *dm_per_bio_data(struct bio *bio, size_t data_size)
+{
+	return (char *)bio - offsetof(struct dm_target_io, clone) - data_size;
+}
+
+static inline struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size)
+{
+	return (struct bio *)((char *)data + data_size + offsetof(struct dm_target_io, clone));
+}
+
 int dm_register_target(struct target_type *t);
 void dm_unregister_target(struct target_type *t);
 
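A brief illustration of the layout behind these two helpers (the function below is written for this note, not code from the patch): dm_alloc_md_mempools() sizes the bioset front_pad as per_bio_data_size + offsetof(struct dm_target_io, clone), so each clone bio is preceded by the dm_target_io fields, which are in turn preceded by the per-bio data; the two helpers simply walk the same offsets in opposite directions.

/*
 * Illustrative only -- not part of this patch.
 *
 * Front-pad layout created by dm_alloc_md_mempools():
 *
 *   [ per-bio data ][ dm_target_io fields up to .clone ][ struct bio ... ]
 *   ^                                                    ^
 *   dm_per_bio_data(bio, n)                              bio seen by the target
 */
static void example_round_trip_check(struct bio *bio, size_t n)
{
	void *data = dm_per_bio_data(bio, n);

	/* Walking forward by the same offsets recovers the clone bio. */
	BUG_ON(dm_bio_from_per_bio_data(data, n) != bio);
}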