Merge tag 'dm-3.12-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm

Pull device-mapper updates from Mike Snitzer:
 "Add the ability to collect I/O statistics on user-defined regions of
  a device-mapper device. This dm-stats code required the
  reintroduction of a div64_u64_rem() helper, but as a separate method
  that doesn't slow down div64_u64() -- especially on 32-bit systems.

  Allow the error target to replace request-based DM devices
  (e.g. multipath) in addition to bio-based DM devices.

  Various other small code fixes and improvements to thin-provisioning,
  DM cache and the DM ioctl interface"

* tag 'dm-3.12-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm:
  dm stripe: silence a couple sparse warnings
  dm: add statistics support
  dm thin: always return -ENOSPC if no_free_space is set
  dm ioctl: cleanup error handling in table_load
  dm ioctl: increase granularity of type_lock when loading table
  dm ioctl: prevent rename to empty name or uuid
  dm thin: set pool read-only if breaking_sharing fails block allocation
  dm thin: prefix pool error messages with pool device name
  dm: allow error target to replace bio-based and request-based targets
  math64: New separate div64_u64_rem helper
  dm space map: optimise sm_ll_dec and sm_ll_inc
  dm btree: prefetch child nodes when walking tree for a dm_btree_del
  dm btree: use pop_frame in dm_btree_del to cleanup code
  dm cache: eliminate holes in cache structure
  dm cache: fix stacking of geometry limits
  dm thin: fix stacking of geometry limits
  dm thin: add data block size limits to Documentation
  dm cache: add data block size limits to code and Documentation
  dm cache: document metadata device is exclussive to a cache
  dm: stop using WQ_NON_REENTRANT
This commit is contained in commit 7426d62871.
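A note on the math64 change mentioned above: on 64-bit builds the restored div64_u64_rem() helper is essentially free, since one division yields both quotient and remainder; the point of keeping it separate is that div64_u64() on 32-bit no longer pays for remainder bookkeeping it doesn't need. A minimal sketch of the 64-bit shape (the 32-bit variant instead uses an iterative shift-and-subtract loop that produces the remainder in the same pass):

    /* 64-bit sketch: quotient and remainder from one logical division. */
    static inline u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
    {
            *remainder = dividend % divisor;
            return dividend / divisor;
    }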
@@ -50,14 +50,16 @@ other parameters detailed later):
 which are dirty, and extra hints for use by the policy object.
 This information could be put on the cache device, but having it
 separate allows the volume manager to configure it differently,
-e.g. as a mirror for extra robustness.
+e.g. as a mirror for extra robustness. This metadata device may only
+be used by a single cache device.
 
 Fixed block size
 ----------------
 
 The origin is divided up into blocks of a fixed size. This block size
 is configurable when you first create the cache. Typically we've been
-using block sizes of 256k - 1024k.
+using block sizes of 256KB - 1024KB. The block size must be between 64
+(32KB) and 2097152 (1GB) and a multiple of 64 (32KB).
 
 Having a fixed block size simplifies the target a lot. But it is
 something of a compromise. For instance, a small part of a block may be
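For reference, the sector counts in the new text convert as follows: 64 sectors x 512 bytes = 32KB and 2097152 sectors x 512 bytes = 1GB, while the old "256k - 1024k" wording corresponds to 512-2048 sectors.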
@@ -0,0 +1,186 @@
DM statistics
=============

Device Mapper supports the collection of I/O statistics on user-defined
regions of a DM device. If no regions are defined no statistics are
collected so there isn't any performance impact. Only bio-based DM
devices are currently supported.

Each user-defined region specifies a starting sector, length and step.
Individual statistics will be collected for each step-sized area within
the range specified.

The I/O statistics counters for each step-sized area of a region are
in the same format as /sys/block/*/stat or /proc/diskstats (see:
Documentation/iostats.txt). But two extra counters (12 and 13) are
provided: total time spent reading and writing in milliseconds. All
these counters may be accessed by sending the @stats_print message to
the appropriate DM device via dmsetup.

Each region has a corresponding unique identifier, which we call a
region_id, that is assigned when the region is created. The region_id
must be supplied when querying statistics about the region, deleting the
region, etc. Unique region_ids enable multiple userspace programs to
request and process statistics for the same DM device without stepping
on each other's data.

The creation of DM statistics will allocate memory via kmalloc or
fallback to using vmalloc space. At most, 1/4 of the overall system
memory may be allocated by DM statistics. The admin can see how much
memory is used by reading
/sys/module/dm_mod/parameters/stats_current_allocated_bytes

Messages
========

    @stats_create <range> <step> [<program_id> [<aux_data>]]

        Create a new region and return the region_id.

        <range>
          "-" - whole device
          "<start_sector>+<length>" - a range of <length> 512-byte sectors
                                      starting with <start_sector>.

        <step>
          "<area_size>" - the range is subdivided into areas each containing
                          <area_size> sectors.
          "/<number_of_areas>" - the range is subdivided into the specified
                                 number of areas.

        <program_id>
          An optional parameter. A name that uniquely identifies
          the userspace owner of the range. This groups ranges together
          so that userspace programs can identify the ranges they
          created and ignore those created by others.
          The kernel returns this string back in the output of
          @stats_list message, but it doesn't use it for anything else.

        <aux_data>
          An optional parameter. A word that provides auxiliary data
          that is useful to the client program that created the range.
          The kernel returns this string back in the output of
          @stats_list message, but it doesn't use this value for anything.

    @stats_delete <region_id>

        Delete the region with the specified id.

        <region_id>
          region_id returned from @stats_create

    @stats_clear <region_id>

        Clear all the counters except the in-flight i/o counters.

        <region_id>
          region_id returned from @stats_create

    @stats_list [<program_id>]

        List all regions registered with @stats_create.

        <program_id>
          An optional parameter.
          If this parameter is specified, only matching regions
          are returned.
          If it is not specified, all regions are returned.

        Output format:
          <region_id>: <start_sector>+<length> <step> <program_id> <aux_data>

    @stats_print <region_id> [<starting_line> <number_of_lines>]

        Print counters for each step-sized area of a region.

        <region_id>
          region_id returned from @stats_create

        <starting_line>
          The index of the starting line in the output.
          If omitted, all lines are returned.

        <number_of_lines>
          The number of lines to include in the output.
          If omitted, all lines are returned.

        Output format for each step-sized area of a region:

          <start_sector>+<length> counters

          The first 11 counters have the same meaning as
          /sys/block/*/stat or /proc/diskstats.

          Please refer to Documentation/iostats.txt for details.

          1. the number of reads completed
          2. the number of reads merged
          3. the number of sectors read
          4. the number of milliseconds spent reading
          5. the number of writes completed
          6. the number of writes merged
          7. the number of sectors written
          8. the number of milliseconds spent writing
          9. the number of I/Os currently in progress
          10. the number of milliseconds spent doing I/Os
          11. the weighted number of milliseconds spent doing I/Os

          Additional counters:
          12. the total time spent reading in milliseconds
          13. the total time spent writing in milliseconds

    @stats_print_clear <region_id> [<starting_line> <number_of_lines>]

        Atomically print and then clear all the counters except the
        in-flight i/o counters. Useful when the client consuming the
        statistics does not want to lose any statistics (those updated
        between printing and clearing).

        <region_id>
          region_id returned from @stats_create

        <starting_line>
          The index of the starting line in the output.
          If omitted, all lines are printed and then cleared.

        <number_of_lines>
          The number of lines to process.
          If omitted, all lines are printed and then cleared.

    @stats_set_aux <region_id> <aux_data>

        Store auxiliary data aux_data for the specified region.

        <region_id>
          region_id returned from @stats_create

        <aux_data>
          The string that identifies data which is useful to the client
          program that created the range. The kernel returns this
          string back in the output of @stats_list message, but it
          doesn't use this value for anything.

Examples
========

Subdivide the DM device 'vol' into 100 pieces and start collecting
statistics on them:

  dmsetup message vol 0 @stats_create - /100

Set the auxiliary data string to "foo bar baz" (the escape for each
space must also be escaped, otherwise the shell will consume them):

  dmsetup message vol 0 @stats_set_aux 0 foo\\ bar\\ baz

List the statistics:

  dmsetup message vol 0 @stats_list

Print the statistics:

  dmsetup message vol 0 @stats_print 0

Delete the statistics:

  dmsetup message vol 0 @stats_delete 0
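The "/<number_of_areas>" form documented above divides the range with a round-up and clamps the step to at least one sector, mirroring the message parsing added in dm-stats.c later in this diff. A small illustrative sketch of the same computation (userspace C, hypothetical helper name):

    #include <stdint.h>

    /* step for "/<number_of_areas>": ceil(range / areas), minimum 1 */
    static uint64_t stats_step(uint64_t start, uint64_t end, unsigned areas)
    {
            uint64_t range = end - start;
            uint64_t step = range / areas;

            if (range % areas)
                    step++;        /* round up so the areas cover the range */
            return step ? step : 1;
    }

For example, a 1000003-sector range split as "/100" gets a step of 10001 sectors, so the final area comes out slightly short.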
@@ -99,13 +99,14 @@ Using an existing pool device
      $data_block_size $low_water_mark"
 
 $data_block_size gives the smallest unit of disk space that can be
-allocated at a time expressed in units of 512-byte sectors. People
-primarily interested in thin provisioning may want to use a value such
-as 1024 (512KB). People doing lots of snapshotting may want a smaller value
-such as 128 (64KB). If you are not zeroing newly-allocated data,
-a larger $data_block_size in the region of 256000 (128MB) is suggested.
-$data_block_size must be the same for the lifetime of the
-metadata device.
+allocated at a time expressed in units of 512-byte sectors.
+$data_block_size must be between 128 (64KB) and 2097152 (1GB) and a
+multiple of 128 (64KB). $data_block_size cannot be changed after the
+thin-pool is created. People primarily interested in thin provisioning
+may want to use a value such as 1024 (512KB). People doing lots of
+snapshotting may want a smaller value such as 128 (64KB). If you are
+not zeroing newly-allocated data, a larger $data_block_size in the
+region of 256000 (128MB) is suggested.
 
 $low_water_mark is expressed in blocks of size $data_block_size. If
 free space on the data device drops below this level then a dm event
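In bytes, the values above are exact for the small sizes: 128 sectors x 512B = 64KB and 1024 sectors x 512B = 512KB; 256000 sectors is 131,072,000 bytes, which the documentation rounds to 128MB.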
@@ -3,7 +3,7 @@
 #
 
 dm-mod-y += dm.o dm-table.o dm-target.o dm-linear.o dm-stripe.o \
-            dm-ioctl.o dm-io.o dm-kcopyd.o dm-sysfs.o
+            dm-ioctl.o dm-io.o dm-kcopyd.o dm-sysfs.o dm-stats.o
 dm-multipath-y += dm-path-selector.o dm-mpath.o
 dm-snapshot-y += dm-snap.o dm-exception-store.o dm-snap-transient.o \
             dm-snap-persistent.o
@@ -67,9 +67,11 @@ static void free_bitset(unsigned long *bits)
 #define MIGRATION_COUNT_WINDOW 10
 
 /*
- * The block size of the device holding cache data must be >= 32KB
+ * The block size of the device holding cache data must be
+ * between 32KB and 1GB.
  */
 #define DATA_DEV_BLOCK_SIZE_MIN_SECTORS (32 * 1024 >> SECTOR_SHIFT)
+#define DATA_DEV_BLOCK_SIZE_MAX_SECTORS (1024 * 1024 * 1024 >> SECTOR_SHIFT)
 
 /*
  * FIXME: the cache is read/write for the time being.

@@ -101,6 +103,8 @@ struct cache {
     struct dm_target *ti;
     struct dm_target_callbacks callbacks;
 
+    struct dm_cache_metadata *cmd;
+
     /*
      * Metadata is written to this device.
      */

@@ -116,11 +120,6 @@ struct cache {
      */
     struct dm_dev *cache_dev;
 
-    /*
-     * Cache features such as write-through.
-     */
-    struct cache_features features;
-
     /*
      * Size of the origin device in _complete_ blocks and native sectors.
      */

@@ -138,8 +137,6 @@ struct cache {
     uint32_t sectors_per_block;
     int sectors_per_block_shift;
 
-    struct dm_cache_metadata *cmd;
-
     spinlock_t lock;
     struct bio_list deferred_bios;
     struct bio_list deferred_flush_bios;

@@ -148,8 +145,8 @@ struct cache {
     struct list_head completed_migrations;
     struct list_head need_commit_migrations;
     sector_t migration_threshold;
-    atomic_t nr_migrations;
     wait_queue_head_t migration_wait;
+    atomic_t nr_migrations;
 
     /*
      * cache_size entries, dirty if set

@@ -160,9 +157,16 @@ struct cache {
     /*
      * origin_blocks entries, discarded if set.
      */
-    uint32_t discard_block_size; /* a power of 2 times sectors per block */
     dm_dblock_t discard_nr_blocks;
     unsigned long *discard_bitset;
+    uint32_t discard_block_size; /* a power of 2 times sectors per block */
+
+    /*
+     * Rather than reconstructing the table line for the status we just
+     * save it and regurgitate.
+     */
+    unsigned nr_ctr_args;
+    const char **ctr_args;
 
     struct dm_kcopyd_client *copier;
     struct workqueue_struct *wq;

@@ -187,14 +191,12 @@ struct cache {
     bool loaded_mappings:1;
     bool loaded_discards:1;
 
-    struct cache_stats stats;
-
     /*
-     * Rather than reconstructing the table line for the status we just
-     * save it and regurgitate.
+     * Cache features such as write-through.
      */
-    unsigned nr_ctr_args;
-    const char **ctr_args;
+    struct cache_features features;
+
+    struct cache_stats stats;
 };
 
 struct per_bio_data {
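The struct cache reshuffle above is about padding: on a typical LP64 ABI, a 4-byte field followed by a pointer leaves a 4-byte hole, and grouping small fields together lets two of them share one 8-byte slot (tools like pahole report these holes). A contrived sketch of the effect, unrelated to the actual cache fields:

    struct holey {           /* 24 bytes: hole after 'a', tail padding after 'b' */
            int a;           /* offset 0, then 4 bytes of padding */
            void *p;         /* offset 8 */
            int b;           /* offset 16, then 4 bytes of tail padding */
    };

    struct no_holes {        /* 16 bytes: the two ints share one 8-byte slot */
            void *p;         /* offset 0 */
            int a;           /* offset 8 */
            int b;           /* offset 12 */
    };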
@@ -1687,24 +1689,25 @@ static int parse_origin_dev(struct cache_args *ca, struct dm_arg_set *as,
 static int parse_block_size(struct cache_args *ca, struct dm_arg_set *as,
                             char **error)
 {
-    unsigned long tmp;
+    unsigned long block_size;
 
     if (!at_least_one_arg(as, error))
         return -EINVAL;
 
-    if (kstrtoul(dm_shift_arg(as), 10, &tmp) || !tmp ||
-        tmp < DATA_DEV_BLOCK_SIZE_MIN_SECTORS ||
-        tmp & (DATA_DEV_BLOCK_SIZE_MIN_SECTORS - 1)) {
+    if (kstrtoul(dm_shift_arg(as), 10, &block_size) || !block_size ||
+        block_size < DATA_DEV_BLOCK_SIZE_MIN_SECTORS ||
+        block_size > DATA_DEV_BLOCK_SIZE_MAX_SECTORS ||
+        block_size & (DATA_DEV_BLOCK_SIZE_MIN_SECTORS - 1)) {
         *error = "Invalid data block size";
         return -EINVAL;
     }
 
-    if (tmp > ca->cache_sectors) {
+    if (block_size > ca->cache_sectors) {
         *error = "Data block size is larger than the cache device";
         return -EINVAL;
     }
 
-    ca->block_size = tmp;
+    ca->block_size = block_size;
 
     return 0;
 }
@@ -2609,9 +2612,17 @@ static void set_discard_limits(struct cache *cache, struct queue_limits *limits)
 static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits)
 {
     struct cache *cache = ti->private;
+    uint64_t io_opt_sectors = limits->io_opt >> SECTOR_SHIFT;
 
-    blk_limits_io_min(limits, 0);
-    blk_limits_io_opt(limits, cache->sectors_per_block << SECTOR_SHIFT);
+    /*
+     * If the system-determined stacked limits are compatible with the
+     * cache's blocksize (io_opt is a factor) do not override them.
+     */
+    if (io_opt_sectors < cache->sectors_per_block ||
+        do_div(io_opt_sectors, cache->sectors_per_block)) {
+        blk_limits_io_min(limits, 0);
+        blk_limits_io_opt(limits, cache->sectors_per_block << SECTOR_SHIFT);
+    }
     set_discard_limits(cache, limits);
 }
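Worked example for the io_hints change above: with sectors_per_block = 512 (256KB cache blocks), a stacked io_opt of 2048 sectors (1MB) satisfies 2048 % 512 == 0, so the stacked limits are kept; a stacked io_opt of 768 sectors (384KB) leaves a remainder, so the target overrides io_min/io_opt with its own 256KB block size.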
@@ -1645,20 +1645,14 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
     }
 
     ret = -ENOMEM;
-    cc->io_queue = alloc_workqueue("kcryptd_io",
-                                   WQ_NON_REENTRANT|
-                                   WQ_MEM_RECLAIM,
-                                   1);
+    cc->io_queue = alloc_workqueue("kcryptd_io", WQ_MEM_RECLAIM, 1);
     if (!cc->io_queue) {
         ti->error = "Couldn't create kcryptd io queue";
         goto bad;
     }
 
     cc->crypt_queue = alloc_workqueue("kcryptd",
-                                      WQ_NON_REENTRANT|
-                                      WQ_CPU_INTENSIVE|
-                                      WQ_MEM_RECLAIM,
-                                      1);
+                                      WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 1);
     if (!cc->crypt_queue) {
         ti->error = "Couldn't create kcryptd queue";
         goto bad;
@@ -877,7 +877,7 @@ static int dev_rename(struct dm_ioctl *param, size_t param_size)
     unsigned change_uuid = (param->flags & DM_UUID_FLAG) ? 1 : 0;
 
     if (new_data < param->data ||
-        invalid_str(new_data, (void *) param + param_size) ||
+        invalid_str(new_data, (void *) param + param_size) || !*new_data ||
         strlen(new_data) > (change_uuid ? DM_UUID_LEN - 1 : DM_NAME_LEN - 1)) {
         DMWARN("Invalid new mapped device name or uuid string supplied.");
         return -EINVAL;
@@ -1262,44 +1262,37 @@ static int table_load(struct dm_ioctl *param, size_t param_size)
 
     r = dm_table_create(&t, get_mode(param), param->target_count, md);
     if (r)
-        goto out;
+        goto err;
 
+    /* Protect md->type and md->queue against concurrent table loads. */
+    dm_lock_md_type(md);
     r = populate_table(t, param, param_size);
-    if (r) {
-        dm_table_destroy(t);
-        goto out;
-    }
+    if (r)
+        goto err_unlock_md_type;
 
     immutable_target_type = dm_get_immutable_target_type(md);
     if (immutable_target_type &&
         (immutable_target_type != dm_table_get_immutable_target_type(t))) {
         DMWARN("can't replace immutable target type %s",
                immutable_target_type->name);
-        dm_table_destroy(t);
         r = -EINVAL;
-        goto out;
+        goto err_unlock_md_type;
     }
 
-    /* Protect md->type and md->queue against concurrent table loads. */
-    dm_lock_md_type(md);
     if (dm_get_md_type(md) == DM_TYPE_NONE)
         /* Initial table load: acquire type of table. */
         dm_set_md_type(md, dm_table_get_type(t));
     else if (dm_get_md_type(md) != dm_table_get_type(t)) {
         DMWARN("can't change device type after initial table load.");
-        dm_table_destroy(t);
-        dm_unlock_md_type(md);
         r = -EINVAL;
-        goto out;
+        goto err_unlock_md_type;
     }
 
     /* setup md->queue to reflect md's type (may block) */
     r = dm_setup_md_queue(md);
     if (r) {
         DMWARN("unable to set up device queue for new table.");
-        dm_table_destroy(t);
-        dm_unlock_md_type(md);
-        goto out;
+        goto err_unlock_md_type;
     }
     dm_unlock_md_type(md);
 

@@ -1309,9 +1302,8 @@ static int table_load(struct dm_ioctl *param, size_t param_size)
     if (!hc || hc->md != md) {
         DMWARN("device has been removed from the dev hash table.");
         up_write(&_hash_lock);
-        dm_table_destroy(t);
         r = -ENXIO;
-        goto out;
+        goto err_destroy_table;
     }
 
     if (hc->new_map)

@@ -1322,7 +1314,6 @@ static int table_load(struct dm_ioctl *param, size_t param_size)
     param->flags |= DM_INACTIVE_PRESENT_FLAG;
     __dev_status(md, param);
 
-out:
     if (old_map) {
         dm_sync_table(md);
         dm_table_destroy(old_map);

@@ -1330,6 +1321,15 @@ static int table_load(struct dm_ioctl *param, size_t param_size)
 
     dm_put(md);
 
     return 0;
+
+err_unlock_md_type:
+    dm_unlock_md_type(md);
+err_destroy_table:
+    dm_table_destroy(t);
+err:
+    dm_put(md);
+
+    return r;
 }
 
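The table_load rework above is the standard kernel unwind idiom: one exit path, with labels ordered so each undoes only what was set up before the failure. A generic sketch of the pattern with hypothetical setup/teardown names (not the dm-ioctl functions):

    int do_setup(void)
    {
            int r;

            r = acquire_a();                /* hypothetical step 1 */
            if (r)
                    goto err;

            r = acquire_b();                /* hypothetical step 2 */
            if (r)
                    goto err_release_a;

            return 0;

    err_release_a:
            release_a();                    /* undo step 1 only */
    err:
            return r;
    }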
@@ -1455,20 +1455,26 @@ static int table_status(struct dm_ioctl *param, size_t param_size)
     return 0;
 }
 
-static bool buffer_test_overflow(char *result, unsigned maxlen)
-{
-    return !maxlen || strlen(result) + 1 >= maxlen;
-}
-
 /*
- * Process device-mapper dependent messages.
+ * Process device-mapper dependent messages. Messages prefixed with '@'
+ * are processed by the DM core. All others are delivered to the target.
+ * Returns a number <= 1 if message was processed by device mapper.
+ * Returns 2 if message should be delivered to the target.
  */
 static int message_for_md(struct mapped_device *md, unsigned argc, char **argv,
                           char *result, unsigned maxlen)
 {
-    return 2;
+    int r;
+
+    if (**argv != '@')
+        return 2; /* no '@' prefix, deliver to target */
+
+    r = dm_stats_message(md, argc, argv, result, maxlen);
+    if (r < 2)
+        return r;
+
+    DMERR("Unsupported message sent to DM core: %s", argv[0]);
+    return -EINVAL;
 }
 
 /*

@@ -1542,7 +1548,7 @@ static int target_message(struct dm_ioctl *param, size_t param_size)
 
     if (r == 1) {
         param->flags |= DM_DATA_OUT_FLAG;
-        if (buffer_test_overflow(result, maxlen))
+        if (dm_message_test_buffer_overflow(result, maxlen))
             param->flags |= DM_BUFFER_FULL_FLAG;
         else
             param->data_size = param->data_start + strlen(result) + 1;
@@ -833,8 +833,7 @@ struct dm_kcopyd_client *dm_kcopyd_client_create(struct dm_kcopyd_throttle *throttle)
         goto bad_slab;
 
     INIT_WORK(&kc->kcopyd_work, do_work);
-    kc->kcopyd_wq = alloc_workqueue("kcopyd",
-                                    WQ_NON_REENTRANT | WQ_MEM_RECLAIM, 0);
+    kc->kcopyd_wq = alloc_workqueue("kcopyd", WQ_MEM_RECLAIM, 0);
     if (!kc->kcopyd_wq)
         goto bad_workqueue;
 

@@ -1080,8 +1080,7 @@ static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
     ti->per_bio_data_size = sizeof(struct dm_raid1_bio_record);
     ti->discard_zeroes_data_unsupported = true;
 
-    ms->kmirrord_wq = alloc_workqueue("kmirrord",
-                                      WQ_NON_REENTRANT | WQ_MEM_RECLAIM, 0);
+    ms->kmirrord_wq = alloc_workqueue("kmirrord", WQ_MEM_RECLAIM, 0);
     if (!ms->kmirrord_wq) {
         DMERR("couldn't start kmirrord");
         r = -ENOMEM;
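The dm-crypt, dm-kcopyd and dm-raid1 hunks above are all part of the same cleanup: since the 3.7-era workqueue changes made every workqueue non-reentrant by default, WQ_NON_REENTRANT has been a no-op, and the flag was being removed tree-wide.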
@@ -0,0 +1,969 @@
#include <linux/errno.h>
#include <linux/numa.h>
#include <linux/slab.h>
#include <linux/rculist.h>
#include <linux/threads.h>
#include <linux/preempt.h>
#include <linux/irqflags.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/device-mapper.h>

#include "dm.h"
#include "dm-stats.h"

#define DM_MSG_PREFIX "stats"

static int dm_stat_need_rcu_barrier;

/*
 * Using 64-bit values to avoid overflow (which is a
 * problem that block/genhd.c's IO accounting has).
 */
struct dm_stat_percpu {
    unsigned long long sectors[2];
    unsigned long long ios[2];
    unsigned long long merges[2];
    unsigned long long ticks[2];
    unsigned long long io_ticks[2];
    unsigned long long io_ticks_total;
    unsigned long long time_in_queue;
};

struct dm_stat_shared {
    atomic_t in_flight[2];
    unsigned long stamp;
    struct dm_stat_percpu tmp;
};

struct dm_stat {
    struct list_head list_entry;
    int id;
    size_t n_entries;
    sector_t start;
    sector_t end;
    sector_t step;
    const char *program_id;
    const char *aux_data;
    struct rcu_head rcu_head;
    size_t shared_alloc_size;
    size_t percpu_alloc_size;
    struct dm_stat_percpu *stat_percpu[NR_CPUS];
    struct dm_stat_shared stat_shared[0];
};

struct dm_stats_last_position {
    sector_t last_sector;
    unsigned last_rw;
};

/*
 * A typo on the command line could possibly make the kernel run out of memory
 * and crash. To prevent the crash we account all used memory. We fail if we
 * exhaust 1/4 of all memory or 1/2 of vmalloc space.
 */
#define DM_STATS_MEMORY_FACTOR   4
#define DM_STATS_VMALLOC_FACTOR  2

static DEFINE_SPINLOCK(shared_memory_lock);

static unsigned long shared_memory_amount;

static bool __check_shared_memory(size_t alloc_size)
{
    size_t a;

    a = shared_memory_amount + alloc_size;
    if (a < shared_memory_amount)
        return false;
    if (a >> PAGE_SHIFT > totalram_pages / DM_STATS_MEMORY_FACTOR)
        return false;
#ifdef CONFIG_MMU
    if (a > (VMALLOC_END - VMALLOC_START) / DM_STATS_VMALLOC_FACTOR)
        return false;
#endif
    return true;
}

static bool check_shared_memory(size_t alloc_size)
{
    bool ret;

    spin_lock_irq(&shared_memory_lock);

    ret = __check_shared_memory(alloc_size);

    spin_unlock_irq(&shared_memory_lock);

    return ret;
}

static bool claim_shared_memory(size_t alloc_size)
{
    spin_lock_irq(&shared_memory_lock);

    if (!__check_shared_memory(alloc_size)) {
        spin_unlock_irq(&shared_memory_lock);
        return false;
    }

    shared_memory_amount += alloc_size;

    spin_unlock_irq(&shared_memory_lock);

    return true;
}

static void free_shared_memory(size_t alloc_size)
{
    unsigned long flags;

    spin_lock_irqsave(&shared_memory_lock, flags);

    if (WARN_ON_ONCE(shared_memory_amount < alloc_size)) {
        spin_unlock_irqrestore(&shared_memory_lock, flags);
        DMCRIT("Memory usage accounting bug.");
        return;
    }

    shared_memory_amount -= alloc_size;

    spin_unlock_irqrestore(&shared_memory_lock, flags);
}

static void *dm_kvzalloc(size_t alloc_size, int node)
{
    void *p;

    if (!claim_shared_memory(alloc_size))
        return NULL;

    if (alloc_size <= KMALLOC_MAX_SIZE) {
        p = kzalloc_node(alloc_size, GFP_KERNEL | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN, node);
        if (p)
            return p;
    }
    p = vzalloc_node(alloc_size, node);
    if (p)
        return p;

    free_shared_memory(alloc_size);

    return NULL;
}

static void dm_kvfree(void *ptr, size_t alloc_size)
{
    if (!ptr)
        return;

    free_shared_memory(alloc_size);

    if (is_vmalloc_addr(ptr))
        vfree(ptr);
    else
        kfree(ptr);
}

static void dm_stat_free(struct rcu_head *head)
{
    int cpu;
    struct dm_stat *s = container_of(head, struct dm_stat, rcu_head);

    kfree(s->program_id);
    kfree(s->aux_data);
    for_each_possible_cpu(cpu)
        dm_kvfree(s->stat_percpu[cpu], s->percpu_alloc_size);
    dm_kvfree(s, s->shared_alloc_size);
}

static int dm_stat_in_flight(struct dm_stat_shared *shared)
{
    return atomic_read(&shared->in_flight[READ]) +
           atomic_read(&shared->in_flight[WRITE]);
}

void dm_stats_init(struct dm_stats *stats)
{
    int cpu;
    struct dm_stats_last_position *last;

    mutex_init(&stats->mutex);
    INIT_LIST_HEAD(&stats->list);
    stats->last = alloc_percpu(struct dm_stats_last_position);
    for_each_possible_cpu(cpu) {
        last = per_cpu_ptr(stats->last, cpu);
        last->last_sector = (sector_t)ULLONG_MAX;
        last->last_rw = UINT_MAX;
    }
}

void dm_stats_cleanup(struct dm_stats *stats)
{
    size_t ni;
    struct dm_stat *s;
    struct dm_stat_shared *shared;

    while (!list_empty(&stats->list)) {
        s = container_of(stats->list.next, struct dm_stat, list_entry);
        list_del(&s->list_entry);
        for (ni = 0; ni < s->n_entries; ni++) {
            shared = &s->stat_shared[ni];
            if (WARN_ON(dm_stat_in_flight(shared))) {
                DMCRIT("leaked in-flight counter at index %lu "
                       "(start %llu, end %llu, step %llu): reads %d, writes %d",
                       (unsigned long)ni,
                       (unsigned long long)s->start,
                       (unsigned long long)s->end,
                       (unsigned long long)s->step,
                       atomic_read(&shared->in_flight[READ]),
                       atomic_read(&shared->in_flight[WRITE]));
            }
        }
        dm_stat_free(&s->rcu_head);
    }
    free_percpu(stats->last);
}

static int dm_stats_create(struct dm_stats *stats, sector_t start, sector_t end,
                           sector_t step, const char *program_id, const char *aux_data,
                           void (*suspend_callback)(struct mapped_device *),
                           void (*resume_callback)(struct mapped_device *),
                           struct mapped_device *md)
{
    struct list_head *l;
    struct dm_stat *s, *tmp_s;
    sector_t n_entries;
    size_t ni;
    size_t shared_alloc_size;
    size_t percpu_alloc_size;
    struct dm_stat_percpu *p;
    int cpu;
    int ret_id;
    int r;

    if (end < start || !step)
        return -EINVAL;

    n_entries = end - start;
    if (dm_sector_div64(n_entries, step))
        n_entries++;

    if (n_entries != (size_t)n_entries || !(size_t)(n_entries + 1))
        return -EOVERFLOW;

    shared_alloc_size = sizeof(struct dm_stat) + (size_t)n_entries * sizeof(struct dm_stat_shared);
    if ((shared_alloc_size - sizeof(struct dm_stat)) / sizeof(struct dm_stat_shared) != n_entries)
        return -EOVERFLOW;

    percpu_alloc_size = (size_t)n_entries * sizeof(struct dm_stat_percpu);
    if (percpu_alloc_size / sizeof(struct dm_stat_percpu) != n_entries)
        return -EOVERFLOW;

    if (!check_shared_memory(shared_alloc_size + num_possible_cpus() * percpu_alloc_size))
        return -ENOMEM;

    s = dm_kvzalloc(shared_alloc_size, NUMA_NO_NODE);
    if (!s)
        return -ENOMEM;

    s->n_entries = n_entries;
    s->start = start;
    s->end = end;
    s->step = step;
    s->shared_alloc_size = shared_alloc_size;
    s->percpu_alloc_size = percpu_alloc_size;

    s->program_id = kstrdup(program_id, GFP_KERNEL);
    if (!s->program_id) {
        r = -ENOMEM;
        goto out;
    }
    s->aux_data = kstrdup(aux_data, GFP_KERNEL);
    if (!s->aux_data) {
        r = -ENOMEM;
        goto out;
    }

    for (ni = 0; ni < n_entries; ni++) {
        atomic_set(&s->stat_shared[ni].in_flight[READ], 0);
        atomic_set(&s->stat_shared[ni].in_flight[WRITE], 0);
    }

    for_each_possible_cpu(cpu) {
        p = dm_kvzalloc(percpu_alloc_size, cpu_to_node(cpu));
        if (!p) {
            r = -ENOMEM;
            goto out;
        }
        s->stat_percpu[cpu] = p;
    }

    /*
     * Suspend/resume to make sure there is no i/o in flight,
     * so that newly created statistics will be exact.
     *
     * (note: we couldn't suspend earlier because we must not
     * allocate memory while suspended)
     */
    suspend_callback(md);

    mutex_lock(&stats->mutex);
    s->id = 0;
    list_for_each(l, &stats->list) {
        tmp_s = container_of(l, struct dm_stat, list_entry);
        if (WARN_ON(tmp_s->id < s->id)) {
            r = -EINVAL;
            goto out_unlock_resume;
        }
        if (tmp_s->id > s->id)
            break;
        if (unlikely(s->id == INT_MAX)) {
            r = -ENFILE;
            goto out_unlock_resume;
        }
        s->id++;
    }
    ret_id = s->id;
    list_add_tail_rcu(&s->list_entry, l);
    mutex_unlock(&stats->mutex);

    resume_callback(md);

    return ret_id;

out_unlock_resume:
    mutex_unlock(&stats->mutex);
    resume_callback(md);
out:
    dm_stat_free(&s->rcu_head);
    return r;
}

static struct dm_stat *__dm_stats_find(struct dm_stats *stats, int id)
{
    struct dm_stat *s;

    list_for_each_entry(s, &stats->list, list_entry) {
        if (s->id > id)
            break;
        if (s->id == id)
            return s;
    }

    return NULL;
}

static int dm_stats_delete(struct dm_stats *stats, int id)
{
    struct dm_stat *s;
    int cpu;

    mutex_lock(&stats->mutex);

    s = __dm_stats_find(stats, id);
    if (!s) {
        mutex_unlock(&stats->mutex);
        return -ENOENT;
    }

    list_del_rcu(&s->list_entry);
    mutex_unlock(&stats->mutex);

    /*
     * vfree can't be called from RCU callback
     */
    for_each_possible_cpu(cpu)
        if (is_vmalloc_addr(s->stat_percpu))
            goto do_sync_free;
    if (is_vmalloc_addr(s)) {
do_sync_free:
        synchronize_rcu_expedited();
        dm_stat_free(&s->rcu_head);
    } else {
        ACCESS_ONCE(dm_stat_need_rcu_barrier) = 1;
        call_rcu(&s->rcu_head, dm_stat_free);
    }
    return 0;
}

static int dm_stats_list(struct dm_stats *stats, const char *program,
                         char *result, unsigned maxlen)
{
    struct dm_stat *s;
    sector_t len;
    unsigned sz = 0;

    /*
     * Output format:
     *   <region_id>: <start_sector>+<length> <step> <program_id> <aux_data>
     */

    mutex_lock(&stats->mutex);
    list_for_each_entry(s, &stats->list, list_entry) {
        if (!program || !strcmp(program, s->program_id)) {
            len = s->end - s->start;
            DMEMIT("%d: %llu+%llu %llu %s %s\n", s->id,
                   (unsigned long long)s->start,
                   (unsigned long long)len,
                   (unsigned long long)s->step,
                   s->program_id,
                   s->aux_data);
        }
    }
    mutex_unlock(&stats->mutex);

    return 1;
}

static void dm_stat_round(struct dm_stat_shared *shared, struct dm_stat_percpu *p)
{
    /*
     * This is racy, but so is part_round_stats_single.
     */
    unsigned long now = jiffies;
    unsigned in_flight_read;
    unsigned in_flight_write;
    unsigned long difference = now - shared->stamp;

    if (!difference)
        return;
    in_flight_read = (unsigned)atomic_read(&shared->in_flight[READ]);
    in_flight_write = (unsigned)atomic_read(&shared->in_flight[WRITE]);
    if (in_flight_read)
        p->io_ticks[READ] += difference;
    if (in_flight_write)
        p->io_ticks[WRITE] += difference;
    if (in_flight_read + in_flight_write) {
        p->io_ticks_total += difference;
        p->time_in_queue += (in_flight_read + in_flight_write) * difference;
    }
    shared->stamp = now;
}

static void dm_stat_for_entry(struct dm_stat *s, size_t entry,
                              unsigned long bi_rw, sector_t len, bool merged,
                              bool end, unsigned long duration)
{
    unsigned long idx = bi_rw & REQ_WRITE;
    struct dm_stat_shared *shared = &s->stat_shared[entry];
    struct dm_stat_percpu *p;

    /*
     * For strict correctness we should use local_irq_disable/enable
     * instead of preempt_disable/enable.
     *
     * This is racy if the driver finishes bios from non-interrupt
     * context as well as from interrupt context or from more different
     * interrupts.
     *
     * However, the race only results in not counting some events,
     * so it is acceptable.
     *
     * part_stat_lock()/part_stat_unlock() have this race too.
     */
    preempt_disable();
    p = &s->stat_percpu[smp_processor_id()][entry];

    if (!end) {
        dm_stat_round(shared, p);
        atomic_inc(&shared->in_flight[idx]);
    } else {
        dm_stat_round(shared, p);
        atomic_dec(&shared->in_flight[idx]);
        p->sectors[idx] += len;
        p->ios[idx] += 1;
        p->merges[idx] += merged;
        p->ticks[idx] += duration;
    }

    preempt_enable();
}

static void __dm_stat_bio(struct dm_stat *s, unsigned long bi_rw,
                          sector_t bi_sector, sector_t end_sector,
                          bool end, unsigned long duration,
                          struct dm_stats_aux *stats_aux)
{
    sector_t rel_sector, offset, todo, fragment_len;
    size_t entry;

    if (end_sector <= s->start || bi_sector >= s->end)
        return;
    if (unlikely(bi_sector < s->start)) {
        rel_sector = 0;
        todo = end_sector - s->start;
    } else {
        rel_sector = bi_sector - s->start;
        todo = end_sector - bi_sector;
    }
    if (unlikely(end_sector > s->end))
        todo -= (end_sector - s->end);

    offset = dm_sector_div64(rel_sector, s->step);
    entry = rel_sector;
    do {
        if (WARN_ON_ONCE(entry >= s->n_entries)) {
            DMCRIT("Invalid area access in region id %d", s->id);
            return;
        }
        fragment_len = todo;
        if (fragment_len > s->step - offset)
            fragment_len = s->step - offset;
        dm_stat_for_entry(s, entry, bi_rw, fragment_len,
                          stats_aux->merged, end, duration);
        todo -= fragment_len;
        entry++;
        offset = 0;
    } while (unlikely(todo != 0));
}

void dm_stats_account_io(struct dm_stats *stats, unsigned long bi_rw,
                         sector_t bi_sector, unsigned bi_sectors, bool end,
                         unsigned long duration, struct dm_stats_aux *stats_aux)
{
    struct dm_stat *s;
    sector_t end_sector;
    struct dm_stats_last_position *last;

    if (unlikely(!bi_sectors))
        return;

    end_sector = bi_sector + bi_sectors;

    if (!end) {
        /*
         * A race condition can at worst result in the merged flag being
         * misrepresented, so we don't have to disable preemption here.
         */
        last = __this_cpu_ptr(stats->last);
        stats_aux->merged =
            (bi_sector == (ACCESS_ONCE(last->last_sector) &&
                           ((bi_rw & (REQ_WRITE | REQ_DISCARD)) ==
                            (ACCESS_ONCE(last->last_rw) & (REQ_WRITE | REQ_DISCARD)))
                           ));
        ACCESS_ONCE(last->last_sector) = end_sector;
        ACCESS_ONCE(last->last_rw) = bi_rw;
    }

    rcu_read_lock();

    list_for_each_entry_rcu(s, &stats->list, list_entry)
        __dm_stat_bio(s, bi_rw, bi_sector, end_sector, end, duration, stats_aux);

    rcu_read_unlock();
}

static void __dm_stat_init_temporary_percpu_totals(struct dm_stat_shared *shared,
                                                   struct dm_stat *s, size_t x)
{
    int cpu;
    struct dm_stat_percpu *p;

    local_irq_disable();
    p = &s->stat_percpu[smp_processor_id()][x];
    dm_stat_round(shared, p);
    local_irq_enable();

    memset(&shared->tmp, 0, sizeof(shared->tmp));
    for_each_possible_cpu(cpu) {
        p = &s->stat_percpu[cpu][x];
        shared->tmp.sectors[READ] += ACCESS_ONCE(p->sectors[READ]);
        shared->tmp.sectors[WRITE] += ACCESS_ONCE(p->sectors[WRITE]);
        shared->tmp.ios[READ] += ACCESS_ONCE(p->ios[READ]);
        shared->tmp.ios[WRITE] += ACCESS_ONCE(p->ios[WRITE]);
        shared->tmp.merges[READ] += ACCESS_ONCE(p->merges[READ]);
        shared->tmp.merges[WRITE] += ACCESS_ONCE(p->merges[WRITE]);
        shared->tmp.ticks[READ] += ACCESS_ONCE(p->ticks[READ]);
        shared->tmp.ticks[WRITE] += ACCESS_ONCE(p->ticks[WRITE]);
        shared->tmp.io_ticks[READ] += ACCESS_ONCE(p->io_ticks[READ]);
        shared->tmp.io_ticks[WRITE] += ACCESS_ONCE(p->io_ticks[WRITE]);
        shared->tmp.io_ticks_total += ACCESS_ONCE(p->io_ticks_total);
        shared->tmp.time_in_queue += ACCESS_ONCE(p->time_in_queue);
    }
}

static void __dm_stat_clear(struct dm_stat *s, size_t idx_start, size_t idx_end,
                            bool init_tmp_percpu_totals)
{
    size_t x;
    struct dm_stat_shared *shared;
    struct dm_stat_percpu *p;

    for (x = idx_start; x < idx_end; x++) {
        shared = &s->stat_shared[x];
        if (init_tmp_percpu_totals)
            __dm_stat_init_temporary_percpu_totals(shared, s, x);
        local_irq_disable();
        p = &s->stat_percpu[smp_processor_id()][x];
        p->sectors[READ] -= shared->tmp.sectors[READ];
        p->sectors[WRITE] -= shared->tmp.sectors[WRITE];
        p->ios[READ] -= shared->tmp.ios[READ];
        p->ios[WRITE] -= shared->tmp.ios[WRITE];
        p->merges[READ] -= shared->tmp.merges[READ];
        p->merges[WRITE] -= shared->tmp.merges[WRITE];
        p->ticks[READ] -= shared->tmp.ticks[READ];
        p->ticks[WRITE] -= shared->tmp.ticks[WRITE];
        p->io_ticks[READ] -= shared->tmp.io_ticks[READ];
        p->io_ticks[WRITE] -= shared->tmp.io_ticks[WRITE];
        p->io_ticks_total -= shared->tmp.io_ticks_total;
        p->time_in_queue -= shared->tmp.time_in_queue;
        local_irq_enable();
    }
}

static int dm_stats_clear(struct dm_stats *stats, int id)
{
    struct dm_stat *s;

    mutex_lock(&stats->mutex);

    s = __dm_stats_find(stats, id);
    if (!s) {
        mutex_unlock(&stats->mutex);
        return -ENOENT;
    }

    __dm_stat_clear(s, 0, s->n_entries, true);

    mutex_unlock(&stats->mutex);

    return 1;
}

/*
 * This is like jiffies_to_msec, but works for 64-bit values.
 */
static unsigned long long dm_jiffies_to_msec64(unsigned long long j)
{
    unsigned long long result = 0;
    unsigned mult;

    if (j)
        result = jiffies_to_msecs(j & 0x3fffff);
    if (j >= 1 << 22) {
        mult = jiffies_to_msecs(1 << 22);
        result += (unsigned long long)mult * (unsigned long long)jiffies_to_msecs((j >> 22) & 0x3fffff);
    }
    if (j >= 1ULL << 44)
        result += (unsigned long long)mult * (unsigned long long)mult * (unsigned long long)jiffies_to_msecs(j >> 44);

    return result;
}

static int dm_stats_print(struct dm_stats *stats, int id,
                          size_t idx_start, size_t idx_len,
                          bool clear, char *result, unsigned maxlen)
{
    unsigned sz = 0;
    struct dm_stat *s;
    size_t x;
    sector_t start, end, step;
    size_t idx_end;
    struct dm_stat_shared *shared;

    /*
     * Output format:
     *   <start_sector>+<length> counters
     */

    mutex_lock(&stats->mutex);

    s = __dm_stats_find(stats, id);
    if (!s) {
        mutex_unlock(&stats->mutex);
        return -ENOENT;
    }

    idx_end = idx_start + idx_len;
    if (idx_end < idx_start ||
        idx_end > s->n_entries)
        idx_end = s->n_entries;

    if (idx_start > idx_end)
        idx_start = idx_end;

    step = s->step;
    start = s->start + (step * idx_start);

    for (x = idx_start; x < idx_end; x++, start = end) {
        shared = &s->stat_shared[x];
        end = start + step;
        if (unlikely(end > s->end))
            end = s->end;

        __dm_stat_init_temporary_percpu_totals(shared, s, x);

        DMEMIT("%llu+%llu %llu %llu %llu %llu %llu %llu %llu %llu %d %llu %llu %llu %llu\n",
               (unsigned long long)start,
               (unsigned long long)step,
               shared->tmp.ios[READ],
               shared->tmp.merges[READ],
               shared->tmp.sectors[READ],
               dm_jiffies_to_msec64(shared->tmp.ticks[READ]),
               shared->tmp.ios[WRITE],
               shared->tmp.merges[WRITE],
               shared->tmp.sectors[WRITE],
               dm_jiffies_to_msec64(shared->tmp.ticks[WRITE]),
               dm_stat_in_flight(shared),
               dm_jiffies_to_msec64(shared->tmp.io_ticks_total),
               dm_jiffies_to_msec64(shared->tmp.time_in_queue),
               dm_jiffies_to_msec64(shared->tmp.io_ticks[READ]),
               dm_jiffies_to_msec64(shared->tmp.io_ticks[WRITE]));

        if (unlikely(sz + 1 >= maxlen))
            goto buffer_overflow;
    }

    if (clear)
        __dm_stat_clear(s, idx_start, idx_end, false);

buffer_overflow:
    mutex_unlock(&stats->mutex);

    return 1;
}

static int dm_stats_set_aux(struct dm_stats *stats, int id, const char *aux_data)
{
    struct dm_stat *s;
    const char *new_aux_data;

    mutex_lock(&stats->mutex);

    s = __dm_stats_find(stats, id);
    if (!s) {
        mutex_unlock(&stats->mutex);
        return -ENOENT;
    }

    new_aux_data = kstrdup(aux_data, GFP_KERNEL);
    if (!new_aux_data) {
        mutex_unlock(&stats->mutex);
        return -ENOMEM;
    }

    kfree(s->aux_data);
    s->aux_data = new_aux_data;

    mutex_unlock(&stats->mutex);

    return 0;
}

static int message_stats_create(struct mapped_device *md,
                                unsigned argc, char **argv,
                                char *result, unsigned maxlen)
{
    int id;
    char dummy;
    unsigned long long start, end, len, step;
    unsigned divisor;
    const char *program_id, *aux_data;

    /*
     * Input format:
     *   <range> <step> [<program_id> [<aux_data>]]
     */

    if (argc < 3 || argc > 5)
        return -EINVAL;

    if (!strcmp(argv[1], "-")) {
        start = 0;
        len = dm_get_size(md);
        if (!len)
            len = 1;
    } else if (sscanf(argv[1], "%llu+%llu%c", &start, &len, &dummy) != 2 ||
               start != (sector_t)start || len != (sector_t)len)
        return -EINVAL;

    end = start + len;
    if (start >= end)
        return -EINVAL;

    if (sscanf(argv[2], "/%u%c", &divisor, &dummy) == 1) {
        step = end - start;
        if (do_div(step, divisor))
            step++;
        if (!step)
            step = 1;
    } else if (sscanf(argv[2], "%llu%c", &step, &dummy) != 1 ||
               step != (sector_t)step || !step)
        return -EINVAL;

    program_id = "-";
    aux_data = "-";

    if (argc > 3)
        program_id = argv[3];

    if (argc > 4)
        aux_data = argv[4];

    /*
     * If a buffer overflow happens after we created the region,
     * it's too late (the userspace would retry with a larger
     * buffer, but the region id that caused the overflow is already
     * leaked). So we must detect buffer overflow in advance.
     */
    snprintf(result, maxlen, "%d", INT_MAX);
    if (dm_message_test_buffer_overflow(result, maxlen))
        return 1;

    id = dm_stats_create(dm_get_stats(md), start, end, step, program_id, aux_data,
                         dm_internal_suspend, dm_internal_resume, md);
    if (id < 0)
        return id;

    snprintf(result, maxlen, "%d", id);

    return 1;
}

static int message_stats_delete(struct mapped_device *md,
                                unsigned argc, char **argv)
{
    int id;
    char dummy;

    if (argc != 2)
        return -EINVAL;

    if (sscanf(argv[1], "%d%c", &id, &dummy) != 1 || id < 0)
        return -EINVAL;

    return dm_stats_delete(dm_get_stats(md), id);
}

static int message_stats_clear(struct mapped_device *md,
                               unsigned argc, char **argv)
{
    int id;
    char dummy;

    if (argc != 2)
        return -EINVAL;

    if (sscanf(argv[1], "%d%c", &id, &dummy) != 1 || id < 0)
        return -EINVAL;

    return dm_stats_clear(dm_get_stats(md), id);
}

static int message_stats_list(struct mapped_device *md,
                              unsigned argc, char **argv,
                              char *result, unsigned maxlen)
{
    int r;
    const char *program = NULL;

    if (argc < 1 || argc > 2)
        return -EINVAL;

    if (argc > 1) {
        program = kstrdup(argv[1], GFP_KERNEL);
        if (!program)
            return -ENOMEM;
    }

    r = dm_stats_list(dm_get_stats(md), program, result, maxlen);

    kfree(program);

    return r;
}

static int message_stats_print(struct mapped_device *md,
                               unsigned argc, char **argv, bool clear,
                               char *result, unsigned maxlen)
{
    int id;
    char dummy;
    unsigned long idx_start = 0, idx_len = ULONG_MAX;

    if (argc != 2 && argc != 4)
        return -EINVAL;

    if (sscanf(argv[1], "%d%c", &id, &dummy) != 1 || id < 0)
        return -EINVAL;

    if (argc > 3) {
        if (strcmp(argv[2], "-") &&
            sscanf(argv[2], "%lu%c", &idx_start, &dummy) != 1)
            return -EINVAL;
        if (strcmp(argv[3], "-") &&
            sscanf(argv[3], "%lu%c", &idx_len, &dummy) != 1)
            return -EINVAL;
    }

    return dm_stats_print(dm_get_stats(md), id, idx_start, idx_len, clear,
                          result, maxlen);
}

static int message_stats_set_aux(struct mapped_device *md,
                                 unsigned argc, char **argv)
{
    int id;
    char dummy;

    if (argc != 3)
        return -EINVAL;

    if (sscanf(argv[1], "%d%c", &id, &dummy) != 1 || id < 0)
        return -EINVAL;

    return dm_stats_set_aux(dm_get_stats(md), id, argv[2]);
}

int dm_stats_message(struct mapped_device *md, unsigned argc, char **argv,
                     char *result, unsigned maxlen)
{
    int r;

    if (dm_request_based(md)) {
        DMWARN("Statistics are only supported for bio-based devices");
        return -EOPNOTSUPP;
    }

    /* All messages here must start with '@' */
    if (!strcasecmp(argv[0], "@stats_create"))
        r = message_stats_create(md, argc, argv, result, maxlen);
    else if (!strcasecmp(argv[0], "@stats_delete"))
        r = message_stats_delete(md, argc, argv);
    else if (!strcasecmp(argv[0], "@stats_clear"))
        r = message_stats_clear(md, argc, argv);
    else if (!strcasecmp(argv[0], "@stats_list"))
        r = message_stats_list(md, argc, argv, result, maxlen);
    else if (!strcasecmp(argv[0], "@stats_print"))
        r = message_stats_print(md, argc, argv, false, result, maxlen);
    else if (!strcasecmp(argv[0], "@stats_print_clear"))
        r = message_stats_print(md, argc, argv, true, result, maxlen);
    else if (!strcasecmp(argv[0], "@stats_set_aux"))
        r = message_stats_set_aux(md, argc, argv);
    else
        return 2; /* this wasn't a stats message */

    if (r == -EINVAL)
        DMWARN("Invalid parameters for message %s", argv[0]);

    return r;
}

int __init dm_statistics_init(void)
{
    dm_stat_need_rcu_barrier = 0;
    return 0;
}

void dm_statistics_exit(void)
{
    if (dm_stat_need_rcu_barrier)
        rcu_barrier();
    if (WARN_ON(shared_memory_amount))
        DMCRIT("shared_memory_amount leaked: %lu", shared_memory_amount);
}

module_param_named(stats_current_allocated_bytes, shared_memory_amount, ulong, S_IRUGO);
MODULE_PARM_DESC(stats_current_allocated_bytes, "Memory currently used by statistics");
@@ -0,0 +1,40 @@
#ifndef DM_STATS_H
#define DM_STATS_H

#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/list.h>

int dm_statistics_init(void);
void dm_statistics_exit(void);

struct dm_stats {
    struct mutex mutex;
    struct list_head list;      /* list of struct dm_stat */
    struct dm_stats_last_position __percpu *last;
    sector_t last_sector;
    unsigned last_rw;
};

struct dm_stats_aux {
    bool merged;
};

void dm_stats_init(struct dm_stats *st);
void dm_stats_cleanup(struct dm_stats *st);

struct mapped_device;

int dm_stats_message(struct mapped_device *md, unsigned argc, char **argv,
                     char *result, unsigned maxlen);

void dm_stats_account_io(struct dm_stats *stats, unsigned long bi_rw,
                         sector_t bi_sector, unsigned bi_sectors, bool end,
                         unsigned long duration, struct dm_stats_aux *aux);

static inline bool dm_stats_used(struct dm_stats *st)
{
    return !list_empty(&st->list);
}

#endif
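The header above is the entire interface the DM core consumes. As a rough sketch of how a caller is expected to use it, accounting wraps each bio twice, once at submission and once at completion (the function names and jiffies bookkeeping below are illustrative assumptions, not the actual dm.c code; bi_rw/bi_sector are the 3.12-era bio fields):

    /* illustrative only: account a bio at start and at end */
    static void sketch_start_io(struct dm_stats *stats, struct bio *bio,
                                struct dm_stats_aux *aux)
    {
            if (dm_stats_used(stats))
                    dm_stats_account_io(stats, bio->bi_rw, bio->bi_sector,
                                        bio_sectors(bio), false, 0, aux);
    }

    static void sketch_end_io(struct dm_stats *stats, struct bio *bio,
                              struct dm_stats_aux *aux, unsigned long start_time)
    {
            if (dm_stats_used(stats))
                    dm_stats_account_io(stats, bio->bi_rw, bio->bi_sector,
                                        bio_sectors(bio), true,
                                        jiffies - start_time, aux);
    }

Note the end-side call must replay the same sector and length the start-side used, so the real caller records them before the bio is advanced.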
@@ -4,6 +4,7 @@
  * This file is released under the GPL.
  */
 
 #include "dm.h"
+#include <linux/device-mapper.h>
 
 #include <linux/module.h>
@@ -860,14 +860,17 @@ EXPORT_SYMBOL(dm_consume_args);
 static int dm_table_set_type(struct dm_table *t)
 {
     unsigned i;
-    unsigned bio_based = 0, request_based = 0;
+    unsigned bio_based = 0, request_based = 0, hybrid = 0;
     struct dm_target *tgt;
     struct dm_dev_internal *dd;
     struct list_head *devices;
+    unsigned live_md_type;
 
     for (i = 0; i < t->num_targets; i++) {
         tgt = t->targets + i;
-        if (dm_target_request_based(tgt))
+        if (dm_target_hybrid(tgt))
+            hybrid = 1;
+        else if (dm_target_request_based(tgt))
             request_based = 1;
         else
             bio_based = 1;

@@ -879,6 +882,19 @@ static int dm_table_set_type(struct dm_table *t)
         }
     }
 
+    if (hybrid && !bio_based && !request_based) {
+        /*
+         * The targets can work either way.
+         * Determine the type from the live device.
+         * Default to bio-based if device is new.
+         */
+        live_md_type = dm_get_md_type(t->md);
+        if (live_md_type == DM_TYPE_REQUEST_BASED)
+            request_based = 1;
+        else
+            bio_based = 1;
+    }
+
     if (bio_based) {
         /* We must use this table as bio-based */
         t->type = DM_TYPE_BIO_BASED;
@@ -131,12 +131,19 @@ static int io_err_map(struct dm_target *tt, struct bio *bio)
     return -EIO;
 }
 
+static int io_err_map_rq(struct dm_target *ti, struct request *clone,
+                         union map_info *map_context)
+{
+    return -EIO;
+}
+
 static struct target_type error_target = {
     .name = "error",
-    .version = {1, 1, 0},
+    .version = {1, 2, 0},
     .ctr  = io_err_ctr,
     .dtr  = io_err_dtr,
     .map  = io_err_map,
+    .map_rq = io_err_map_rq,
 };
 
 int __init dm_target_init(void)
@ -887,7 +887,8 @@ static int commit(struct pool *pool)
|
|||
|
||||
r = dm_pool_commit_metadata(pool->pmd);
|
||||
if (r)
|
||||
DMERR_LIMIT("commit failed: error = %d", r);
|
||||
DMERR_LIMIT("%s: commit failed: error = %d",
|
||||
dm_device_name(pool->pool_md), r);
|
||||
|
||||
return r;
|
||||
}
|
||||
|
@ -917,6 +918,13 @@ static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
|
|||
unsigned long flags;
|
||||
struct pool *pool = tc->pool;
|
||||
|
||||
/*
|
||||
* Once no_free_space is set we must not allow allocation to succeed.
|
||||
* Otherwise it is difficult to explain, debug, test and support.
|
||||
*/
|
||||
if (pool->no_free_space)
|
||||
return -ENOSPC;
|
||||
|
||||
r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
|
||||
if (r)
|
||||
return r;
|
||||
|
@@ -931,31 +939,30 @@ static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
 	}
 
 	if (!free_blocks) {
-		if (pool->no_free_space)
-			return -ENOSPC;
-		else {
-			/*
-			 * Try to commit to see if that will free up some
-			 * more space.
-			 */
-			(void) commit_or_fallback(pool);
-
-			r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
-			if (r)
-				return r;
-
-			/*
-			 * If we still have no space we set a flag to avoid
-			 * doing all this checking and return -ENOSPC.
-			 */
-			if (!free_blocks) {
-				DMWARN("%s: no free space available.",
-				       dm_device_name(pool->pool_md));
-				spin_lock_irqsave(&pool->lock, flags);
-				pool->no_free_space = 1;
-				spin_unlock_irqrestore(&pool->lock, flags);
-				return -ENOSPC;
-			}
-		}
+		/*
+		 * Try to commit to see if that will free up some
+		 * more space.
+		 */
+		(void) commit_or_fallback(pool);
+
+		r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
+		if (r)
+			return r;
+
+		/*
+		 * If we still have no space we set a flag to avoid
+		 * doing all this checking and return -ENOSPC.  This
+		 * flag serves as a latch that disallows allocations from
+		 * this pool until the admin takes action (e.g. resize or
+		 * table reload).
+		 */
+		if (!free_blocks) {
+			DMWARN("%s: no free space available.",
+			       dm_device_name(pool->pool_md));
+			spin_lock_irqsave(&pool->lock, flags);
+			pool->no_free_space = 1;
+			spin_unlock_irqrestore(&pool->lock, flags);
+			return -ENOSPC;
+		}
 	}
 
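The check added at the top of alloc_data_block() is what makes
no_free_space a real latch: once the pool has been observed full, every
subsequent allocation fails fast with -ENOSPC, with no further free-block
queries or commit retries, until the administrator resizes the pool or
reloads the table. A condensed userspace sketch of that control flow (the
two helpers are hypothetical stand-ins for the metadata calls):

	#include <errno.h>
	#include <stdbool.h>

	struct pool_state { bool no_free_space; };

	unsigned free_block_count(struct pool_state *p);	/* hypothetical */
	void commit_to_reclaim_space(struct pool_state *p);	/* hypothetical */

	static int alloc_sketch(struct pool_state *p)
	{
		if (p->no_free_space)
			return -ENOSPC;		/* latched: fail fast */

		if (!free_block_count(p)) {
			commit_to_reclaim_space(p);
			if (!free_block_count(p)) {
				p->no_free_space = true; /* latch until admin action */
				return -ENOSPC;
			}
		}
		return 0;			/* allocation can proceed */
	}
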
@@ -1085,6 +1092,7 @@ static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block,
 {
 	int r;
 	dm_block_t data_block;
+	struct pool *pool = tc->pool;
 
 	r = alloc_data_block(tc, &data_block);
 	switch (r) {

@@ -1094,13 +1102,14 @@ static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block,
 		break;
 
 	case -ENOSPC:
-		no_space(tc->pool, cell);
+		no_space(pool, cell);
 		break;
 
 	default:
 		DMERR_LIMIT("%s: alloc_data_block() failed: error = %d",
 			    __func__, r);
-		cell_error(tc->pool, cell);
+		set_pool_mode(pool, PM_READ_ONLY);
+		cell_error(pool, cell);
 		break;
 	}
 }

@@ -1386,7 +1395,8 @@ static void set_pool_mode(struct pool *pool, enum pool_mode mode)
 
 	switch (mode) {
 	case PM_FAIL:
-		DMERR("switching pool to failure mode");
+		DMERR("%s: switching pool to failure mode",
+		      dm_device_name(pool->pool_md));
 		pool->process_bio = process_bio_fail;
 		pool->process_discard = process_bio_fail;
 		pool->process_prepared_mapping = process_prepared_mapping_fail;

@@ -1394,10 +1404,12 @@ static void set_pool_mode(struct pool *pool, enum pool_mode mode)
 		break;
 
 	case PM_READ_ONLY:
-		DMERR("switching pool to read-only mode");
+		DMERR("%s: switching pool to read-only mode",
+		      dm_device_name(pool->pool_md));
 		r = dm_pool_abort_metadata(pool->pmd);
 		if (r) {
-			DMERR("aborting transaction failed");
+			DMERR("%s: aborting transaction failed",
+			      dm_device_name(pool->pool_md));
 			set_pool_mode(pool, PM_FAIL);
 		} else {
 			dm_pool_metadata_read_only(pool->pmd);

@@ -2156,19 +2168,22 @@ static int maybe_resize_data_dev(struct dm_target *ti, bool *need_commit)
 
 	r = dm_pool_get_data_dev_size(pool->pmd, &sb_data_size);
 	if (r) {
-		DMERR("failed to retrieve data device size");
+		DMERR("%s: failed to retrieve data device size",
+		      dm_device_name(pool->pool_md));
 		return r;
 	}
 
 	if (data_size < sb_data_size) {
-		DMERR("pool target (%llu blocks) too small: expected %llu",
+		DMERR("%s: pool target (%llu blocks) too small: expected %llu",
+		      dm_device_name(pool->pool_md),
 		      (unsigned long long)data_size, sb_data_size);
 		return -EINVAL;
 
 	} else if (data_size > sb_data_size) {
 		r = dm_pool_resize_data_dev(pool->pmd, data_size);
 		if (r) {
-			DMERR("failed to resize data device");
+			DMERR("%s: failed to resize data device",
+			      dm_device_name(pool->pool_md));
 			set_pool_mode(pool, PM_READ_ONLY);
 			return r;
 		}

@@ -2192,19 +2207,22 @@ static int maybe_resize_metadata_dev(struct dm_target *ti, bool *need_commit)
 
 	r = dm_pool_get_metadata_dev_size(pool->pmd, &sb_metadata_dev_size);
 	if (r) {
-		DMERR("failed to retrieve data device size");
+		DMERR("%s: failed to retrieve metadata device size",
+		      dm_device_name(pool->pool_md));
 		return r;
 	}
 
 	if (metadata_dev_size < sb_metadata_dev_size) {
-		DMERR("metadata device (%llu blocks) too small: expected %llu",
+		DMERR("%s: metadata device (%llu blocks) too small: expected %llu",
+		      dm_device_name(pool->pool_md),
 		      metadata_dev_size, sb_metadata_dev_size);
 		return -EINVAL;
 
 	} else if (metadata_dev_size > sb_metadata_dev_size) {
 		r = dm_pool_resize_metadata_dev(pool->pmd, metadata_dev_size);
 		if (r) {
-			DMERR("failed to resize metadata device");
+			DMERR("%s: failed to resize metadata device",
+			      dm_device_name(pool->pool_md));
 			return r;
 		}

@@ -2530,37 +2548,43 @@ static void pool_status(struct dm_target *ti, status_type_t type,
 
 	r = dm_pool_get_metadata_transaction_id(pool->pmd, &transaction_id);
 	if (r) {
-		DMERR("dm_pool_get_metadata_transaction_id returned %d", r);
+		DMERR("%s: dm_pool_get_metadata_transaction_id returned %d",
+		      dm_device_name(pool->pool_md), r);
 		goto err;
 	}
 
 	r = dm_pool_get_free_metadata_block_count(pool->pmd, &nr_free_blocks_metadata);
 	if (r) {
-		DMERR("dm_pool_get_free_metadata_block_count returned %d", r);
+		DMERR("%s: dm_pool_get_free_metadata_block_count returned %d",
+		      dm_device_name(pool->pool_md), r);
 		goto err;
 	}
 
 	r = dm_pool_get_metadata_dev_size(pool->pmd, &nr_blocks_metadata);
 	if (r) {
-		DMERR("dm_pool_get_metadata_dev_size returned %d", r);
+		DMERR("%s: dm_pool_get_metadata_dev_size returned %d",
+		      dm_device_name(pool->pool_md), r);
 		goto err;
 	}
 
 	r = dm_pool_get_free_block_count(pool->pmd, &nr_free_blocks_data);
 	if (r) {
-		DMERR("dm_pool_get_free_block_count returned %d", r);
+		DMERR("%s: dm_pool_get_free_block_count returned %d",
+		      dm_device_name(pool->pool_md), r);
 		goto err;
 	}
 
 	r = dm_pool_get_data_dev_size(pool->pmd, &nr_blocks_data);
 	if (r) {
-		DMERR("dm_pool_get_data_dev_size returned %d", r);
+		DMERR("%s: dm_pool_get_data_dev_size returned %d",
+		      dm_device_name(pool->pool_md), r);
 		goto err;
 	}
 
 	r = dm_pool_get_metadata_snap(pool->pmd, &held_root);
 	if (r) {
-		DMERR("dm_pool_get_metadata_snap returned %d", r);
+		DMERR("%s: dm_pool_get_metadata_snap returned %d",
+		      dm_device_name(pool->pool_md), r);
 		goto err;
 	}
 
@@ -2648,9 +2672,17 @@ static void pool_io_hints(struct dm_target *ti, struct queue_limits *limits)
 {
 	struct pool_c *pt = ti->private;
 	struct pool *pool = pt->pool;
+	uint64_t io_opt_sectors = limits->io_opt >> SECTOR_SHIFT;
 
-	blk_limits_io_min(limits, 0);
-	blk_limits_io_opt(limits, pool->sectors_per_block << SECTOR_SHIFT);
+	/*
+	 * If the system-determined stacked limits are compatible with the
+	 * pool's blocksize (io_opt is a factor) do not override them.
+	 */
+	if (io_opt_sectors < pool->sectors_per_block ||
+	    do_div(io_opt_sectors, pool->sectors_per_block)) {
+		blk_limits_io_min(limits, 0);
+		blk_limits_io_opt(limits, pool->sectors_per_block << SECTOR_SHIFT);
+	}
 
 	/*
 	 * pt->adjusted_pf is a staging area for the actual features to use.
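The new test keeps the stacked limits whenever the stacked io_opt is a
whole multiple of the pool's block size; do_div() divides io_opt_sectors
in place and returns the remainder, so it doubles as the divisibility
check. For example, with 256KB pool blocks (sectors_per_block = 512), a
stacked io_opt of 1MB (2048 sectors) divides evenly and is preserved,
while 192KB (384 sectors) is smaller than one block and gets overridden.
A userspace sketch of the same predicate:

	#include <stdbool.h>
	#include <stdint.h>

	/* Keep the stacked io_opt only if it is at least one pool block
	 * and a whole multiple of it (plain % stands in for do_div()). */
	static bool stacked_io_opt_compatible(uint64_t io_opt_sectors,
					      uint64_t sectors_per_block)
	{
		return io_opt_sectors >= sectors_per_block &&
		       io_opt_sectors % sectors_per_block == 0;
	}
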
@@ -2669,7 +2701,7 @@ static struct target_type pool_target = {
 	.name = "thin-pool",
 	.features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
 		    DM_TARGET_IMMUTABLE,
-	.version = {1, 8, 0},
+	.version = {1, 9, 0},
 	.module = THIS_MODULE,
 	.ctr = pool_ctr,
 	.dtr = pool_dtr,

@@ -2956,7 +2988,7 @@ static int thin_iterate_devices(struct dm_target *ti,
 
 static struct target_type thin_target = {
 	.name = "thin",
-	.version = {1, 8, 0},
+	.version = {1, 9, 0},
 	.module = THIS_MODULE,
 	.ctr = thin_ctr,
 	.dtr = thin_dtr,

@@ -60,6 +60,7 @@ struct dm_io {
 	struct bio *bio;
 	unsigned long start_time;
 	spinlock_t endio_lock;
+	struct dm_stats_aux stats_aux;
 };
 
 /*

@@ -198,6 +199,8 @@ struct mapped_device {
 
 	/* zero-length flush that will be cloned and submitted to targets */
 	struct bio flush_bio;
+
+	struct dm_stats stats;
 };
 
 /*

@@ -269,6 +272,7 @@ static int (*_inits[])(void) __initdata = {
 	dm_io_init,
 	dm_kcopyd_init,
 	dm_interface_init,
+	dm_statistics_init,
 };
 
 static void (*_exits[])(void) = {

@@ -279,6 +283,7 @@ static void (*_exits[])(void) = {
 	dm_io_exit,
 	dm_kcopyd_exit,
 	dm_interface_exit,
+	dm_statistics_exit,
 };
 
 static int __init dm_init(void)

@@ -384,6 +389,16 @@ int dm_lock_for_deletion(struct mapped_device *md)
 	return r;
 }
 
+sector_t dm_get_size(struct mapped_device *md)
+{
+	return get_capacity(md->disk);
+}
+
+struct dm_stats *dm_get_stats(struct mapped_device *md)
+{
+	return &md->stats;
+}
+
 static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
 {
 	struct mapped_device *md = bdev->bd_disk->private_data;

@@ -466,8 +481,9 @@ static int md_in_flight(struct mapped_device *md)
 static void start_io_acct(struct dm_io *io)
 {
 	struct mapped_device *md = io->md;
+	struct bio *bio = io->bio;
 	int cpu;
-	int rw = bio_data_dir(io->bio);
+	int rw = bio_data_dir(bio);
 
 	io->start_time = jiffies;
 

@@ -476,6 +492,10 @@ static void start_io_acct(struct dm_io *io)
 	part_stat_unlock();
 	atomic_set(&dm_disk(md)->part0.in_flight[rw],
 		   atomic_inc_return(&md->pending[rw]));
+
+	if (unlikely(dm_stats_used(&md->stats)))
+		dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_sector,
+				    bio_sectors(bio), false, 0, &io->stats_aux);
 }
 
 static void end_io_acct(struct dm_io *io)

@@ -491,6 +511,10 @@ static void end_io_acct(struct dm_io *io)
 	part_stat_add(cpu, &dm_disk(md)->part0, ticks[rw], duration);
 	part_stat_unlock();
 
+	if (unlikely(dm_stats_used(&md->stats)))
+		dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_sector,
+				    bio_sectors(bio), true, duration, &io->stats_aux);
+
 	/*
 	 * After this is decremented the bio must not be touched if it is
 	 * a flush.

@@ -1519,7 +1543,7 @@ static void _dm_request(struct request_queue *q, struct bio *bio)
 	return;
 }
 
-static int dm_request_based(struct mapped_device *md)
+int dm_request_based(struct mapped_device *md)
 {
 	return blk_queue_stackable(md->queue);
 }

@@ -1946,8 +1970,7 @@ static struct mapped_device *alloc_dev(int minor)
 	add_disk(md->disk);
 	format_dev_t(md->name, MKDEV(_major, minor));
 
-	md->wq = alloc_workqueue("kdmflush",
-				 WQ_NON_REENTRANT | WQ_MEM_RECLAIM, 0);
+	md->wq = alloc_workqueue("kdmflush", WQ_MEM_RECLAIM, 0);
 	if (!md->wq)
 		goto bad_thread;
 

@@ -1959,6 +1982,8 @@ static struct mapped_device *alloc_dev(int minor)
 	md->flush_bio.bi_bdev = md->bdev;
 	md->flush_bio.bi_rw = WRITE_FLUSH;
 
+	dm_stats_init(&md->stats);
+
 	/* Populate the mapping, nobody knows we exist yet */
 	spin_lock(&_minor_lock);
 	old_md = idr_replace(&_minor_idr, md, minor);

@@ -2010,6 +2035,7 @@ static void free_dev(struct mapped_device *md)
 
 	put_disk(md->disk);
 	blk_cleanup_queue(md->queue);
+	dm_stats_cleanup(&md->stats);
 	module_put(THIS_MODULE);
 	kfree(md);
 }

@@ -2151,7 +2177,7 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
 	/*
 	 * Wipe any geometry if the size of the table changed.
 	 */
-	if (size != get_capacity(md->disk))
+	if (size != dm_get_size(md))
 		memset(&md->geometry, 0, sizeof(md->geometry));
 
 	__set_size(md, size);

@@ -2236,11 +2262,13 @@ void dm_unlock_md_type(struct mapped_device *md)
 
 void dm_set_md_type(struct mapped_device *md, unsigned type)
 {
+	BUG_ON(!mutex_is_locked(&md->type_lock));
 	md->type = type;
 }
 
 unsigned dm_get_md_type(struct mapped_device *md)
 {
+	BUG_ON(!mutex_is_locked(&md->type_lock));
 	return md->type;
 }
 
@@ -2695,6 +2723,38 @@ int dm_resume(struct mapped_device *md)
 	return r;
 }
 
+/*
+ * Internal suspend/resume works like userspace-driven suspend. It waits
+ * until all bios finish and prevents issuing new bios to the target drivers.
+ * It may be used only from the kernel.
+ *
+ * Internal suspend holds md->suspend_lock, which prevents interaction with
+ * userspace-driven suspend.
+ */
+
+void dm_internal_suspend(struct mapped_device *md)
+{
+	mutex_lock(&md->suspend_lock);
+	if (dm_suspended_md(md))
+		return;
+
+	set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
+	synchronize_srcu(&md->io_barrier);
+	flush_workqueue(md->wq);
+	dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);
+}
+
+void dm_internal_resume(struct mapped_device *md)
+{
+	if (dm_suspended_md(md))
+		goto done;
+
+	dm_queue_flush(md);
+
+done:
+	mutex_unlock(&md->suspend_lock);
+}
+
 /*-----------------------------------------------------------------
  * Event notification.
  *---------------------------------------------------------------*/
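Note the locking contract here: dm_internal_suspend() returns with
md->suspend_lock held, whether or not it actually had to suspend, and it
is dm_internal_resume() that drops it, so userspace-driven suspend is
excluded for the whole window. A sketch of the intended pairing
(with_device_quiesced() is a hypothetical caller):

	/* Run fn() with the device quiesced; no new bios reach the
	 * targets between the two calls. */
	static void with_device_quiesced(struct mapped_device *md,
					 void (*fn)(struct mapped_device *))
	{
		dm_internal_suspend(md);	/* takes md->suspend_lock */
		fn(md);
		dm_internal_resume(md);		/* drops md->suspend_lock */
	}
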
@@ -16,6 +16,8 @@
 #include <linux/blkdev.h>
 #include <linux/hdreg.h>
 
+#include "dm-stats.h"
+
 /*
  * Suspend feature flags
  */

@@ -88,11 +90,22 @@ int dm_setup_md_queue(struct mapped_device *md);
  */
 #define dm_target_is_valid(t) ((t)->table)
 
 /*
+ * To check whether the target type is bio-based or not (request-based).
+ */
+#define dm_target_bio_based(t) ((t)->type->map != NULL)
+
+/*
  * To check whether the target type is request-based or not (bio-based).
  */
 #define dm_target_request_based(t) ((t)->type->map_rq != NULL)
 
+/*
+ * To check whether the target type is a hybrid (capable of being
+ * either request-based or bio-based).
+ */
+#define dm_target_hybrid(t) (dm_target_bio_based(t) && dm_target_request_based(t))
+
 /*-----------------------------------------------------------------
  * A registry of target types.
  *---------------------------------------------------------------*/
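In this series the only hybrid target is the updated error target above:
it now supplies both a .map and a .map_rq method, so dm_target_bio_based()
and dm_target_request_based() are both true and dm_table_set_type()
defers to the live device's type. A minimal sketch of the shape these
macros match (all names here are hypothetical):

	static int eh_ctr(struct dm_target *ti, unsigned argc, char **argv)
	{
		return 0;
	}

	static void eh_dtr(struct dm_target *ti)
	{
	}

	static int eh_map(struct dm_target *ti, struct bio *bio)
	{
		return -EIO;
	}

	static int eh_map_rq(struct dm_target *ti, struct request *clone,
			     union map_info *map_context)
	{
		return -EIO;
	}

	/* Both .map and .map_rq set, so dm_target_hybrid() is true. */
	static struct target_type example_hybrid_target = {
		.name    = "example-hybrid",
		.version = {1, 0, 0},
		.module  = THIS_MODULE,
		.ctr     = eh_ctr,
		.dtr     = eh_dtr,
		.map     = eh_map,
		.map_rq  = eh_map_rq,
	};
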
@@ -146,10 +159,16 @@ void dm_destroy(struct mapped_device *md);
 void dm_destroy_immediate(struct mapped_device *md);
 int dm_open_count(struct mapped_device *md);
 int dm_lock_for_deletion(struct mapped_device *md);
+int dm_request_based(struct mapped_device *md);
+sector_t dm_get_size(struct mapped_device *md);
+struct dm_stats *dm_get_stats(struct mapped_device *md);
 
 int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
 		      unsigned cookie);
 
+void dm_internal_suspend(struct mapped_device *md);
+void dm_internal_resume(struct mapped_device *md);
+
 int dm_io_init(void);
 void dm_io_exit(void);
 

@@ -162,4 +181,12 @@ void dm_kcopyd_exit(void);
 struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity, unsigned per_bio_data_size);
 void dm_free_md_mempools(struct dm_md_mempools *pools);
 
+/*
+ * Helpers that are used by DM core
+ */
+static inline bool dm_message_test_buffer_overflow(char *result, unsigned maxlen)
+{
+	return !maxlen || strlen(result) + 1 >= maxlen;
+}
+
 #endif

@@ -615,6 +615,11 @@ int dm_bm_flush_and_unlock(struct dm_block_manager *bm,
 }
 EXPORT_SYMBOL_GPL(dm_bm_flush_and_unlock);
 
+void dm_bm_prefetch(struct dm_block_manager *bm, dm_block_t b)
+{
+	dm_bufio_prefetch(bm->bufio, b, 1);
+}
+
 void dm_bm_set_read_only(struct dm_block_manager *bm)
 {
 	bm->read_only = true;

@@ -108,6 +108,11 @@ int dm_bm_unlock(struct dm_block *b);
 int dm_bm_flush_and_unlock(struct dm_block_manager *bm,
 			   struct dm_block *superblock);
 
+/*
+ * Request data be prefetched into the cache.
+ */
+void dm_bm_prefetch(struct dm_block_manager *bm, dm_block_t b);
+
 /*
  * Switches the bm to a read only mode. Once read-only mode
  * has been entered the following functions will return -EPERM.

@@ -161,6 +161,7 @@ struct frame {
 };
 
 struct del_stack {
+	struct dm_btree_info *info;
 	struct dm_transaction_manager *tm;
 	int top;
 	struct frame spine[MAX_SPINE_DEPTH];

@@ -183,6 +184,20 @@ static int unprocessed_frames(struct del_stack *s)
 	return s->top >= 0;
 }
 
+static void prefetch_children(struct del_stack *s, struct frame *f)
+{
+	unsigned i;
+	struct dm_block_manager *bm = dm_tm_get_bm(s->tm);
+
+	for (i = 0; i < f->nr_children; i++)
+		dm_bm_prefetch(bm, value64(f->n, i));
+}
+
+static bool is_internal_level(struct dm_btree_info *info, struct frame *f)
+{
+	return f->level < (info->levels - 1);
+}
+
 static int push_frame(struct del_stack *s, dm_block_t b, unsigned level)
 {
 	int r;

@@ -205,6 +220,7 @@ static int push_frame(struct del_stack *s, dm_block_t b, unsigned level)
 		dm_tm_dec(s->tm, b);
 
 	else {
+		uint32_t flags;
 		struct frame *f = s->spine + ++s->top;
 
 		r = dm_tm_read_lock(s->tm, b, &btree_node_validator, &f->b);

@@ -217,6 +233,10 @@ static int push_frame(struct del_stack *s, dm_block_t b, unsigned level)
 		f->level = level;
 		f->nr_children = le32_to_cpu(f->n->header.nr_entries);
 		f->current_child = 0;
+
+		flags = le32_to_cpu(f->n->header.flags);
+		if (flags & INTERNAL_NODE || is_internal_level(s->info, f))
+			prefetch_children(s, f);
 	}
 
 	return 0;

@@ -230,11 +250,6 @@ static void pop_frame(struct del_stack *s)
 	dm_tm_unlock(s->tm, f->b);
 }
 
-static bool is_internal_level(struct dm_btree_info *info, struct frame *f)
-{
-	return f->level < (info->levels - 1);
-}
-
 int dm_btree_del(struct dm_btree_info *info, dm_block_t root)
 {
 	int r;

@@ -243,6 +258,7 @@ int dm_btree_del(struct dm_btree_info *info, dm_block_t root)
 	s = kmalloc(sizeof(*s), GFP_KERNEL);
 	if (!s)
 		return -ENOMEM;
+	s->info = info;
 	s->tm = info->tm;
 	s->top = -1;
 

@@ -287,7 +303,7 @@ int dm_btree_del(struct dm_btree_info *info, dm_block_t root)
 				info->value_type.dec(info->value_type.context,
 						     value_ptr(f->n, i));
 			}
-			f->current_child = f->nr_children;
+			pop_frame(s);
 		}
 	}

@@ -292,16 +292,11 @@ int sm_ll_lookup_bitmap(struct ll_disk *ll, dm_block_t b, uint32_t *result)
 	return dm_tm_unlock(ll->tm, blk);
 }
 
-int sm_ll_lookup(struct ll_disk *ll, dm_block_t b, uint32_t *result)
+static int sm_ll_lookup_big_ref_count(struct ll_disk *ll, dm_block_t b,
+				      uint32_t *result)
 {
 	__le32 le_rc;
-	int r = sm_ll_lookup_bitmap(ll, b, result);
-
-	if (r)
-		return r;
-
-	if (*result != 3)
-		return r;
+	int r;
 
 	r = dm_btree_lookup(&ll->ref_count_info, ll->ref_count_root, &b, &le_rc);
 	if (r < 0)

@@ -312,6 +307,19 @@ int sm_ll_lookup(struct ll_disk *ll, dm_block_t b, uint32_t *result)
 	return r;
 }
 
+int sm_ll_lookup(struct ll_disk *ll, dm_block_t b, uint32_t *result)
+{
+	int r = sm_ll_lookup_bitmap(ll, b, result);
+
+	if (r)
+		return r;
+
+	if (*result != 3)
+		return r;
+
+	return sm_ll_lookup_big_ref_count(ll, b, result);
+}
+
 int sm_ll_find_free_block(struct ll_disk *ll, dm_block_t begin,
 			  dm_block_t end, dm_block_t *result)
 {

@@ -372,11 +380,12 @@ int sm_ll_find_free_block(struct ll_disk *ll, dm_block_t begin,
 	return -ENOSPC;
 }
 
-int sm_ll_insert(struct ll_disk *ll, dm_block_t b,
-		 uint32_t ref_count, enum allocation_event *ev)
+static int sm_ll_mutate(struct ll_disk *ll, dm_block_t b,
+			uint32_t (*mutator)(void *context, uint32_t old),
+			void *context, enum allocation_event *ev)
 {
 	int r;
-	uint32_t bit, old;
+	uint32_t bit, old, ref_count;
 	struct dm_block *nb;
 	dm_block_t index = b;
 	struct disk_index_entry ie_disk;

@@ -399,6 +408,14 @@ int sm_ll_insert(struct ll_disk *ll, dm_block_t b,
 	bm_le = dm_bitmap_data(nb);
 	old = sm_lookup_bitmap(bm_le, bit);
 
+	if (old > 2) {
+		r = sm_ll_lookup_big_ref_count(ll, b, &old);
+		if (r < 0)
+			return r;
+	}
+
+	ref_count = mutator(context, old);
+
 	if (ref_count <= 2) {
 		sm_set_bitmap(bm_le, bit, ref_count);
 

@@ -448,31 +465,35 @@ int sm_ll_insert(struct ll_disk *ll, dm_block_t b,
 	return ll->save_ie(ll, index, &ie_disk);
 }
 
+static uint32_t set_ref_count(void *context, uint32_t old)
+{
+	return *((uint32_t *) context);
+}
+
+int sm_ll_insert(struct ll_disk *ll, dm_block_t b,
+		 uint32_t ref_count, enum allocation_event *ev)
+{
+	return sm_ll_mutate(ll, b, set_ref_count, &ref_count, ev);
+}
+
+static uint32_t inc_ref_count(void *context, uint32_t old)
+{
+	return old + 1;
+}
+
 int sm_ll_inc(struct ll_disk *ll, dm_block_t b, enum allocation_event *ev)
 {
-	int r;
-	uint32_t rc;
-
-	r = sm_ll_lookup(ll, b, &rc);
-	if (r)
-		return r;
-
-	return sm_ll_insert(ll, b, rc + 1, ev);
+	return sm_ll_mutate(ll, b, inc_ref_count, NULL, ev);
+}
+
+static uint32_t dec_ref_count(void *context, uint32_t old)
+{
+	return old - 1;
 }
 
 int sm_ll_dec(struct ll_disk *ll, dm_block_t b, enum allocation_event *ev)
 {
-	int r;
-	uint32_t rc;
-
-	r = sm_ll_lookup(ll, b, &rc);
-	if (r)
-		return r;
-
-	if (!rc)
-		return -EINVAL;
-
-	return sm_ll_insert(ll, b, rc - 1, ev);
+	return sm_ll_mutate(ll, b, dec_ref_count, NULL, ev);
 }
 
 int sm_ll_commit(struct ll_disk *ll)
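The refactor replaces lookup-then-insert (two full bitmap/btree walks per
sm_ll_inc() or sm_ll_dec()) with a single read-modify-write pass that
applies a caller-supplied mutator to whatever reference count it finds.
The same shape in miniature, as a self-contained userspace sketch (the
toy store is a stand-in for the bitmap plus overflow btree):

	#include <stdint.h>

	struct toy_store { uint32_t counts[16]; };

	static uint32_t inc_cb(void *ctx, uint32_t old)
	{
		(void)ctx;
		return old + 1;
	}

	static uint32_t set_cb(void *ctx, uint32_t old)
	{
		(void)old;
		return *(uint32_t *)ctx;
	}

	/* One traversal serves set, inc and dec alike. */
	static int toy_mutate(struct toy_store *s, unsigned idx,
			      uint32_t (*mutator)(void *ctx, uint32_t old),
			      void *ctx)
	{
		if (idx >= 16)
			return -1;
		s->counts[idx] = mutator(ctx, s->counts[idx]);
		return 0;
	}
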
@@ -10,6 +10,7 @@
 
 #include <linux/bio.h>
 #include <linux/blkdev.h>
+#include <linux/math64.h>
 #include <linux/ratelimit.h>
 
 struct dm_dev;

@@ -550,6 +551,14 @@ extern struct ratelimit_state dm_ratelimit_state;
 #define DM_MAPIO_REMAPPED	1
 #define DM_MAPIO_REQUEUE	DM_ENDIO_REQUEUE
 
+#define dm_sector_div64(x, y)( \
+{ \
+	u64 _res; \
+	(x) = div64_u64_rem(x, y, &_res); \
+	_res; \
+} \
+)
+
 /*
  * Ceiling(n / sz)
  */
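dm_sector_div64() follows the calling convention of sector_div() but with
a 64-bit divisor: it replaces x with the quotient, and the statement
expression evaluates to the remainder. With illustrative values:

	sector_t pos = 1000005;		/* hypothetical sector offset */
	sector_t area_size = 1000;	/* hypothetical dm-stats area size */
	sector_t offset_into_area;

	offset_into_area = dm_sector_div64(pos, area_size);
	/* now pos == 1000 (the area index) and offset_into_area == 5 */
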
@@ -30,6 +30,15 @@ static inline s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
 	return dividend / divisor;
 }
 
+/**
+ * div64_u64_rem - unsigned 64bit divide with 64bit divisor and remainder
+ */
+static inline u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
+{
+	*remainder = dividend % divisor;
+	return dividend / divisor;
+}
+
 /**
  * div64_u64 - unsigned 64bit divide with 64bit divisor
  */

@@ -63,6 +72,10 @@ static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
 extern s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder);
 #endif
 
+#ifndef div64_u64_rem
+extern u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder);
+#endif
+
 #ifndef div64_u64
 extern u64 div64_u64(u64 dividend, u64 divisor);
 #endif

@@ -267,9 +267,9 @@ enum {
 #define DM_DEV_SET_GEOMETRY	_IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl)
 
 #define DM_VERSION_MAJOR	4
-#define DM_VERSION_MINOR	25
+#define DM_VERSION_MINOR	26
 #define DM_VERSION_PATCHLEVEL	0
-#define DM_VERSION_EXTRA	"-ioctl (2013-06-26)"
+#define DM_VERSION_EXTRA	"-ioctl (2013-08-15)"
 
 /* Status bits */
 #define DM_READONLY_FLAG	(1 << 0) /* In/Out */

 lib/div64.c | 40 ++++++++++++++++++++++++++++++++++++++++

@@ -78,6 +78,46 @@ s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
 EXPORT_SYMBOL(div_s64_rem);
 #endif
 
+/**
+ * div64_u64_rem - unsigned 64bit divide with 64bit divisor and remainder
+ * @dividend:	64bit dividend
+ * @divisor:	64bit divisor
+ * @remainder:	64bit remainder
+ *
+ * This implementation is comparable to the algorithm used by div64_u64.
+ * But this operation, which includes math for calculating the remainder,
+ * is kept distinct to avoid slowing down the div64_u64 operation on 32bit
+ * systems.
+ */
+#ifndef div64_u64_rem
+u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
+{
+	u32 high = divisor >> 32;
+	u64 quot;
+
+	if (high == 0) {
+		u32 rem32;
+		quot = div_u64_rem(dividend, divisor, &rem32);
+		*remainder = rem32;
+	} else {
+		int n = 1 + fls(high);
+		quot = div_u64(dividend >> n, divisor >> n);
+
+		if (quot != 0)
+			quot--;
+
+		*remainder = dividend - quot * divisor;
+		if (*remainder >= divisor) {
+			quot++;
+			*remainder -= divisor;
+		}
+	}
+
+	return quot;
+}
+EXPORT_SYMBOL(div64_u64_rem);
+#endif
+
 /**
  * div64_u64 - unsigned 64bit divide with 64bit divisor
  * @dividend:	64bit dividend
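The slow path estimates the quotient from the operands' top bits, shifting
both right until the divisor fits in 32 bits; the estimate is at most one
above the true quotient, which is why a single decrement followed by one
correction step suffices. A self-contained userspace rendering of the same
algorithm with spot checks (fls() is open-coded here; in the kernel the
fast path uses div_u64_rem()):

	#include <assert.h>
	#include <stdint.h>

	static int fls32(uint32_t x)
	{
		int n = 0;

		while (x) {
			n++;
			x >>= 1;
		}
		return n;
	}

	static uint64_t div64_u64_rem_demo(uint64_t dividend, uint64_t divisor,
					   uint64_t *remainder)
	{
		uint32_t high = divisor >> 32;
		uint64_t quot;

		if (high == 0) {
			quot = dividend / divisor;
			*remainder = dividend % divisor;
		} else {
			int n = 1 + fls32(high);

			quot = (dividend >> n) / (divisor >> n);
			if (quot != 0)
				quot--;
			*remainder = dividend - quot * divisor;
			if (*remainder >= divisor) {
				quot++;
				*remainder -= divisor;
			}
		}
		return quot;
	}

	int main(void)
	{
		uint64_t rem;

		/* fast path: divisor fits in 32 bits */
		assert(div64_u64_rem_demo(1000000000000ULL, 3000000000ULL,
					  &rem) == 333 && rem == 1000000000ULL);
		/* slow path: divisor wider than 32 bits */
		assert(div64_u64_rem_demo(1000000000007ULL, 5000000000ULL,
					  &rem) == 200 && rem == 7);
		return 0;
	}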