A handful of stable fixes for DM:

- fix thin target to always zero-fill reads to unprovisioned blocks

- fix to interlock device destruction's suspend from internal suspends

- fix 2 snapshot exception store handover bugs

- fix dm-io to cope with DISCARD and WRITE_SAME capabilities changing

-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1

iQEcBAABAgAGBQJVDIJ8AAoJEMUj8QotnQNaynEIAK4Q3mvOeI6PIwuC2iOWRglJ
C0xzTKMR6VDQeM/uMRLeiiqxdPs6b89OdSmJHKJYOrdsusjG0a4IBAO9vDayb6sW
AxOlbnpXdoiH3cqQ4dISJIfS9qM49qGM40CAflcLKYsGfLnstVLt+4l9HaWaRrHj
b2kAC+I6PDDybFlKD5KsMB56sjzhhqg/lnnwkY2omV4vLHf6RrhVhK5gcEPF7VN0
SJ5HKSNBHQnpYSEECHXkxGvZ/ZKTe9n7q0t6Q3nnTSUFTXS6esgTsK1q2AykADDR
3W1LkrAAFP31AWKPkUwCEe3C8Rk3rCsrq2H14woWX1/UxSXNPlMYCU50ak4TVmc=
=Svyk
-----END PGP SIGNATURE-----

Merge tag 'dm-4.0-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm

Pull device mapper fixes from Mike Snitzer:
 "A handful of stable fixes for DM:

  - fix thin target to always zero-fill reads to unprovisioned blocks

  - fix to interlock device destruction's suspend from internal suspends

  - fix 2 snapshot exception store handover bugs

  - fix dm-io to cope with DISCARD and WRITE_SAME capabilities changing"

* tag 'dm-4.0-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm:
  dm io: deal with wandering queue limits when handling REQ_DISCARD and REQ_WRITE_SAME
  dm snapshot: suspend merging snapshot when doing exception handover
  dm snapshot: suspend origin when doing exception handover
  dm: hold suspend_lock while suspending device during device deletion
  dm thin: fix to consistently zero-fill reads to unprovisioned blocks
commit da6b9a2049
drivers/md/dm-io.c
@@ -289,9 +289,16 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where,
 	struct request_queue *q = bdev_get_queue(where->bdev);
 	unsigned short logical_block_size = queue_logical_block_size(q);
 	sector_t num_sectors;
+	unsigned int uninitialized_var(special_cmd_max_sectors);
 
-	/* Reject unsupported discard requests */
-	if ((rw & REQ_DISCARD) && !blk_queue_discard(q)) {
+	/*
+	 * Reject unsupported discard and write same requests.
+	 */
+	if (rw & REQ_DISCARD)
+		special_cmd_max_sectors = q->limits.max_discard_sectors;
+	else if (rw & REQ_WRITE_SAME)
+		special_cmd_max_sectors = q->limits.max_write_same_sectors;
+	if ((rw & (REQ_DISCARD | REQ_WRITE_SAME)) && special_cmd_max_sectors == 0) {
 		dec_count(io, region, -EOPNOTSUPP);
 		return;
 	}
@@ -317,7 +324,7 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where,
 		store_io_and_region_in_bio(bio, io, region);
 
 		if (rw & REQ_DISCARD) {
-			num_sectors = min_t(sector_t, q->limits.max_discard_sectors, remaining);
+			num_sectors = min_t(sector_t, special_cmd_max_sectors, remaining);
 			bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT;
 			remaining -= num_sectors;
 		} else if (rw & REQ_WRITE_SAME) {
@@ -326,7 +333,7 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where,
 			 */
 			dp->get_page(dp, &page, &len, &offset);
 			bio_add_page(bio, page, logical_block_size, offset);
-			num_sectors = min_t(sector_t, q->limits.max_write_same_sectors, remaining);
+			num_sectors = min_t(sector_t, special_cmd_max_sectors, remaining);
 			bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT;
 
 			offset = 0;
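The fix above works by sampling the queue limit once, up front, into special_cmd_max_sectors, so the capability check and the per-bio size calculation can never disagree if the limit changes mid-flight (e.g. a stacked device losing discard support). A minimal user-space sketch of that sample-once pattern, with illustrative names throughout (max_discard_sectors and issue_discard here are stand-ins, not kernel APIs):

```c
/*
 * Sample-once pattern: read a limit that may change concurrently a
 * single time, then use that one snapshot for both the capability
 * check and the size calculation.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Stands in for q->limits.max_discard_sectors, which can change at runtime. */
static _Atomic uint32_t max_discard_sectors = 8;

static void issue_discard(uint64_t remaining)
{
	/* One snapshot; a later limit change cannot split the logic below. */
	uint32_t special_cmd_max_sectors = atomic_load(&max_discard_sectors);

	if (special_cmd_max_sectors == 0) {
		fprintf(stderr, "discard not supported\n"); /* like -EOPNOTSUPP */
		return;
	}
	while (remaining) {
		uint64_t num_sectors = remaining < special_cmd_max_sectors ?
				       remaining : special_cmd_max_sectors;
		printf("discard %llu sectors\n", (unsigned long long)num_sectors);
		remaining -= num_sectors;
	}
}

int main(void)
{
	issue_discard(20); /* two chunks of 8 sectors, then one of 4 */
	return 0;
}
```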
drivers/md/dm-snap.c
@@ -20,6 +20,8 @@
 #include <linux/log2.h>
 #include <linux/dm-kcopyd.h>
 
+#include "dm.h"
+
 #include "dm-exception-store.h"
 
 #define DM_MSG_PREFIX "snapshots"
@@ -290,6 +292,16 @@ struct origin {
 	struct list_head snapshots;
 };
 
+/*
+ * This structure is allocated for each origin target
+ */
+struct dm_origin {
+	struct dm_dev *dev;
+	struct dm_target *ti;
+	unsigned split_boundary;
+	struct list_head hash_list;
+};
+
 /*
  * Size of the hash table for origin volumes. If we make this
  * the size of the minors list then it should be nearly perfect
@@ -297,6 +309,7 @@ struct origin {
 #define ORIGIN_HASH_SIZE 256
 #define ORIGIN_MASK      0xFF
 static struct list_head *_origins;
+static struct list_head *_dm_origins;
 static struct rw_semaphore _origins_lock;
 
 static DECLARE_WAIT_QUEUE_HEAD(_pending_exceptions_done);
@@ -310,12 +323,22 @@ static int init_origin_hash(void)
 	_origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head),
 			   GFP_KERNEL);
 	if (!_origins) {
-		DMERR("unable to allocate memory");
+		DMERR("unable to allocate memory for _origins");
 		return -ENOMEM;
 	}
-
 	for (i = 0; i < ORIGIN_HASH_SIZE; i++)
 		INIT_LIST_HEAD(_origins + i);
+
+	_dm_origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head),
+			      GFP_KERNEL);
+	if (!_dm_origins) {
+		DMERR("unable to allocate memory for _dm_origins");
+		kfree(_origins);
+		return -ENOMEM;
+	}
+	for (i = 0; i < ORIGIN_HASH_SIZE; i++)
+		INIT_LIST_HEAD(_dm_origins + i);
+
 	init_rwsem(&_origins_lock);
 
 	return 0;
@@ -324,6 +347,7 @@ static int init_origin_hash(void)
 static void exit_origin_hash(void)
 {
 	kfree(_origins);
	kfree(_dm_origins);
 }
 
 static unsigned origin_hash(struct block_device *bdev)
@@ -350,6 +374,30 @@ static void __insert_origin(struct origin *o)
 	list_add_tail(&o->hash_list, sl);
 }
 
+static struct dm_origin *__lookup_dm_origin(struct block_device *origin)
+{
+	struct list_head *ol;
+	struct dm_origin *o;
+
+	ol = &_dm_origins[origin_hash(origin)];
+	list_for_each_entry (o, ol, hash_list)
+		if (bdev_equal(o->dev->bdev, origin))
+			return o;
+
+	return NULL;
+}
+
+static void __insert_dm_origin(struct dm_origin *o)
+{
+	struct list_head *sl = &_dm_origins[origin_hash(o->dev->bdev)];
+
+	list_add_tail(&o->hash_list, sl);
+}
+
+static void __remove_dm_origin(struct dm_origin *o)
+{
+	list_del(&o->hash_list);
+}
+
 /*
  * _origins_lock must be held when calling this function.
  * Returns number of snapshots registered using the supplied cow device, plus:
@@ -1840,9 +1888,40 @@ static int snapshot_preresume(struct dm_target *ti)
 static void snapshot_resume(struct dm_target *ti)
 {
 	struct dm_snapshot *s = ti->private;
-	struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;
+	struct dm_snapshot *snap_src = NULL, *snap_dest = NULL, *snap_merging = NULL;
+	struct dm_origin *o;
+	struct mapped_device *origin_md = NULL;
+	bool must_restart_merging = false;
+
+	down_read(&_origins_lock);
+
+	o = __lookup_dm_origin(s->origin->bdev);
+	if (o)
+		origin_md = dm_table_get_md(o->ti->table);
+	if (!origin_md) {
+		(void) __find_snapshots_sharing_cow(s, NULL, NULL, &snap_merging);
+		if (snap_merging)
+			origin_md = dm_table_get_md(snap_merging->ti->table);
+	}
+	if (origin_md == dm_table_get_md(ti->table))
+		origin_md = NULL;
+	if (origin_md) {
+		if (dm_hold(origin_md))
+			origin_md = NULL;
+	}
+
+	up_read(&_origins_lock);
+
+	if (origin_md) {
+		dm_internal_suspend_fast(origin_md);
+		if (snap_merging && test_bit(RUNNING_MERGE, &snap_merging->state_bits)) {
+			must_restart_merging = true;
+			stop_merge(snap_merging);
+		}
+	}
 
 	down_read(&_origins_lock);
+
 	(void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
 	if (snap_src && snap_dest) {
 		down_write(&snap_src->lock);
@@ -1851,8 +1930,16 @@ static void snapshot_resume(struct dm_target *ti)
 		up_write(&snap_dest->lock);
 		up_write(&snap_src->lock);
 	}
+
 	up_read(&_origins_lock);
 
+	if (origin_md) {
+		if (must_restart_merging)
+			start_merge(snap_merging);
+		dm_internal_resume_fast(origin_md);
+		dm_put(origin_md);
+	}
+
 	/* Now we have correct chunk size, reregister */
 	reregister_snapshot(s);
@@ -2133,11 +2220,6 @@ static int origin_write_extent(struct dm_snapshot *merging_snap,
  * Origin: maps a linear range of a device, with hooks for snapshotting.
  */
 
-struct dm_origin {
-	struct dm_dev *dev;
-	unsigned split_boundary;
-};
-
 /*
  * Construct an origin mapping: <dev_path>
  * The context for an origin is merely a 'struct dm_dev *'
@@ -2166,6 +2248,7 @@ static int origin_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 		goto bad_open;
 	}
 
+	o->ti = ti;
 	ti->private = o;
 	ti->num_flush_bios = 1;
 
@@ -2180,6 +2263,7 @@ static int origin_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 static void origin_dtr(struct dm_target *ti)
 {
 	struct dm_origin *o = ti->private;
+
 	dm_put_device(ti, o->dev);
 	kfree(o);
 }
@@ -2216,6 +2300,19 @@ static void origin_resume(struct dm_target *ti)
 	struct dm_origin *o = ti->private;
 
 	o->split_boundary = get_origin_minimum_chunksize(o->dev->bdev);
+
+	down_write(&_origins_lock);
+	__insert_dm_origin(o);
+	up_write(&_origins_lock);
+}
+
+static void origin_postsuspend(struct dm_target *ti)
+{
+	struct dm_origin *o = ti->private;
+
+	down_write(&_origins_lock);
+	__remove_dm_origin(o);
+	up_write(&_origins_lock);
 }
 
 static void origin_status(struct dm_target *ti, status_type_t type,
@@ -2258,12 +2355,13 @@ static int origin_iterate_devices(struct dm_target *ti,
 
 static struct target_type origin_target = {
 	.name    = "snapshot-origin",
-	.version = {1, 8, 1},
+	.version = {1, 9, 0},
 	.module  = THIS_MODULE,
 	.ctr     = origin_ctr,
 	.dtr     = origin_dtr,
 	.map     = origin_map,
 	.resume  = origin_resume,
+	.postsuspend = origin_postsuspend,
 	.status  = origin_status,
 	.merge   = origin_merge,
 	.iterate_devices = origin_iterate_devices,
@@ -2271,7 +2369,7 @@ static struct target_type origin_target = {
 
 static struct target_type snapshot_target = {
 	.name    = "snapshot",
-	.version = {1, 12, 0},
+	.version = {1, 13, 0},
 	.module  = THIS_MODULE,
 	.ctr     = snapshot_ctr,
 	.dtr     = snapshot_dtr,
@@ -2285,7 +2383,7 @@ static struct target_type snapshot_target = {
 
 static struct target_type merge_target = {
 	.name    = dm_snapshot_merge_target_name,
-	.version = {1, 2, 0},
+	.version = {1, 3, 0},
 	.module  = THIS_MODULE,
 	.ctr     = snapshot_ctr,
 	.dtr     = snapshot_dtr,
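The new _dm_origins table added above follows the same layout as the existing _origins table: a fixed-size array of list heads indexed by a hash of the block device, with lookups walking a single bucket under _origins_lock. A rough user-space sketch of that bucket-array pattern, singly linked and unlocked for brevity (all names here are illustrative stand-ins for the kernel's list_head machinery):

```c
/*
 * Fixed-size hash of buckets, keyed by device number, mirroring the
 * shape of _dm_origins (simplified: singly linked, no locking).
 */
#include <sys/types.h>
#include <stdio.h>

#define ORIGIN_HASH_SIZE 256
#define ORIGIN_MASK      0xFF

struct dm_origin {
	dev_t dev;               /* stands in for o->dev->bdev */
	struct dm_origin *next;
};

static struct dm_origin *dm_origins[ORIGIN_HASH_SIZE];

static unsigned origin_hash(dev_t dev)
{
	return dev & ORIGIN_MASK;
}

static void insert_dm_origin(struct dm_origin *o)
{
	unsigned h = origin_hash(o->dev);

	o->next = dm_origins[h];
	dm_origins[h] = o;
}

static struct dm_origin *lookup_dm_origin(dev_t dev)
{
	struct dm_origin *o;

	for (o = dm_origins[origin_hash(dev)]; o; o = o->next)
		if (o->dev == dev)
			return o;
	return NULL;
}

int main(void)
{
	struct dm_origin a = { .dev = 0x0800, .next = NULL };

	insert_dm_origin(&a);
	printf("found: %d\n", lookup_dm_origin(0x0800) != NULL);
	return 0;
}
```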
drivers/md/dm-thin.c
@@ -2358,17 +2358,6 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio)
 		return DM_MAPIO_REMAPPED;
 
 	case -ENODATA:
-		if (get_pool_mode(tc->pool) == PM_READ_ONLY) {
-			/*
-			 * This block isn't provisioned, and we have no way
-			 * of doing so.
-			 */
-			handle_unserviceable_bio(tc->pool, bio);
-			cell_defer_no_holder(tc, virt_cell);
-			return DM_MAPIO_SUBMITTED;
-		}
-		/* fall through */
-
 	case -EWOULDBLOCK:
 		thin_defer_cell(tc, virt_cell);
 		return DM_MAPIO_SUBMITTED;
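With the PM_READ_ONLY special case removed, a read that hits an unprovisioned block always takes the deferred path and comes back zero-filled, instead of being failed only when the pool happens to be read-only. A toy sketch of the resulting read semantics under that assumption (provisioned and thin_read are hypothetical stand-ins, not the driver's actual code path):

```c
/*
 * Toy model of the post-fix behavior: reads of unprovisioned blocks
 * are served as zeroes unconditionally, never failed for pool mode.
 */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool provisioned(unsigned long block)
{
	(void)block;
	return false; /* pretend nothing is mapped yet */
}

static void thin_read(unsigned long block, char *buf, size_t len)
{
	if (!provisioned(block)) {
		memset(buf, 0, len); /* zero-fill regardless of pool mode */
		return;
	}
	/* ... otherwise remap the read to the data device ... */
}

int main(void)
{
	char buf[16] = "stale data";

	thin_read(0, buf, sizeof(buf));
	printf("first byte: %d\n", buf[0]); /* prints 0 */
	return 0;
}
```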
drivers/md/dm.c
@@ -2616,6 +2616,19 @@ void dm_get(struct mapped_device *md)
 	BUG_ON(test_bit(DMF_FREEING, &md->flags));
 }
 
+int dm_hold(struct mapped_device *md)
+{
+	spin_lock(&_minor_lock);
+	if (test_bit(DMF_FREEING, &md->flags)) {
+		spin_unlock(&_minor_lock);
+		return -EBUSY;
+	}
+	dm_get(md);
+	spin_unlock(&_minor_lock);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(dm_hold);
+
 const char *dm_device_name(struct mapped_device *md)
 {
 	return md->name;
@@ -2638,10 +2651,16 @@ static void __dm_destroy(struct mapped_device *md, bool wait)
 	if (dm_request_based(md))
 		flush_kthread_worker(&md->kworker);
 
+	/*
+	 * Take suspend_lock so that presuspend and postsuspend methods
+	 * do not race with internal suspend.
+	 */
+	mutex_lock(&md->suspend_lock);
 	if (!dm_suspended_md(md)) {
 		dm_table_presuspend_targets(map);
 		dm_table_postsuspend_targets(map);
 	}
+	mutex_unlock(&md->suspend_lock);
 
 	/* dm_put_live_table must be before msleep, otherwise deadlock is possible */
 	dm_put_live_table(md, srcu_idx);
@@ -3115,6 +3134,7 @@ void dm_internal_suspend_fast(struct mapped_device *md)
 	flush_workqueue(md->wq);
 	dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);
 }
+EXPORT_SYMBOL_GPL(dm_internal_suspend_fast);
 
 void dm_internal_resume_fast(struct mapped_device *md)
 {
@@ -3126,6 +3146,7 @@ void dm_internal_resume_fast(struct mapped_device *md)
 done:
 	mutex_unlock(&md->suspend_lock);
 }
+EXPORT_SYMBOL_GPL(dm_internal_resume_fast);
 
 /*-----------------------------------------------------------------
  * Event notification.
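dm_hold() exists so that snapshot_resume() can safely pin the origin's mapped_device: both the DMF_FREEING test and the reference grab happen under _minor_lock, the same lock the teardown path holds while setting the flag, so a device mid-teardown is refused with -EBUSY rather than tripping dm_get()'s BUG_ON. A user-space sketch of that conditional-reference idiom, with a pthread mutex standing in for the kernel spinlock:

```c
/*
 * Conditional reference grab: only take a reference if teardown has
 * not started, deciding under the same lock the teardown path uses.
 */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>

struct mapped_device {
	pthread_mutex_t lock;  /* stands in for _minor_lock */
	int freeing;           /* stands in for DMF_FREEING */
	int refcount;
};

static int dm_hold(struct mapped_device *md)
{
	pthread_mutex_lock(&md->lock);
	if (md->freeing) {
		pthread_mutex_unlock(&md->lock);
		return -EBUSY; /* too late: destruction in progress */
	}
	md->refcount++; /* plays the role of dm_get() */
	pthread_mutex_unlock(&md->lock);
	return 0;
}

int main(void)
{
	struct mapped_device md = { PTHREAD_MUTEX_INITIALIZER, 0, 1 };

	printf("hold: %d\n", dm_hold(&md)); /* 0: reference taken */
	md.freeing = 1;
	printf("hold: %d\n", dm_hold(&md)); /* -EBUSY: refused */
	return 0;
}
```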
include/linux/device-mapper.h
@@ -375,6 +375,7 @@ int dm_create(int minor, struct mapped_device **md);
  */
 struct mapped_device *dm_get_md(dev_t dev);
 void dm_get(struct mapped_device *md);
+int dm_hold(struct mapped_device *md);
 void dm_put(struct mapped_device *md);
 
 /*