From 2f18d46683cb3047c41229d57cf7c6e2ee48676f Mon Sep 17 00:00:00 2001
From: Ilya Dryomov
Date: Wed, 4 Apr 2018 10:15:38 +0200
Subject: [PATCH 1/6] rbd: refactor rbd_wait_state_locked()

In preparation for the lock_timeout option, make rbd_wait_state_locked()
return error codes.

Signed-off-by: Ilya Dryomov
---
 drivers/block/rbd.c | 43 ++++++++++++++++++++++++++-----------------
 1 file changed, 26 insertions(+), 17 deletions(-)

diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 07dc5419bd63..f4b1b91e6d4d 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -3533,9 +3533,21 @@ static int rbd_obj_method_sync(struct rbd_device *rbd_dev,
 /*
  * lock_rwsem must be held for read
  */
-static void rbd_wait_state_locked(struct rbd_device *rbd_dev)
+static int rbd_wait_state_locked(struct rbd_device *rbd_dev, bool may_acquire)
 {
 	DEFINE_WAIT(wait);
+	int ret = 0;
+
+	if (test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags))
+		return -EBLACKLISTED;
+
+	if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED)
+		return 0;
+
+	if (!may_acquire) {
+		rbd_warn(rbd_dev, "exclusive lock required");
+		return -EROFS;
+	}
 
 	do {
 		/*
@@ -3549,10 +3561,14 @@ static void rbd_wait_state_locked(struct rbd_device *rbd_dev)
 		up_read(&rbd_dev->lock_rwsem);
 		schedule();
 		down_read(&rbd_dev->lock_rwsem);
-	} while (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED &&
-		 !test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags));
+		if (test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags)) {
+			ret = -EBLACKLISTED;
+			break;
+		}
+	} while (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED);
 
 	finish_wait(&rbd_dev->lock_waitq, &wait);
+	return ret;
 }
 
 static void rbd_queue_workfn(struct work_struct *work)
@@ -3638,19 +3654,10 @@ static void rbd_queue_workfn(struct work_struct *work)
 		    (op_type != OBJ_OP_READ || rbd_dev->opts->lock_on_read);
 	if (must_be_locked) {
 		down_read(&rbd_dev->lock_rwsem);
-		if (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED &&
-		    !test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags)) {
-			if (rbd_dev->opts->exclusive) {
-				rbd_warn(rbd_dev, "exclusive lock required");
-				result = -EROFS;
-				goto err_unlock;
-			}
-			rbd_wait_state_locked(rbd_dev);
-		}
-		if (test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags)) {
-			result = -EBLACKLISTED;
+		result = rbd_wait_state_locked(rbd_dev,
+					       !rbd_dev->opts->exclusive);
+		if (result)
 			goto err_unlock;
-		}
 	}
 
 	img_request = rbd_img_request_create(rbd_dev, op_type, snapc);
@@ -5216,6 +5223,8 @@ static void rbd_dev_image_unlock(struct rbd_device *rbd_dev)
 
 static int rbd_add_acquire_lock(struct rbd_device *rbd_dev)
 {
+	int ret;
+
 	if (!(rbd_dev->header.features & RBD_FEATURE_EXCLUSIVE_LOCK)) {
 		rbd_warn(rbd_dev, "exclusive-lock feature is not enabled");
 		return -EINVAL;
@@ -5223,9 +5232,9 @@ static int rbd_add_acquire_lock(struct rbd_device *rbd_dev)
 
 	/* FIXME: "rbd map --exclusive" should be in interruptible */
 	down_read(&rbd_dev->lock_rwsem);
-	rbd_wait_state_locked(rbd_dev);
+	ret = rbd_wait_state_locked(rbd_dev, true);
 	up_read(&rbd_dev->lock_rwsem);
-	if (test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags)) {
+	if (ret) {
 		rbd_warn(rbd_dev, "failed to acquire exclusive lock");
 		return -EROFS;
 	}
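With this refactor, callers no longer re-check the blacklisted flag themselves;
they act on the return value instead. A minimal sketch of the resulting caller
pattern, condensed from the rbd_queue_workfn() hunk above (lock_rwsem held for
read, as the function requires):

	down_read(&rbd_dev->lock_rwsem);
	result = rbd_wait_state_locked(rbd_dev, !rbd_dev->opts->exclusive);
	if (result)	/* -EBLACKLISTED, or -EROFS when may_acquire is false */
		goto err_unlock;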
From 34f55d0b3a0a39c95134c0c89173893b846d4c80 Mon Sep 17 00:00:00 2001
From: Dongsheng Yang
Date: Mon, 26 Mar 2018 10:22:55 -0400
Subject: [PATCH 2/6] rbd: support timeout in rbd_wait_state_locked()

Currently, rbd_wait_state_locked() will wait forever if we can't get
our state locked.

Example:

 rbd map --exclusive test1  --> /dev/rbd0
 rbd map test1              --> /dev/rbd1
 dd if=/dev/zero of=/dev/rbd1 bs=1M count=1  --> IO blocked

To avoid this problem, this patch introduces a timeout in
rbd_wait_state_locked(), making it return an error when the timeout is
reached. The new lock_timeout option lets the user set this timeout
when mapping an image.

Signed-off-by: Dongsheng Yang
Reviewed-by: Ilya Dryomov
Signed-off-by: Ilya Dryomov
---
 drivers/block/rbd.c | 22 +++++++++++++++++++++-
 1 file changed, 21 insertions(+), 1 deletion(-)

diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index f4b1b91e6d4d..d5a51493e8b5 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -732,6 +732,7 @@ static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts)
  */
 enum {
 	Opt_queue_depth,
+	Opt_lock_timeout,
 	Opt_last_int,
 	/* int args above */
 	Opt_last_string,
@@ -745,6 +746,7 @@ enum {
 
 static match_table_t rbd_opts_tokens = {
 	{Opt_queue_depth, "queue_depth=%d"},
+	{Opt_lock_timeout, "lock_timeout=%d"},
 	/* int args above */
 	/* string args above */
 	{Opt_read_only, "read_only"},
@@ -758,12 +760,14 @@ static match_table_t rbd_opts_tokens = {
 
 struct rbd_options {
 	int queue_depth;
+	unsigned long lock_timeout;
 	bool read_only;
 	bool lock_on_read;
 	bool exclusive;
 };
 
 #define RBD_QUEUE_DEPTH_DEFAULT BLKDEV_MAX_RQ
+#define RBD_LOCK_TIMEOUT_DEFAULT 0  /* no timeout */
 #define RBD_READ_ONLY_DEFAULT false
 #define RBD_LOCK_ON_READ_DEFAULT false
 #define RBD_EXCLUSIVE_DEFAULT false
@@ -796,6 +800,14 @@ static int parse_rbd_opts_token(char *c, void *private)
 		}
 		rbd_opts->queue_depth = intval;
 		break;
+	case Opt_lock_timeout:
+		/* 0 is "wait forever" (i.e. infinite timeout) */
+		if (intval < 0 || intval > INT_MAX / 1000) {
+			pr_err("lock_timeout out of range\n");
+			return -EINVAL;
+		}
+		rbd_opts->lock_timeout = msecs_to_jiffies(intval * 1000);
+		break;
 	case Opt_read_only:
 		rbd_opts->read_only = true;
 		break;
@@ -3536,6 +3548,7 @@ static int rbd_obj_method_sync(struct rbd_device *rbd_dev,
 static int rbd_wait_state_locked(struct rbd_device *rbd_dev, bool may_acquire)
 {
 	DEFINE_WAIT(wait);
+	unsigned long timeout;
 	int ret = 0;
 
 	if (test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags))
@@ -3559,12 +3572,18 @@ static int rbd_wait_state_locked(struct rbd_device *rbd_dev, bool may_acquire)
 		prepare_to_wait_exclusive(&rbd_dev->lock_waitq, &wait,
 					  TASK_UNINTERRUPTIBLE);
 		up_read(&rbd_dev->lock_rwsem);
-		schedule();
+		timeout = schedule_timeout(ceph_timeout_jiffies(
+						rbd_dev->opts->lock_timeout));
 		down_read(&rbd_dev->lock_rwsem);
 		if (test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags)) {
 			ret = -EBLACKLISTED;
 			break;
 		}
+		if (!timeout) {
+			rbd_warn(rbd_dev, "timed out waiting for lock");
+			ret = -ETIMEDOUT;
+			break;
+		}
 	} while (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED);
 
 	finish_wait(&rbd_dev->lock_waitq, &wait);
@@ -5186,6 +5205,7 @@ static int rbd_add_parse_args(const char *buf,
 
 	rbd_opts->read_only = RBD_READ_ONLY_DEFAULT;
 	rbd_opts->queue_depth = RBD_QUEUE_DEPTH_DEFAULT;
+	rbd_opts->lock_timeout = RBD_LOCK_TIMEOUT_DEFAULT;
 	rbd_opts->lock_on_read = RBD_LOCK_ON_READ_DEFAULT;
 	rbd_opts->exclusive = RBD_EXCLUSIVE_DEFAULT;
 
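The timeout is given in seconds on the command line and stored in jiffies; the
default of 0 keeps the old wait-forever behaviour. A small sketch of the
conversion path, assuming ceph_timeout_jiffies() (a libceph helper) maps 0 to
MAX_SCHEDULE_TIMEOUT and that the usual -o map-option syntax is used:

	/* hypothetical mapping: rbd map -o lock_timeout=30 test1 */
	rbd_opts->lock_timeout = msecs_to_jiffies(30 * 1000);	/* 30 s in jiffies */

	timeout = schedule_timeout(ceph_timeout_jiffies(rbd_dev->opts->lock_timeout));
	if (!timeout)
		ret = -ETIMEDOUT;	/* the blocked dd from the example fails instead of hanging */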
From ffdeec7aa41aa61ca4ee68fddf4669df9ce661d1 Mon Sep 17 00:00:00 2001
From: "Yan, Zheng"
Date: Mon, 26 Mar 2018 16:46:39 +0800
Subject: [PATCH 3/6] ceph: always update atime/mtime/ctime for new inode

For a new inode, atime/mtime/ctime are uninitialized. Don't compare
against them.

Cc: stable@kernel.org
Signed-off-by: "Yan, Zheng"
Reviewed-by: Ilya Dryomov
Signed-off-by: Ilya Dryomov
---
 fs/ceph/inode.c | 10 +++++++---
 1 file changed, 7 insertions(+), 3 deletions(-)

diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index 8bf60250309e..ae056927080d 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -669,13 +669,15 @@ void ceph_fill_file_time(struct inode *inode, int issued,
 		      CEPH_CAP_FILE_BUFFER|
 		      CEPH_CAP_AUTH_EXCL|
 		      CEPH_CAP_XATTR_EXCL)) {
-		if (timespec_compare(ctime, &inode->i_ctime) > 0) {
+		if (ci->i_version == 0 ||
+		    timespec_compare(ctime, &inode->i_ctime) > 0) {
 			dout("ctime %ld.%09ld -> %ld.%09ld inc w/ cap\n",
 			     inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec,
 			     ctime->tv_sec, ctime->tv_nsec);
 			inode->i_ctime = *ctime;
 		}
-		if (ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) > 0) {
+		if (ci->i_version == 0 ||
+		    ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) > 0) {
 			/* the MDS did a utimes() */
 			dout("mtime %ld.%09ld -> %ld.%09ld "
 			     "tw %d -> %d\n",
@@ -795,7 +797,6 @@ static int fill_inode(struct inode *inode, struct page *locked_page,
 	new_issued = ~issued & le32_to_cpu(info->cap.caps);
 
 	/* update inode */
-	ci->i_version = le64_to_cpu(info->version);
 	inode->i_rdev = le32_to_cpu(info->rdev);
 	inode->i_blkbits = fls(le32_to_cpu(info->layout.fl_stripe_unit)) - 1;
 
@@ -868,6 +869,9 @@ static int fill_inode(struct inode *inode, struct page *locked_page,
 		xattr_blob = NULL;
 	}
 
+	/* finally update i_version */
+	ci->i_version = le64_to_cpu(info->version);
+
 	inode->i_mapping->a_ops = &ceph_aops;
 
 	switch (inode->i_mode & S_IFMT) {
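Moving the ci->i_version update to the end of fill_inode() lets the earlier
ceph_fill_file_time() call still see ci->i_version == 0 and recognise a freshly
allocated inode whose times were never set. A condensed sketch of the resulting
check, simplified from the first hunk above:

	if (ci->i_version == 0 ||	/* new inode: i_ctime is uninitialized */
	    timespec_compare(ctime, &inode->i_ctime) > 0)
		inode->i_ctime = *ctime;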
Fixes: 3da691bf4366 ("rbd: new request handling code")
Signed-off-by: Arnd Bergmann
Reviewed-by: Ilya Dryomov
Signed-off-by: Ilya Dryomov
---
 drivers/block/rbd.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index d5a51493e8b5..e40e490ff967 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -1404,7 +1404,7 @@ static bool rbd_img_is_write(struct rbd_img_request *img_req)
 	case OBJ_OP_DISCARD:
 		return true;
 	default:
-		rbd_assert(0);
+		BUG();
 	}
 }
 
@@ -2478,7 +2478,7 @@ static bool rbd_obj_handle_write(struct rbd_obj_request *obj_req)
 		}
 		return false;
 	default:
-		rbd_assert(0);
+		BUG();
 	}
 }
 
@@ -2506,7 +2506,7 @@ static bool __rbd_obj_handle_request(struct rbd_obj_request *obj_req)
 		}
 		return false;
 	default:
-		rbd_assert(0);
+		BUG();
 	}
 }
 

From 420efbdf4d2358dc12913298ad44d041c6ac0ed6 Mon Sep 17 00:00:00 2001
From: Ilya Dryomov
Date: Mon, 16 Apr 2018 09:32:18 +0200
Subject: [PATCH 5/6] rbd: adjust queue limits for "fancy" striping

In order to take full advantage of merging in ceph_file_to_extents(),
allow object set sized I/Os. If the layout is not "fancy", an object
set consists of just one object.

Signed-off-by: Ilya Dryomov
---
 drivers/block/rbd.c | 17 ++++++++---------
 1 file changed, 8 insertions(+), 9 deletions(-)

diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index e40e490ff967..6a1805858b79 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -3928,7 +3928,8 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
 {
 	struct gendisk *disk;
 	struct request_queue *q;
-	u64 segment_size;
+	unsigned int objset_bytes =
+	    rbd_dev->layout.object_size * rbd_dev->layout.stripe_count;
 	int err;
 
 	/* create gendisk info */
@@ -3968,20 +3969,18 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
 	blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
 	/* QUEUE_FLAG_ADD_RANDOM is off by default for blk-mq */
 
-	/* set io sizes to object size */
-	segment_size = rbd_obj_bytes(&rbd_dev->header);
-	blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE);
+	blk_queue_max_hw_sectors(q, objset_bytes >> SECTOR_SHIFT);
 	q->limits.max_sectors = queue_max_hw_sectors(q);
 	blk_queue_max_segments(q, USHRT_MAX);
 	blk_queue_max_segment_size(q, UINT_MAX);
-	blk_queue_io_min(q, segment_size);
-	blk_queue_io_opt(q, segment_size);
+	blk_queue_io_min(q, objset_bytes);
+	blk_queue_io_opt(q, objset_bytes);
 
 	/* enable the discard support */
 	blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
-	q->limits.discard_granularity = segment_size;
-	blk_queue_max_discard_sectors(q, segment_size / SECTOR_SIZE);
-	blk_queue_max_write_zeroes_sectors(q, segment_size / SECTOR_SIZE);
+	q->limits.discard_granularity = objset_bytes;
+	blk_queue_max_discard_sectors(q, objset_bytes >> SECTOR_SHIFT);
+	blk_queue_max_write_zeroes_sectors(q, objset_bytes >> SECTOR_SHIFT);
 
 	if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC))
 		q->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES;
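An object set spans object_size * stripe_count bytes, so the limits now scale
with the striping layout instead of a single object; a plain layout has
stripe_count == 1 and behaves as before. A worked example under assumed layout
values (4 MiB objects, stripe_count of 16):

	/* assumed layout: object_size = 4 MiB, stripe_count = 16 */
	unsigned int objset_bytes = 4U * 1024 * 1024 * 16;		/* 64 MiB */
	blk_queue_max_hw_sectors(q, objset_bytes >> SECTOR_SHIFT);	/* 131072 512-byte sectors */
	blk_queue_io_opt(q, objset_bytes);				/* optimal I/O size: one object set */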
From d93605407af34eb0b7eb8aff6b1eae2cde3cdd22 Mon Sep 17 00:00:00 2001
From: Ilya Dryomov
Date: Fri, 23 Mar 2018 06:14:47 +0100
Subject: [PATCH 6/6] rbd: notrim map option

Add an option to turn off discard and write zeroes offload support to
avoid deprovisioning a fully provisioned image. When enabled, discard
requests will fail with -EOPNOTSUPP and write zeroes requests will fall
back to manual zeroing.

Signed-off-by: Ilya Dryomov
Tested-by: Hitoshi Kamei
---
 drivers/block/rbd.c | 19 ++++++++++++++-----
 1 file changed, 14 insertions(+), 5 deletions(-)

diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 6a1805858b79..8e8b04cc569a 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -741,6 +741,7 @@ enum {
 	Opt_read_write,
 	Opt_lock_on_read,
 	Opt_exclusive,
+	Opt_notrim,
 	Opt_err
 };
 
@@ -755,6 +756,7 @@ static match_table_t rbd_opts_tokens = {
 	{Opt_read_write, "rw"},		/* Alternate spelling */
 	{Opt_lock_on_read, "lock_on_read"},
 	{Opt_exclusive, "exclusive"},
+	{Opt_notrim, "notrim"},
 	{Opt_err, NULL}
 };
 
@@ -764,6 +766,7 @@ struct rbd_options {
 	bool read_only;
 	bool lock_on_read;
 	bool exclusive;
+	bool trim;
 };
 
 #define RBD_QUEUE_DEPTH_DEFAULT BLKDEV_MAX_RQ
@@ -771,6 +774,7 @@ struct rbd_options {
 #define RBD_READ_ONLY_DEFAULT false
 #define RBD_LOCK_ON_READ_DEFAULT false
 #define RBD_EXCLUSIVE_DEFAULT false
+#define RBD_TRIM_DEFAULT true
 
 static int parse_rbd_opts_token(char *c, void *private)
 {
@@ -820,6 +824,9 @@ static int parse_rbd_opts_token(char *c, void *private)
 	case Opt_exclusive:
 		rbd_opts->exclusive = true;
 		break;
+	case Opt_notrim:
+		rbd_opts->trim = false;
+		break;
 	default:
 		/* libceph prints "bad option" msg */
 		return -EINVAL;
@@ -3976,11 +3983,12 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
 	blk_queue_io_min(q, objset_bytes);
 	blk_queue_io_opt(q, objset_bytes);
 
-	/* enable the discard support */
-	blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
-	q->limits.discard_granularity = objset_bytes;
-	blk_queue_max_discard_sectors(q, objset_bytes >> SECTOR_SHIFT);
-	blk_queue_max_write_zeroes_sectors(q, objset_bytes >> SECTOR_SHIFT);
+	if (rbd_dev->opts->trim) {
+		blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
+		q->limits.discard_granularity = objset_bytes;
+		blk_queue_max_discard_sectors(q, objset_bytes >> SECTOR_SHIFT);
+		blk_queue_max_write_zeroes_sectors(q, objset_bytes >> SECTOR_SHIFT);
+	}
 
 	if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC))
 		q->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES;
@@ -5207,6 +5215,7 @@ static int rbd_add_parse_args(const char *buf,
 	rbd_opts->lock_timeout = RBD_LOCK_TIMEOUT_DEFAULT;
 	rbd_opts->lock_on_read = RBD_LOCK_ON_READ_DEFAULT;
 	rbd_opts->exclusive = RBD_EXCLUSIVE_DEFAULT;
+	rbd_opts->trim = RBD_TRIM_DEFAULT;
 
 	copts = ceph_parse_options(options, mon_addrs,
 				   mon_addrs + mon_addrs_size - 1,
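Mapping with notrim simply skips the discard/write-zeroes queue setup above, so
discard requests are rejected up front and write zeroes requests take the
manual-zeroing path. A hypothetical invocation and its expected effect, in the
style of the earlier example (assuming the usual -o map-option syntax):

 rbd map -o notrim test1   --> /dev/rbd0
 blkdiscard /dev/rbd0      --> fails with EOPNOTSUPP instead of deprovisioning the image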