ceph: kill ceph_empty_snapc

ceph_empty_snapc->num_snaps == 0 at all times.  Passing such a snapc to
ceph_osdc_alloc_request() (possibly through ceph_osdc_new_request()) is
equivalent to passing NULL, as ceph_osdc_alloc_request() uses it only
for sizing the request message.

Further, in all four cases the subsequent ceph_osdc_build_request() is
passed NULL for snapc, meaning that 0 is encoded for seq and num_snaps
and making ceph_empty_snapc entirely useless.  The two cases where it
actually mattered were removed in commits 8605609049 ("ceph: avoid
sending unnecessary FLUSHSNAP message") and 23078637e0 ("ceph: fix
queuing inode to mdsdir's snaprealm").

Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
Reviewed-by: Yan, Zheng <zyan@redhat.com>
commit 34b759b4a2 (parent ce4355932a)
Ilya Dryomov, 2016-02-16 15:00:24 +01:00

4 changed files with 6 additions and 34 deletions
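
To illustrate the equivalence described above, here is a condensed sketch of the two spots in net/ceph/osd_client.c that the message refers to (paraphrased from the code of this era, not quoted verbatim):

/*
 * In ceph_osdc_alloc_request(), snapc only feeds into the size of the
 * request message, so a snapc with num_snaps == 0 sizes exactly like
 * snapc == NULL (condensed sketch, not the verbatim kernel code):
 */
msg_size += 8 * (snapc ? snapc->num_snaps : 0);	/* snaps array */

/*
 * In ceph_osdc_build_request(), a NULL snapc encodes 0 for both seq
 * and num_snaps -- the same wire bytes an empty snapc would produce:
 */
ceph_encode_64(&p, req->r_snapc ? req->r_snapc->seq : 0);
ceph_encode_32(&p, req->r_snapc ? req->r_snapc->num_snaps : 0);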

--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -1609,7 +1609,7 @@ int ceph_uninline_data(struct file *filp, struct page *locked_page)
 				    ceph_vino(inode), 0, &len, 0, 1,
 				    CEPH_OSD_OP_CREATE,
 				    CEPH_OSD_FLAG_ONDISK | CEPH_OSD_FLAG_WRITE,
-				    ceph_empty_snapc, 0, 0, false);
+				    NULL, 0, 0, false);
 	if (IS_ERR(req)) {
 		err = PTR_ERR(req);
 		goto out;
@@ -1627,9 +1627,8 @@ int ceph_uninline_data(struct file *filp, struct page *locked_page)
 				    ceph_vino(inode), 0, &len, 1, 3,
 				    CEPH_OSD_OP_WRITE,
 				    CEPH_OSD_FLAG_ONDISK | CEPH_OSD_FLAG_WRITE,
-				    ceph_empty_snapc,
-				    ci->i_truncate_seq, ci->i_truncate_size,
-				    false);
+				    NULL, ci->i_truncate_seq,
+				    ci->i_truncate_size, false);
 	if (IS_ERR(req)) {
 		err = PTR_ERR(req);
 		goto out;
@@ -1750,8 +1749,7 @@ static int __ceph_pool_perm_get(struct ceph_inode_info *ci, u32 pool)
 		goto out;
 	}
 
-	rd_req = ceph_osdc_alloc_request(&fsc->client->osdc,
-					 ceph_empty_snapc,
+	rd_req = ceph_osdc_alloc_request(&fsc->client->osdc, NULL,
 					 1, false, GFP_NOFS);
 	if (!rd_req) {
 		err = -ENOMEM;
@@ -1765,8 +1763,7 @@ static int __ceph_pool_perm_get(struct ceph_inode_info *ci, u32 pool)
 		 "%llx.00000000", ci->i_vino.ino);
 	rd_req->r_base_oid.name_len = strlen(rd_req->r_base_oid.name);
 
-	wr_req = ceph_osdc_alloc_request(&fsc->client->osdc,
-					 ceph_empty_snapc,
+	wr_req = ceph_osdc_alloc_request(&fsc->client->osdc, NULL,
 					 1, false, GFP_NOFS);
 	if (!wr_req) {
 		err = -ENOMEM;

--- a/fs/ceph/snap.c
+++ b/fs/ceph/snap.c
@@ -296,8 +296,6 @@ static int cmpu64_rev(const void *a, const void *b)
 	return 0;
 }
 
-struct ceph_snap_context *ceph_empty_snapc;
-
 /*
  * build the snap context for a given realm.
  */
@@ -987,17 +985,3 @@ void ceph_handle_snap(struct ceph_mds_client *mdsc,
 	up_write(&mdsc->snap_rwsem);
 	return;
 }
-
-int __init ceph_snap_init(void)
-{
-	ceph_empty_snapc = ceph_create_snap_context(0, GFP_NOFS);
-	if (!ceph_empty_snapc)
-		return -ENOMEM;
-
-	ceph_empty_snapc->seq = 1;
-	return 0;
-}
-
-void ceph_snap_exit(void)
-{
-	ceph_put_snap_context(ceph_empty_snapc);
-}

--- a/fs/ceph/super.c
+++ b/fs/ceph/super.c
@@ -1042,19 +1042,14 @@ static int __init init_ceph(void)
 
 	ceph_flock_init();
 	ceph_xattr_init();
-	ret = ceph_snap_init();
-	if (ret)
-		goto out_xattr;
 	ret = register_filesystem(&ceph_fs_type);
 	if (ret)
-		goto out_snap;
+		goto out_xattr;
 
 	pr_info("loaded (mds proto %d)\n", CEPH_MDSC_PROTOCOL);
 
 	return 0;
 
-out_snap:
-	ceph_snap_exit();
 out_xattr:
 	ceph_xattr_exit();
 	destroy_caches();
@@ -1066,7 +1061,6 @@ static void __exit exit_ceph(void)
 {
 	dout("exit_ceph\n");
 	unregister_filesystem(&ceph_fs_type);
-	ceph_snap_exit();
 	ceph_xattr_exit();
 	destroy_caches();
 }

--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -720,7 +720,6 @@ static inline int default_congestion_kb(void)
 }
 
 /* snap.c */
-extern struct ceph_snap_context *ceph_empty_snapc;
 struct ceph_snap_realm *ceph_lookup_snap_realm(struct ceph_mds_client *mdsc,
 					       u64 ino);
 extern void ceph_get_snap_realm(struct ceph_mds_client *mdsc,
@@ -737,8 +736,6 @@ extern void ceph_queue_cap_snap(struct ceph_inode_info *ci);
 extern int __ceph_finish_cap_snap(struct ceph_inode_info *ci,
 				  struct ceph_cap_snap *capsnap);
 extern void ceph_cleanup_empty_realms(struct ceph_mds_client *mdsc);
-extern int ceph_snap_init(void);
-extern void ceph_snap_exit(void);
 
 /*
  * a cap_snap is "pending" if it is still awaiting an in-progress