NFS client updates for Linux 3.15
Highlights include:

- Stable fix for a use after free issue in the NFSv4.1 open code
- Fix the SUNRPC bi-directional RPC code to account for TCP segmentation
- Optimise usage of readdirplus when confronted with 'ls -l' situations
- Soft mount bugfixes
- NFS over RDMA bugfixes
- NFSv4 close locking fixes
- Various NFSv4.x client state management optimisations
- Rename/unlink code cleanups

-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1

iQIcBAABAgAGBQJTQBayAAoJEGcL54qWCgDyUzgQAKzSlbcksMQT55M/KZJXabNW
KSctJeDrkTkRxOXTNxuF9NbIgeqenLijCokXty6BIUgup0zkOPMzFfRfgdQvplnp
YEj4sOEXEZ8CX+PoUTYOEayzt0ssEAOyidumiM+Gx2LD/E1d2xyCL7YaAOjIhVQS
OnXcX1cZw+dZSUxC9vu5fVDjrphJTnp4CXdbvR5PiJiXeKqzZd9e5M3hXgpAQ/AS
mWjYeUvM9mwyz7UmbLKkWEmzB3tFlGdTzDPxLRrkfcOSKI2Ham0lL3/Uv50/nRTu
99ts6KH8KLGcUuL9vD9KRebht2f71usBrWAdvpy1cUcf1Fh6lmEg4ktGfkqldaUu
9kNu9d5DCxJoGc6R2UTw5FeyPwYuDWoBwEGy1DcguJ5CeQn2R2nH4ps/P3J3DX4d
DZsJqCY9idKZCQhtyR0iF9j3x2bNFoENaL6WHI6b0J+xjMedIbHgeUQzIQP0RLBJ
h0IcjK0D+e7WdyC7jk4Nm3krtms5SNUG5/N9OUO36a7v8735PJBcbcgm9hZJt8Fh
t/4vqUmKIBXHioHsMhaFslqTWlYIR9a3MYmN7QtHFYbqUfNxH69v9y3d6jb4Igck
kqoEiui5aJOCR76s7oVdHCcm+klBwEPiACT+H9CUMzSoKzHSWsBSNZbJR3BEia4M
7dwScS1OfI2KuutshGQA
=weNx
-----END PGP SIGNATURE-----

Merge tag 'nfs-for-3.15-1' of git://git.linux-nfs.org/projects/trondmy/linux-nfs

Pull NFS client updates from Trond Myklebust:
 "Highlights include:

  - Stable fix for a use after free issue in the NFSv4.1 open code
  - Fix the SUNRPC bi-directional RPC code to account for TCP segmentation
  - Optimise usage of readdirplus when confronted with 'ls -l' situations
  - Soft mount bugfixes
  - NFS over RDMA bugfixes
  - NFSv4 close locking fixes
  - Various NFSv4.x client state management optimisations
  - Rename/unlink code cleanups"

* tag 'nfs-for-3.15-1' of git://git.linux-nfs.org/projects/trondmy/linux-nfs: (28 commits)
  nfs: pass string length to pr_notice message about readdir loops
  NFSv4: Fix a use-after-free problem in open()
  SUNRPC: rpc_restart_call/rpc_restart_call_prepare should clear task->tk_status
  SUNRPC: Don't let rpc_delay() clobber non-timeout errors
  SUNRPC: Ensure call_connect_status() deals correctly with SOFTCONN tasks
  SUNRPC: Ensure call_status() deals correctly with SOFTCONN tasks
  NFSv4: Ensure we respect soft mount timeouts during trunking discovery
  NFSv4: Schedule recovery if nfs40_walk_client_list() is interrupted
  NFS: advertise only supported callback netids
  SUNRPC: remove KERN_INFO from dprintk() call sites
  SUNRPC: Fix large reads on NFS/RDMA
  NFS: Clean up: revert increase in READDIR RPC buffer max size
  SUNRPC: Ensure that call_bind times out correctly
  SUNRPC: Ensure that call_connect times out correctly
  nfs: emit a fsnotify_nameremove call in sillyrename codepath
  nfs: remove synchronous rename code
  nfs: convert nfs_rename to use async_rename infrastructure
  nfs: make nfs_async_rename non-static
  nfs: abstract out code needed to complete a sillyrename
  NFSv4: Clear the open state flags if the new stateid does not match
  ...
commit 2b3a8fd735
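Note on the sequence-number comparisons touched by this series: nfs4_stateid_is_newer() in fs/nfs/nfs4_fs.h and the pnfs_seqid_is_newer() fix in fs/nfs/pnfs.c both rely on wraparound-safe comparison of 32-bit sequence ids. The following is a small standalone sketch of that idiom, not kernel code; the helper name and the printf driver are illustrative only.

	#include <stdint.h>
	#include <stdio.h>

	/* Wraparound-safe "s1 is newer than s2" for 32-bit sequence numbers.
	 * The subtraction is done in unsigned arithmetic (well defined, wraps
	 * modulo 2^32) and only the result is reinterpreted as signed.
	 * Casting each operand to a signed type first, as the old
	 * pnfs_seqid_is_newer() did, can overflow near the wrap point, which
	 * is undefined behaviour in C. */
	static int seqid_is_newer(uint32_t s1, uint32_t s2)
	{
		return (int32_t)(s1 - s2) > 0;
	}

	int main(void)
	{
		/* 1 is newer than 0xffffffff: the counter has wrapped. */
		printf("%d\n", seqid_is_newer(1, 0xffffffffu));	/* prints 1 */
		printf("%d\n", seqid_is_newer(5, 10));			/* prints 0 */
		return 0;
	}

The same serial-number trick is what lets a client accept a stateid whose seqid has wrapped without treating it as stale.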
fs/nfs/callback_proc.c

@@ -112,7 +112,8 @@ __be32 nfs4_callback_recall(struct cb_recallargs *args, void *dummy,
 * TODO: keep track of all layouts (and delegations) in a hash table
 * hashed by filehandle.
 */
static struct pnfs_layout_hdr * get_layout_by_fh_locked(struct nfs_client *clp, struct nfs_fh *fh)
static struct pnfs_layout_hdr * get_layout_by_fh_locked(struct nfs_client *clp,
		struct nfs_fh *fh, nfs4_stateid *stateid)
{
	struct nfs_server *server;
	struct inode *ino;

@@ -120,17 +121,19 @@ static struct pnfs_layout_hdr * get_layout_by_fh_locked(struct nfs_client *clp,

	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
		list_for_each_entry(lo, &server->layouts, plh_layouts) {
			if (!nfs4_stateid_match_other(&lo->plh_stateid, stateid))
				continue;
			if (nfs_compare_fh(fh, &NFS_I(lo->plh_inode)->fh))
				continue;
			ino = igrab(lo->plh_inode);
			if (!ino)
				continue;
				break;
			spin_lock(&ino->i_lock);
			/* Is this layout in the process of being freed? */
			if (NFS_I(ino)->layout != lo) {
				spin_unlock(&ino->i_lock);
				iput(ino);
				continue;
				break;
			}
			pnfs_get_layout_hdr(lo);
			spin_unlock(&ino->i_lock);

@@ -141,13 +144,14 @@ static struct pnfs_layout_hdr * get_layout_by_fh_locked(struct nfs_client *clp,
	return NULL;
}

static struct pnfs_layout_hdr * get_layout_by_fh(struct nfs_client *clp, struct nfs_fh *fh)
static struct pnfs_layout_hdr * get_layout_by_fh(struct nfs_client *clp,
		struct nfs_fh *fh, nfs4_stateid *stateid)
{
	struct pnfs_layout_hdr *lo;

	spin_lock(&clp->cl_lock);
	rcu_read_lock();
	lo = get_layout_by_fh_locked(clp, fh);
	lo = get_layout_by_fh_locked(clp, fh, stateid);
	rcu_read_unlock();
	spin_unlock(&clp->cl_lock);

@@ -162,9 +166,9 @@ static u32 initiate_file_draining(struct nfs_client *clp,
	u32 rv = NFS4ERR_NOMATCHING_LAYOUT;
	LIST_HEAD(free_me_list);

	lo = get_layout_by_fh(clp, &args->cbl_fh);
	lo = get_layout_by_fh(clp, &args->cbl_fh, &args->cbl_stateid);
	if (!lo)
		return NFS4ERR_NOMATCHING_LAYOUT;
		goto out;

	ino = lo->plh_inode;
	spin_lock(&ino->i_lock);

@@ -179,6 +183,7 @@ static u32 initiate_file_draining(struct nfs_client *clp,
	pnfs_free_lseg_list(&free_me_list);
	pnfs_put_layout_hdr(lo);
	iput(ino);
out:
	return rv;
}
fs/nfs/dir.c | 62

@@ -69,21 +69,28 @@ const struct address_space_operations nfs_dir_aops = {

static struct nfs_open_dir_context *alloc_nfs_open_dir_context(struct inode *dir, struct rpc_cred *cred)
{
	struct nfs_inode *nfsi = NFS_I(dir);
	struct nfs_open_dir_context *ctx;
	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	if (ctx != NULL) {
		ctx->duped = 0;
		ctx->attr_gencount = NFS_I(dir)->attr_gencount;
		ctx->attr_gencount = nfsi->attr_gencount;
		ctx->dir_cookie = 0;
		ctx->dup_cookie = 0;
		ctx->cred = get_rpccred(cred);
		spin_lock(&dir->i_lock);
		list_add(&ctx->list, &nfsi->open_files);
		spin_unlock(&dir->i_lock);
		return ctx;
	}
	return ERR_PTR(-ENOMEM);
}

static void put_nfs_open_dir_context(struct nfs_open_dir_context *ctx)
static void put_nfs_open_dir_context(struct inode *dir, struct nfs_open_dir_context *ctx)
{
	spin_lock(&dir->i_lock);
	list_del(&ctx->list);
	spin_unlock(&dir->i_lock);
	put_rpccred(ctx->cred);
	kfree(ctx);
}

@@ -126,7 +133,7 @@ nfs_opendir(struct inode *inode, struct file *filp)
static int
nfs_closedir(struct inode *inode, struct file *filp)
{
	put_nfs_open_dir_context(filp->private_data);
	put_nfs_open_dir_context(filp->f_path.dentry->d_inode, filp->private_data);
	return 0;
}

@@ -306,10 +313,9 @@ int nfs_readdir_search_for_cookie(struct nfs_cache_array *array, nfs_readdir_des
		if (printk_ratelimit()) {
			pr_notice("NFS: directory %pD2 contains a readdir loop."
					"Please contact your server vendor. "
					"The file: %s has duplicate cookie %llu\n",
					desc->file,
					array->array[i].string.name,
					*desc->dir_cookie);
					"The file: %.*s has duplicate cookie %llu\n",
					desc->file, array->array[i].string.len,
					array->array[i].string.name, *desc->dir_cookie);
		}
		status = -ELOOP;
		goto out;

@@ -437,6 +443,22 @@ void nfs_advise_use_readdirplus(struct inode *dir)
	set_bit(NFS_INO_ADVISE_RDPLUS, &NFS_I(dir)->flags);
}

/*
 * This function is mainly for use by nfs_getattr().
 *
 * If this is an 'ls -l', we want to force use of readdirplus.
 * Do this by checking if there is an active file descriptor
 * and calling nfs_advise_use_readdirplus, then forcing a
 * cache flush.
 */
void nfs_force_use_readdirplus(struct inode *dir)
{
	if (!list_empty(&NFS_I(dir)->open_files)) {
		nfs_advise_use_readdirplus(dir);
		nfs_zap_mapping(dir, dir->i_mapping);
	}
}

static
void nfs_prime_dcache(struct dentry *parent, struct nfs_entry *entry)
{

@@ -815,6 +837,17 @@ int uncached_readdir(nfs_readdir_descriptor_t *desc)
	goto out;
}

static bool nfs_dir_mapping_need_revalidate(struct inode *dir)
{
	struct nfs_inode *nfsi = NFS_I(dir);

	if (nfs_attribute_cache_expired(dir))
		return true;
	if (nfsi->cache_validity & NFS_INO_INVALID_DATA)
		return true;
	return false;
}

/* The file offset position represents the dirent entry number. A
   last cookie cache takes care of the common case of reading the
   whole directory.

@@ -847,7 +880,7 @@ static int nfs_readdir(struct file *file, struct dir_context *ctx)
	desc->plus = nfs_use_readdirplus(inode, ctx) ? 1 : 0;

	nfs_block_sillyrename(dentry);
	if (ctx->pos == 0 || nfs_attribute_cache_expired(inode))
	if (ctx->pos == 0 || nfs_dir_mapping_need_revalidate(inode))
		res = nfs_revalidate_mapping(inode, file->f_mapping);
	if (res < 0)
		goto out;

@@ -1911,6 +1944,7 @@ int nfs_rename(struct inode *old_dir, struct dentry *old_dentry,
	struct inode *old_inode = old_dentry->d_inode;
	struct inode *new_inode = new_dentry->d_inode;
	struct dentry *dentry = NULL, *rehash = NULL;
	struct rpc_task *task;
	int error = -EBUSY;

	dfprintk(VFS, "NFS: rename(%pd2 -> %pd2, ct=%d)\n",

@@ -1958,8 +1992,16 @@ int nfs_rename(struct inode *old_dir, struct dentry *old_dentry,
	if (new_inode != NULL)
		NFS_PROTO(new_inode)->return_delegation(new_inode);

	error = NFS_PROTO(old_dir)->rename(old_dir, &old_dentry->d_name,
			new_dir, &new_dentry->d_name);
	task = nfs_async_rename(old_dir, new_dir, old_dentry, new_dentry, NULL);
	if (IS_ERR(task)) {
		error = PTR_ERR(task);
		goto out;
	}

	error = rpc_wait_for_completion_task(task);
	if (error == 0)
		error = task->tk_status;
	rpc_put_task(task);
	nfs_mark_for_revalidate(old_inode);
out:
	if (rehash)
fs/nfs/inode.c

@@ -588,6 +588,25 @@ void nfs_setattr_update_inode(struct inode *inode, struct iattr *attr)
}
EXPORT_SYMBOL_GPL(nfs_setattr_update_inode);

static void nfs_request_parent_use_readdirplus(struct dentry *dentry)
{
	struct dentry *parent;

	parent = dget_parent(dentry);
	nfs_force_use_readdirplus(parent->d_inode);
	dput(parent);
}

static bool nfs_need_revalidate_inode(struct inode *inode)
{
	if (NFS_I(inode)->cache_validity &
			(NFS_INO_INVALID_ATTR|NFS_INO_INVALID_LABEL))
		return true;
	if (nfs_attribute_cache_expired(inode))
		return true;
	return false;
}

int nfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
{
	struct inode *inode = dentry->d_inode;

@@ -616,10 +635,13 @@ int nfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
	    ((mnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode)))
		need_atime = 0;

	if (need_atime)
		err = __nfs_revalidate_inode(NFS_SERVER(inode), inode);
	else
		err = nfs_revalidate_inode(NFS_SERVER(inode), inode);
	if (need_atime || nfs_need_revalidate_inode(inode)) {
		struct nfs_server *server = NFS_SERVER(inode);

		if (server->caps & NFS_CAP_READDIRPLUS)
			nfs_request_parent_use_readdirplus(dentry);
		err = __nfs_revalidate_inode(server, inode);
	}
	if (!err) {
		generic_fillattr(inode, stat);
		stat->ino = nfs_compat_user_ino64(NFS_FILEID(inode));

@@ -961,9 +983,7 @@ int nfs_attribute_cache_expired(struct inode *inode)
 */
int nfs_revalidate_inode(struct nfs_server *server, struct inode *inode)
{
	if (!(NFS_I(inode)->cache_validity &
			(NFS_INO_INVALID_ATTR|NFS_INO_INVALID_LABEL))
			&& !nfs_attribute_cache_expired(inode))
	if (!nfs_need_revalidate_inode(inode))
		return NFS_STALE(inode) ? -ESTALE : 0;
	return __nfs_revalidate_inode(server, inode);
}
fs/nfs/internal.h

@@ -301,6 +301,7 @@ extern struct nfs_client *nfs_init_client(struct nfs_client *clp,
			const char *ip_addr);

/* dir.c */
extern void nfs_force_use_readdirplus(struct inode *dir);
extern unsigned long nfs_access_cache_count(struct shrinker *shrink,
			struct shrink_control *sc);
extern unsigned long nfs_access_cache_scan(struct shrinker *shrink,

@@ -474,6 +475,13 @@ extern int nfs_migrate_page(struct address_space *,
#define nfs_migrate_page NULL
#endif

/* unlink.c */
extern struct rpc_task *
nfs_async_rename(struct inode *old_dir, struct inode *new_dir,
		 struct dentry *old_dentry, struct dentry *new_dentry,
		 void (*complete)(struct rpc_task *, struct nfs_renamedata *));
extern int nfs_sillyrename(struct inode *dir, struct dentry *dentry);

/* direct.c */
void nfs_init_cinfo_from_dreq(struct nfs_commit_info *cinfo,
		struct nfs_direct_req *dreq);
fs/nfs/nfs3proc.c

@@ -478,41 +478,6 @@ nfs3_proc_rename_done(struct rpc_task *task, struct inode *old_dir,
	return 1;
}

static int
nfs3_proc_rename(struct inode *old_dir, struct qstr *old_name,
		struct inode *new_dir, struct qstr *new_name)
{
	struct nfs_renameargs arg = {
		.old_dir = NFS_FH(old_dir),
		.old_name = old_name,
		.new_dir = NFS_FH(new_dir),
		.new_name = new_name,
	};
	struct nfs_renameres res;
	struct rpc_message msg = {
		.rpc_proc = &nfs3_procedures[NFS3PROC_RENAME],
		.rpc_argp = &arg,
		.rpc_resp = &res,
	};
	int status = -ENOMEM;

	dprintk("NFS call rename %s -> %s\n", old_name->name, new_name->name);

	res.old_fattr = nfs_alloc_fattr();
	res.new_fattr = nfs_alloc_fattr();
	if (res.old_fattr == NULL || res.new_fattr == NULL)
		goto out;

	status = rpc_call_sync(NFS_CLIENT(old_dir), &msg, 0);
	nfs_post_op_update_inode(old_dir, res.old_fattr);
	nfs_post_op_update_inode(new_dir, res.new_fattr);
out:
	nfs_free_fattr(res.old_fattr);
	nfs_free_fattr(res.new_fattr);
	dprintk("NFS reply rename: %d\n", status);
	return status;
}

static int
nfs3_proc_link(struct inode *inode, struct inode *dir, struct qstr *name)
{

@@ -968,7 +933,6 @@ const struct nfs_rpc_ops nfs_v3_clientops = {
	.unlink_setup = nfs3_proc_unlink_setup,
	.unlink_rpc_prepare = nfs3_proc_unlink_rpc_prepare,
	.unlink_done = nfs3_proc_unlink_done,
	.rename = nfs3_proc_rename,
	.rename_setup = nfs3_proc_rename_setup,
	.rename_rpc_prepare = nfs3_proc_rename_rpc_prepare,
	.rename_done = nfs3_proc_rename_done,
fs/nfs/nfs4_fs.h

@@ -427,6 +427,7 @@ extern void nfs4_close_sync(struct nfs4_state *, fmode_t);
extern void nfs4_state_set_mode_locked(struct nfs4_state *, fmode_t);
extern void nfs_inode_find_state_and_recover(struct inode *inode,
		const nfs4_stateid *stateid);
extern int nfs4_state_mark_reclaim_nograce(struct nfs_client *, struct nfs4_state *);
extern void nfs4_schedule_lease_recovery(struct nfs_client *);
extern int nfs4_wait_clnt_recover(struct nfs_client *clp);
extern int nfs4_client_recover_expired_lease(struct nfs_client *clp);

@@ -500,6 +501,16 @@ static inline bool nfs4_stateid_match(const nfs4_stateid *dst, const nfs4_statei
	return memcmp(dst, src, sizeof(*dst)) == 0;
}

static inline bool nfs4_stateid_match_other(const nfs4_stateid *dst, const nfs4_stateid *src)
{
	return memcmp(dst->other, src->other, NFS4_STATEID_OTHER_SIZE) == 0;
}

static inline bool nfs4_stateid_is_newer(const nfs4_stateid *s1, const nfs4_stateid *s2)
{
	return (s32)(be32_to_cpu(s1->seqid) - be32_to_cpu(s2->seqid)) > 0;
}

static inline bool nfs4_valid_open_stateid(const struct nfs4_state *state)
{
	return test_bit(NFS_STATE_RECOVERY_FAILED, &state->flags) == 0;
fs/nfs/nfs4client.c

@@ -531,6 +531,13 @@ int nfs40_walk_client_list(struct nfs_client *new,
			*result = pos;
			dprintk("NFS: <-- %s using nfs_client = %p ({%d})\n",
				__func__, pos, atomic_read(&pos->cl_count));
			goto out;
		case -ERESTARTSYS:
		case -ETIMEDOUT:
			/* The callback path may have been inadvertently
			 * changed. Schedule recovery!
			 */
			nfs4_schedule_path_down_recovery(pos);
		default:
			goto out;
		}
fs/nfs/nfs4proc.c

@@ -1068,6 +1068,7 @@ static void nfs4_opendata_free(struct kref *kref)
		dput(p->dentry);
	nfs_sb_deactive(sb);
	nfs_fattr_free_names(&p->f_attr);
	kfree(p->f_attr.mdsthreshold);
	kfree(p);
}

@@ -1137,12 +1138,71 @@ static void update_open_stateflags(struct nfs4_state *state, fmode_t fmode)
	nfs4_state_set_mode_locked(state, state->state | fmode);
}

static void nfs_set_open_stateid_locked(struct nfs4_state *state, nfs4_stateid *stateid, fmode_t fmode)
static void nfs_test_and_clear_all_open_stateid(struct nfs4_state *state)
{
	struct nfs_client *clp = state->owner->so_server->nfs_client;
	bool need_recover = false;

	if (test_and_clear_bit(NFS_O_RDONLY_STATE, &state->flags) && state->n_rdonly)
		need_recover = true;
	if (test_and_clear_bit(NFS_O_WRONLY_STATE, &state->flags) && state->n_wronly)
		need_recover = true;
	if (test_and_clear_bit(NFS_O_RDWR_STATE, &state->flags) && state->n_rdwr)
		need_recover = true;
	if (need_recover)
		nfs4_state_mark_reclaim_nograce(clp, state);
}

static bool nfs_need_update_open_stateid(struct nfs4_state *state,
		nfs4_stateid *stateid)
{
	if (test_and_set_bit(NFS_OPEN_STATE, &state->flags) == 0)
		return true;
	if (!nfs4_stateid_match_other(stateid, &state->open_stateid)) {
		nfs_test_and_clear_all_open_stateid(state);
		return true;
	}
	if (nfs4_stateid_is_newer(stateid, &state->open_stateid))
		return true;
	return false;
}

static void nfs_clear_open_stateid_locked(struct nfs4_state *state,
		nfs4_stateid *stateid, fmode_t fmode)
{
	clear_bit(NFS_O_RDWR_STATE, &state->flags);
	switch (fmode & (FMODE_READ|FMODE_WRITE)) {
	case FMODE_WRITE:
		clear_bit(NFS_O_RDONLY_STATE, &state->flags);
		break;
	case FMODE_READ:
		clear_bit(NFS_O_WRONLY_STATE, &state->flags);
		break;
	case 0:
		clear_bit(NFS_O_RDONLY_STATE, &state->flags);
		clear_bit(NFS_O_WRONLY_STATE, &state->flags);
		clear_bit(NFS_OPEN_STATE, &state->flags);
	}
	if (stateid == NULL)
		return;
	if (!nfs_need_update_open_stateid(state, stateid))
		return;
	if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
		nfs4_stateid_copy(&state->stateid, stateid);
	nfs4_stateid_copy(&state->open_stateid, stateid);
	set_bit(NFS_OPEN_STATE, &state->flags);
}

static void nfs_clear_open_stateid(struct nfs4_state *state, nfs4_stateid *stateid, fmode_t fmode)
{
	write_seqlock(&state->seqlock);
	nfs_clear_open_stateid_locked(state, stateid, fmode);
	write_sequnlock(&state->seqlock);
	if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags))
		nfs4_schedule_state_manager(state->owner->so_server->nfs_client);
}

static void nfs_set_open_stateid_locked(struct nfs4_state *state, nfs4_stateid *stateid, fmode_t fmode)
{
	switch (fmode) {
	case FMODE_READ:
		set_bit(NFS_O_RDONLY_STATE, &state->flags);

@@ -1153,13 +1213,11 @@ static void nfs_set_open_stateid_locked(struct nfs4_state *state, nfs4_stateid *
	case FMODE_READ|FMODE_WRITE:
		set_bit(NFS_O_RDWR_STATE, &state->flags);
	}
}

static void nfs_set_open_stateid(struct nfs4_state *state, nfs4_stateid *stateid, fmode_t fmode)
{
	write_seqlock(&state->seqlock);
	nfs_set_open_stateid_locked(state, stateid, fmode);
	write_sequnlock(&state->seqlock);
	if (!nfs_need_update_open_stateid(state, stateid))
		return;
	if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
		nfs4_stateid_copy(&state->stateid, stateid);
	nfs4_stateid_copy(&state->open_stateid, stateid);
}

static void __update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_stateid, const nfs4_stateid *deleg_stateid, fmode_t fmode)

@@ -1217,6 +1275,8 @@ static int update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_stat
		__update_open_stateid(state, open_stateid, NULL, fmode);
		ret = 1;
	}
	if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags))
		nfs4_schedule_state_manager(state->owner->so_server->nfs_client);

	return ret;
}

@@ -1450,12 +1510,15 @@ static int nfs4_open_recover(struct nfs4_opendata *opendata, struct nfs4_state *
	struct nfs4_state *newstate;
	int ret;

	/* Don't trigger recovery in nfs_test_and_clear_all_open_stateid */
	clear_bit(NFS_O_RDWR_STATE, &state->flags);
	clear_bit(NFS_O_WRONLY_STATE, &state->flags);
	clear_bit(NFS_O_RDONLY_STATE, &state->flags);
	/* memory barrier prior to reading state->n_* */
	clear_bit(NFS_DELEGATED_STATE, &state->flags);
	clear_bit(NFS_OPEN_STATE, &state->flags);
	smp_rmb();
	if (state->n_rdwr != 0) {
		clear_bit(NFS_O_RDWR_STATE, &state->flags);
		ret = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE, &newstate);
		if (ret != 0)
			return ret;

@@ -1463,7 +1526,6 @@ static int nfs4_open_recover(struct nfs4_opendata *opendata, struct nfs4_state *
			return -ESTALE;
	}
	if (state->n_wronly != 0) {
		clear_bit(NFS_O_WRONLY_STATE, &state->flags);
		ret = nfs4_open_recover_helper(opendata, FMODE_WRITE, &newstate);
		if (ret != 0)
			return ret;

@@ -1471,7 +1533,6 @@ static int nfs4_open_recover(struct nfs4_opendata *opendata, struct nfs4_state *
			return -ESTALE;
	}
	if (state->n_rdonly != 0) {
		clear_bit(NFS_O_RDONLY_STATE, &state->flags);
		ret = nfs4_open_recover_helper(opendata, FMODE_READ, &newstate);
		if (ret != 0)
			return ret;

@@ -2244,10 +2305,12 @@ static int _nfs4_do_open(struct inode *dir,
		}
	}

	if (ctx_th && server->attr_bitmask[2] & FATTR4_WORD2_MDSTHRESHOLD) {
		opendata->f_attr.mdsthreshold = pnfs_mdsthreshold_alloc();
		if (!opendata->f_attr.mdsthreshold)
			goto err_free_label;
	if (server->attr_bitmask[2] & FATTR4_WORD2_MDSTHRESHOLD) {
		if (!opendata->f_attr.mdsthreshold) {
			opendata->f_attr.mdsthreshold = pnfs_mdsthreshold_alloc();
			if (!opendata->f_attr.mdsthreshold)
				goto err_free_label;
		}
		opendata->o_arg.open_bitmap = &nfs4_pnfs_open_bitmap[0];
	}
	if (dentry->d_inode != NULL)

@@ -2275,11 +2338,10 @@ static int _nfs4_do_open(struct inode *dir,
	if (opendata->file_created)
		*opened |= FILE_CREATED;

	if (pnfs_use_threshold(ctx_th, opendata->f_attr.mdsthreshold, server))
	if (pnfs_use_threshold(ctx_th, opendata->f_attr.mdsthreshold, server)) {
		*ctx_th = opendata->f_attr.mdsthreshold;
	else
		kfree(opendata->f_attr.mdsthreshold);
	opendata->f_attr.mdsthreshold = NULL;
		opendata->f_attr.mdsthreshold = NULL;
	}

	nfs4_label_free(olabel);

@@ -2289,7 +2351,6 @@ static int _nfs4_do_open(struct inode *dir,
err_free_label:
	nfs4_label_free(olabel);
err_opendata_put:
	kfree(opendata->f_attr.mdsthreshold);
	nfs4_opendata_put(opendata);
err_put_state_owner:
	nfs4_put_state_owner(sp);

@@ -2479,26 +2540,6 @@ static void nfs4_free_closedata(void *data)
	kfree(calldata);
}

static void nfs4_close_clear_stateid_flags(struct nfs4_state *state,
		fmode_t fmode)
{
	spin_lock(&state->owner->so_lock);
	clear_bit(NFS_O_RDWR_STATE, &state->flags);
	switch (fmode & (FMODE_READ|FMODE_WRITE)) {
	case FMODE_WRITE:
		clear_bit(NFS_O_RDONLY_STATE, &state->flags);
		break;
	case FMODE_READ:
		clear_bit(NFS_O_WRONLY_STATE, &state->flags);
		break;
	case 0:
		clear_bit(NFS_O_RDONLY_STATE, &state->flags);
		clear_bit(NFS_O_WRONLY_STATE, &state->flags);
		clear_bit(NFS_OPEN_STATE, &state->flags);
	}
	spin_unlock(&state->owner->so_lock);
}

static void nfs4_close_done(struct rpc_task *task, void *data)
{
	struct nfs4_closedata *calldata = data;

@@ -2517,9 +2558,9 @@ static void nfs4_close_done(struct rpc_task *task, void *data)
		if (calldata->roc)
			pnfs_roc_set_barrier(state->inode,
					calldata->roc_barrier);
		nfs_set_open_stateid(state, &calldata->res.stateid, 0);
		nfs_clear_open_stateid(state, &calldata->res.stateid, 0);
		renew_lease(server, calldata->timestamp);
		break;
		goto out_release;
	case -NFS4ERR_ADMIN_REVOKED:
	case -NFS4ERR_STALE_STATEID:
	case -NFS4ERR_OLD_STATEID:

@@ -2533,7 +2574,7 @@ static void nfs4_close_done(struct rpc_task *task, void *data)
			goto out_release;
		}
	}
	nfs4_close_clear_stateid_flags(state, calldata->arg.fmode);
	nfs_clear_open_stateid(state, NULL, calldata->arg.fmode);
out_release:
	nfs_release_seqid(calldata->arg.seqid);
	nfs_refresh_inode(calldata->inode, calldata->res.fattr);

@@ -3507,49 +3548,6 @@ static int nfs4_proc_rename_done(struct rpc_task *task, struct inode *old_dir,
	return 1;
}

static int _nfs4_proc_rename(struct inode *old_dir, struct qstr *old_name,
		struct inode *new_dir, struct qstr *new_name)
{
	struct nfs_server *server = NFS_SERVER(old_dir);
	struct nfs_renameargs arg = {
		.old_dir = NFS_FH(old_dir),
		.new_dir = NFS_FH(new_dir),
		.old_name = old_name,
		.new_name = new_name,
	};
	struct nfs_renameres res = {
		.server = server,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENAME],
		.rpc_argp = &arg,
		.rpc_resp = &res,
	};
	int status = -ENOMEM;

	status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
	if (!status) {
		update_changeattr(old_dir, &res.old_cinfo);
		update_changeattr(new_dir, &res.new_cinfo);
	}
	return status;
}

static int nfs4_proc_rename(struct inode *old_dir, struct qstr *old_name,
		struct inode *new_dir, struct qstr *new_name)
{
	struct nfs4_exception exception = { };
	int err;
	do {
		err = _nfs4_proc_rename(old_dir, old_name,
				new_dir, new_name);
		trace_nfs4_rename(old_dir, old_name, new_dir, new_name, err);
		err = nfs4_handle_exception(NFS_SERVER(old_dir), err,
				&exception);
	} while (exception.retry);
	return err;
}

static int _nfs4_proc_link(struct inode *inode, struct inode *dir, struct qstr *name)
{
	struct nfs_server *server = NFS_SERVER(inode);

@@ -4884,6 +4882,20 @@ nfs4_init_uniform_client_string(const struct nfs_client *clp,
			nodename);
}

/*
 * nfs4_callback_up_net() starts only "tcp" and "tcp6" callback
 * services. Advertise one based on the address family of the
 * clientaddr.
 */
static unsigned int
nfs4_init_callback_netid(const struct nfs_client *clp, char *buf, size_t len)
{
	if (strchr(clp->cl_ipaddr, ':') != NULL)
		return scnprintf(buf, len, "tcp6");
	else
		return scnprintf(buf, len, "tcp");
}

/**
 * nfs4_proc_setclientid - Negotiate client ID
 * @clp: state data structure

@@ -4925,12 +4937,10 @@ int nfs4_proc_setclientid(struct nfs_client *clp, u32 program,
			setclientid.sc_name,
			sizeof(setclientid.sc_name));
	/* cb_client4 */
	rcu_read_lock();
	setclientid.sc_netid_len = scnprintf(setclientid.sc_netid,
			sizeof(setclientid.sc_netid), "%s",
			rpc_peeraddr2str(clp->cl_rpcclient,
					RPC_DISPLAY_NETID));
	rcu_read_unlock();
	setclientid.sc_netid_len =
			nfs4_init_callback_netid(clp,
					setclientid.sc_netid,
					sizeof(setclientid.sc_netid));
	setclientid.sc_uaddr_len = scnprintf(setclientid.sc_uaddr,
			sizeof(setclientid.sc_uaddr), "%s.%u.%u",
			clp->cl_ipaddr, port >> 8, port & 255);

@@ -8408,7 +8418,6 @@ const struct nfs_rpc_ops nfs_v4_clientops = {
	.unlink_setup = nfs4_proc_unlink_setup,
	.unlink_rpc_prepare = nfs4_proc_unlink_rpc_prepare,
	.unlink_done = nfs4_proc_unlink_done,
	.rename = nfs4_proc_rename,
	.rename_setup = nfs4_proc_rename_setup,
	.rename_rpc_prepare = nfs4_proc_rename_rpc_prepare,
	.rename_done = nfs4_proc_rename_done,
fs/nfs/nfs4state.c

@@ -1316,7 +1316,7 @@ static int nfs4_state_mark_reclaim_reboot(struct nfs_client *clp, struct nfs4_st
	return 1;
}

static int nfs4_state_mark_reclaim_nograce(struct nfs_client *clp, struct nfs4_state *state)
int nfs4_state_mark_reclaim_nograce(struct nfs_client *clp, struct nfs4_state *state)
{
	set_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags);
	clear_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags);

@@ -2075,8 +2075,10 @@ int nfs4_discover_server_trunking(struct nfs_client *clp,
	switch (status) {
	case 0:
		break;
	case -NFS4ERR_DELAY:
	case -ETIMEDOUT:
		if (clnt->cl_softrtry)
			break;
	case -NFS4ERR_DELAY:
	case -EAGAIN:
		ssleep(1);
	case -NFS4ERR_STALE_CLIENTID:
fs/nfs/nfs4xdr.c

@@ -203,8 +203,7 @@ static int nfs4_stat_to_errno(int);
				2 + encode_verifier_maxsz + 5 + \
				nfs4_label_maxsz)
#define decode_readdir_maxsz (op_decode_hdr_maxsz + \
				decode_verifier_maxsz + \
				nfs4_label_maxsz + nfs4_fattr_maxsz)
				decode_verifier_maxsz)
#define encode_readlink_maxsz (op_encode_hdr_maxsz)
#define decode_readlink_maxsz (op_decode_hdr_maxsz + 1)
#define encode_write_maxsz (op_encode_hdr_maxsz + \
fs/nfs/pnfs.c

@@ -662,7 +662,18 @@ pnfs_destroy_all_layouts(struct nfs_client *clp)
 */
static bool pnfs_seqid_is_newer(u32 s1, u32 s2)
{
	return (s32)s1 - (s32)s2 > 0;
	return (s32)(s1 - s2) > 0;
}

static void
pnfs_verify_layout_stateid(struct pnfs_layout_hdr *lo,
		const nfs4_stateid *new,
		struct list_head *free_me_list)
{
	if (nfs4_stateid_match_other(&lo->plh_stateid, new))
		return;
	/* Layout is new! Kill existing layout segments */
	pnfs_mark_matching_lsegs_invalid(lo, free_me_list, NULL);
}

/* update lo->plh_stateid with new if is more recent */

@@ -1315,6 +1326,7 @@ pnfs_layout_process(struct nfs4_layoutget *lgp)
	struct nfs4_layoutget_res *res = &lgp->res;
	struct pnfs_layout_segment *lseg;
	struct inode *ino = lo->plh_inode;
	LIST_HEAD(free_me);
	int status = 0;

	/* Inject layout blob into I/O device driver */

@@ -1341,6 +1353,8 @@ pnfs_layout_process(struct nfs4_layoutget *lgp)
		goto out_forget_reply;
	}

	/* Check that the new stateid matches the old stateid */
	pnfs_verify_layout_stateid(lo, &res->stateid, &free_me);
	/* Done processing layoutget. Set the layout stateid */
	pnfs_set_layout_stateid(lo, &res->stateid, false);

@@ -1355,6 +1369,7 @@ pnfs_layout_process(struct nfs4_layoutget *lgp)
	}

	spin_unlock(&ino->i_lock);
	pnfs_free_lseg_list(&free_me);
	return lseg;
out:
	return ERR_PTR(status);
fs/nfs/proc.c

@@ -356,30 +356,6 @@ nfs_proc_rename_done(struct rpc_task *task, struct inode *old_dir,
	return 1;
}

static int
nfs_proc_rename(struct inode *old_dir, struct qstr *old_name,
		struct inode *new_dir, struct qstr *new_name)
{
	struct nfs_renameargs arg = {
		.old_dir = NFS_FH(old_dir),
		.old_name = old_name,
		.new_dir = NFS_FH(new_dir),
		.new_name = new_name,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs_procedures[NFSPROC_RENAME],
		.rpc_argp = &arg,
	};
	int status;

	dprintk("NFS call rename %s -> %s\n", old_name->name, new_name->name);
	status = rpc_call_sync(NFS_CLIENT(old_dir), &msg, 0);
	nfs_mark_for_revalidate(old_dir);
	nfs_mark_for_revalidate(new_dir);
	dprintk("NFS reply rename: %d\n", status);
	return status;
}

static int
nfs_proc_link(struct inode *inode, struct inode *dir, struct qstr *name)
{

@@ -745,7 +721,6 @@ const struct nfs_rpc_ops nfs_v2_clientops = {
	.unlink_setup = nfs_proc_unlink_setup,
	.unlink_rpc_prepare = nfs_proc_unlink_rpc_prepare,
	.unlink_done = nfs_proc_unlink_done,
	.rename = nfs_proc_rename,
	.rename_setup = nfs_proc_rename_setup,
	.rename_rpc_prepare = nfs_proc_rename_rpc_prepare,
	.rename_done = nfs_proc_rename_done,
fs/nfs/unlink.c

@@ -14,6 +14,7 @@
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/namei.h>
#include <linux/fsnotify.h>

#include "internal.h"
#include "nfs4_fs.h"

@@ -353,8 +354,8 @@ static void nfs_async_rename_done(struct rpc_task *task, void *calldata)
		return;
	}

	if (task->tk_status != 0)
		nfs_cancel_async_unlink(old_dentry);
	if (data->complete)
		data->complete(task, data);
}

/**

@@ -399,9 +400,10 @@ static const struct rpc_call_ops nfs_rename_ops = {
 *
 * It's expected that valid references to the dentries and inodes are held
 */
static struct rpc_task *
struct rpc_task *
nfs_async_rename(struct inode *old_dir, struct inode *new_dir,
		 struct dentry *old_dentry, struct dentry *new_dentry)
		 struct dentry *old_dentry, struct dentry *new_dentry,
		 void (*complete)(struct rpc_task *, struct nfs_renamedata *))
{
	struct nfs_renamedata *data;
	struct rpc_message msg = { };

@@ -438,6 +440,7 @@ nfs_async_rename(struct inode *old_dir, struct inode *new_dir,
	data->new_dentry = dget(new_dentry);
	nfs_fattr_init(&data->old_fattr);
	nfs_fattr_init(&data->new_fattr);
	data->complete = complete;

	/* set up nfs_renameargs */
	data->args.old_dir = NFS_FH(old_dir);

@@ -456,6 +459,27 @@ nfs_async_rename(struct inode *old_dir, struct inode *new_dir,
	return rpc_run_task(&task_setup_data);
}

/*
 * Perform tasks needed when a sillyrename is done such as cancelling the
 * queued async unlink if it failed.
 */
static void
nfs_complete_sillyrename(struct rpc_task *task, struct nfs_renamedata *data)
{
	struct dentry *dentry = data->old_dentry;

	if (task->tk_status != 0) {
		nfs_cancel_async_unlink(dentry);
		return;
	}

	/*
	 * vfs_unlink and the like do not issue this when a file is
	 * sillyrenamed, so do it here.
	 */
	fsnotify_nameremove(dentry, 0);
}

#define SILLYNAME_PREFIX ".nfs"
#define SILLYNAME_PREFIX_LEN ((unsigned)sizeof(SILLYNAME_PREFIX) - 1)
#define SILLYNAME_FILEID_LEN ((unsigned)sizeof(u64) << 1)

@@ -548,7 +572,8 @@ nfs_sillyrename(struct inode *dir, struct dentry *dentry)
	}

	/* run the rename task, undo unlink if it fails */
	task = nfs_async_rename(dir, dir, dentry, sdentry);
	task = nfs_async_rename(dir, dir, dentry, sdentry,
				nfs_complete_sillyrename);
	if (IS_ERR(task)) {
		error = -EBUSY;
		nfs_cancel_async_unlink(dentry);
include/linux/nfs_fs.h

@@ -92,6 +92,7 @@ struct nfs_open_context {
};

struct nfs_open_dir_context {
	struct list_head list;
	struct rpc_cred *cred;
	unsigned long attr_gencount;
	__u64 dir_cookie;

@@ -510,7 +511,6 @@ extern void nfs_complete_unlink(struct dentry *dentry, struct inode *);
extern void nfs_wait_on_sillyrename(struct dentry *dentry);
extern void nfs_block_sillyrename(struct dentry *dentry);
extern void nfs_unblock_sillyrename(struct dentry *dentry);
extern int nfs_sillyrename(struct inode *dir, struct dentry *dentry);

/*
 * linux/fs/nfs/write.c
include/linux/nfs_xdr.h

@@ -1402,6 +1402,7 @@ struct nfs_renamedata {
	struct inode *new_dir;
	struct dentry *new_dentry;
	struct nfs_fattr new_fattr;
	void (*complete)(struct rpc_task *, struct nfs_renamedata *);
};

struct nfs_access_entry;

@@ -1444,8 +1445,6 @@ struct nfs_rpc_ops {
	void (*unlink_setup) (struct rpc_message *, struct inode *dir);
	void (*unlink_rpc_prepare) (struct rpc_task *, struct nfs_unlinkdata *);
	int (*unlink_done) (struct rpc_task *, struct inode *);
	int (*rename) (struct inode *, struct qstr *,
			struct inode *, struct qstr *);
	void (*rename_setup) (struct rpc_message *msg, struct inode *dir);
	void (*rename_rpc_prepare)(struct rpc_task *task, struct nfs_renamedata *);
	int (*rename_done) (struct rpc_task *task, struct inode *old_dir, struct inode *new_dir);
include/linux/sunrpc/bc_xprt.h

@@ -32,7 +32,8 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <linux/sunrpc/sched.h>

#ifdef CONFIG_SUNRPC_BACKCHANNEL
struct rpc_rqst *xprt_alloc_bc_request(struct rpc_xprt *xprt);
struct rpc_rqst *xprt_lookup_bc_request(struct rpc_xprt *xprt, __be32 xid);
void xprt_complete_bc_request(struct rpc_rqst *req, uint32_t copied);
void xprt_free_bc_request(struct rpc_rqst *req);
int xprt_setup_backchannel(struct rpc_xprt *, unsigned int min_reqs);
void xprt_destroy_backchannel(struct rpc_xprt *, unsigned int max_reqs);
net/sunrpc/backchannel_rqst.c

@@ -212,39 +212,23 @@ void xprt_destroy_backchannel(struct rpc_xprt *xprt, unsigned int max_reqs)
}
EXPORT_SYMBOL_GPL(xprt_destroy_backchannel);

/*
 * One or more rpc_rqst structure have been preallocated during the
 * backchannel setup. Buffer space for the send and private XDR buffers
 * has been preallocated as well. Use xprt_alloc_bc_request to allocate
 * to this request. Use xprt_free_bc_request to return it.
 *
 * We know that we're called in soft interrupt context, grab the spin_lock
 * since there is no need to grab the bottom half spin_lock.
 *
 * Return an available rpc_rqst, otherwise NULL if non are available.
 */
struct rpc_rqst *xprt_alloc_bc_request(struct rpc_xprt *xprt)
static struct rpc_rqst *xprt_alloc_bc_request(struct rpc_xprt *xprt, __be32 xid)
{
	struct rpc_rqst *req;
	struct rpc_rqst *req = NULL;

	dprintk("RPC: allocate a backchannel request\n");
	spin_lock(&xprt->bc_pa_lock);
	if (!list_empty(&xprt->bc_pa_list)) {
		req = list_first_entry(&xprt->bc_pa_list, struct rpc_rqst,
				rq_bc_pa_list);
		list_del(&req->rq_bc_pa_list);
	} else {
		req = NULL;
	}
	spin_unlock(&xprt->bc_pa_lock);
	if (list_empty(&xprt->bc_pa_list))
		goto not_found;

	if (req != NULL) {
		set_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state);
		req->rq_reply_bytes_recvd = 0;
		req->rq_bytes_sent = 0;
		memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
	req = list_first_entry(&xprt->bc_pa_list, struct rpc_rqst,
			rq_bc_pa_list);
	req->rq_reply_bytes_recvd = 0;
	req->rq_bytes_sent = 0;
	memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
			sizeof(req->rq_private_buf));
	}
	req->rq_xid = xid;
	req->rq_connect_cookie = xprt->connect_cookie;
not_found:
	dprintk("RPC: backchannel req=%p\n", req);
	return req;
}

@@ -259,6 +243,7 @@ void xprt_free_bc_request(struct rpc_rqst *req)

	dprintk("RPC: free backchannel req=%p\n", req);

	req->rq_connect_cookie = xprt->connect_cookie - 1;
	smp_mb__before_clear_bit();
	WARN_ON_ONCE(!test_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state));
	clear_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state);

@@ -281,7 +266,57 @@ void xprt_free_bc_request(struct rpc_rqst *req)
	 * may be reused by a new callback request.
	 */
	spin_lock_bh(&xprt->bc_pa_lock);
	list_add(&req->rq_bc_pa_list, &xprt->bc_pa_list);
	list_add_tail(&req->rq_bc_pa_list, &xprt->bc_pa_list);
	spin_unlock_bh(&xprt->bc_pa_lock);
}

/*
 * One or more rpc_rqst structure have been preallocated during the
 * backchannel setup. Buffer space for the send and private XDR buffers
 * has been preallocated as well. Use xprt_alloc_bc_request to allocate
 * to this request. Use xprt_free_bc_request to return it.
 *
 * We know that we're called in soft interrupt context, grab the spin_lock
 * since there is no need to grab the bottom half spin_lock.
 *
 * Return an available rpc_rqst, otherwise NULL if non are available.
 */
struct rpc_rqst *xprt_lookup_bc_request(struct rpc_xprt *xprt, __be32 xid)
{
	struct rpc_rqst *req;

	spin_lock(&xprt->bc_pa_lock);
	list_for_each_entry(req, &xprt->bc_pa_list, rq_bc_pa_list) {
		if (req->rq_connect_cookie != xprt->connect_cookie)
			continue;
		if (req->rq_xid == xid)
			goto found;
	}
	req = xprt_alloc_bc_request(xprt, xid);
found:
	spin_unlock(&xprt->bc_pa_lock);
	return req;
}

/*
 * Add callback request to callback list. The callback
 * service sleeps on the sv_cb_waitq waiting for new
 * requests. Wake it up after adding enqueing the
 * request.
 */
void xprt_complete_bc_request(struct rpc_rqst *req, uint32_t copied)
{
	struct rpc_xprt *xprt = req->rq_xprt;
	struct svc_serv *bc_serv = xprt->bc_serv;

	req->rq_private_buf.len = copied;
	set_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state);

	dprintk("RPC: add callback request to list\n");
	spin_lock(&bc_serv->sv_cb_lock);
	list_del(&req->rq_bc_pa_list);
	list_add(&req->rq_bc_list, &bc_serv->sv_cb_list);
	wake_up(&bc_serv->sv_cb_waitq);
	spin_unlock(&bc_serv->sv_cb_lock);
}
net/sunrpc/clnt.c

@@ -1363,6 +1363,7 @@ rpc_restart_call_prepare(struct rpc_task *task)
	if (RPC_ASSASSINATED(task))
		return 0;
	task->tk_action = call_start;
	task->tk_status = 0;
	if (task->tk_ops->rpc_call_prepare != NULL)
		task->tk_action = rpc_prepare_task;
	return 1;

@@ -1379,6 +1380,7 @@ rpc_restart_call(struct rpc_task *task)
	if (RPC_ASSASSINATED(task))
		return 0;
	task->tk_action = call_start;
	task->tk_status = 0;
	return 1;
}
EXPORT_SYMBOL_GPL(rpc_restart_call);

@@ -1728,9 +1730,7 @@ call_bind_status(struct rpc_task *task)
	case -EPROTONOSUPPORT:
		dprintk("RPC: %5u remote rpcbind version unavailable, retrying\n",
				task->tk_pid);
		task->tk_status = 0;
		task->tk_action = call_bind;
		return;
		goto retry_timeout;
	case -ECONNREFUSED: /* connection problems */
	case -ECONNRESET:
	case -ECONNABORTED:

@@ -1756,6 +1756,7 @@ call_bind_status(struct rpc_task *task)
	return;

retry_timeout:
	task->tk_status = 0;
	task->tk_action = call_timeout;
}

@@ -1798,21 +1799,19 @@ call_connect_status(struct rpc_task *task)
	trace_rpc_connect_status(task, status);
	task->tk_status = 0;
	switch (status) {
		/* if soft mounted, test if we've timed out */
	case -ETIMEDOUT:
		task->tk_action = call_timeout;
		return;
	case -ECONNREFUSED:
	case -ECONNRESET:
	case -ECONNABORTED:
	case -ENETUNREACH:
	case -EHOSTUNREACH:
		/* retry with existing socket, after a delay */
		rpc_delay(task, 3*HZ);
		if (RPC_IS_SOFTCONN(task))
			break;
		/* retry with existing socket, after a delay */
		rpc_delay(task, 3*HZ);
	case -EAGAIN:
		task->tk_action = call_bind;
		/* Check for timeouts before looping back to call_bind */
	case -ETIMEDOUT:
		task->tk_action = call_timeout;
		return;
	case 0:
		clnt->cl_stats->netreconn++;

@@ -2007,6 +2006,10 @@ call_status(struct rpc_task *task)
	case -EHOSTDOWN:
	case -EHOSTUNREACH:
	case -ENETUNREACH:
		if (RPC_IS_SOFTCONN(task)) {
			rpc_exit(task, status);
			break;
		}
		/*
		 * Delay any retries for 3 seconds, then handle as if it
		 * were a timeout.
net/sunrpc/sched.c

@@ -637,7 +637,8 @@ static void __rpc_queue_timer_fn(unsigned long ptr)

static void __rpc_atrun(struct rpc_task *task)
{
	task->tk_status = 0;
	if (task->tk_status == -ETIMEDOUT)
		task->tk_status = 0;
}

/*
net/sunrpc/xprtrdma/rpc_rdma.c

@@ -649,9 +649,7 @@ rpcrdma_inline_fixup(struct rpc_rqst *rqst, char *srcp, int copy_len, int pad)
				break;
			page_base = 0;
		}
		rqst->rq_rcv_buf.page_len = olen - copy_len;
	} else
		rqst->rq_rcv_buf.page_len = 0;
	}

	if (copy_len && rqst->rq_rcv_buf.tail[0].iov_len) {
		curlen = copy_len;
net/sunrpc/xprtrdma/transport.c

@@ -733,7 +733,7 @@ static void __exit xprt_rdma_cleanup(void)
{
	int rc;

	dprintk(KERN_INFO "RPCRDMA Module Removed, deregister RPC RDMA transport\n");
	dprintk("RPCRDMA Module Removed, deregister RPC RDMA transport\n");
#ifdef RPC_DEBUG
	if (sunrpc_table_header) {
		unregister_sysctl_table(sunrpc_table_header);

@@ -755,14 +755,14 @@ static int __init xprt_rdma_init(void)
	if (rc)
		return rc;

	dprintk(KERN_INFO "RPCRDMA Module Init, register RPC RDMA transport\n");
	dprintk("RPCRDMA Module Init, register RPC RDMA transport\n");

	dprintk(KERN_INFO "Defaults:\n");
	dprintk(KERN_INFO "\tSlots %d\n"
	dprintk("Defaults:\n");
	dprintk("\tSlots %d\n"
		"\tMaxInlineRead %d\n\tMaxInlineWrite %d\n",
		xprt_rdma_slot_table_entries,
		xprt_rdma_max_inline_read, xprt_rdma_max_inline_write);
	dprintk(KERN_INFO "\tPadding %d\n\tMemreg %d\n",
	dprintk("\tPadding %d\n\tMemreg %d\n",
		xprt_rdma_inline_write_padding, xprt_rdma_memreg_strategy);

#ifdef RPC_DEBUG
net/sunrpc/xprtsock.c

@@ -1306,41 +1306,29 @@ static inline int xs_tcp_read_reply(struct rpc_xprt *xprt,
 * If we're unable to obtain the rpc_rqst we schedule the closing of the
 * connection and return -1.
 */
static inline int xs_tcp_read_callback(struct rpc_xprt *xprt,
static int xs_tcp_read_callback(struct rpc_xprt *xprt,
					struct xdr_skb_reader *desc)
{
	struct sock_xprt *transport =
				container_of(xprt, struct sock_xprt, xprt);
	struct rpc_rqst *req;

	req = xprt_alloc_bc_request(xprt);
	/* Look up and lock the request corresponding to the given XID */
	spin_lock(&xprt->transport_lock);
	req = xprt_lookup_bc_request(xprt, transport->tcp_xid);
	if (req == NULL) {
		spin_unlock(&xprt->transport_lock);
		printk(KERN_WARNING "Callback slot table overflowed\n");
		xprt_force_disconnect(xprt);
		return -1;
	}

	req->rq_xid = transport->tcp_xid;
	dprintk("RPC: read callback XID %08x\n", ntohl(req->rq_xid));
	xs_tcp_read_common(xprt, desc, req);

	if (!(transport->tcp_flags & TCP_RCV_COPY_DATA)) {
		struct svc_serv *bc_serv = xprt->bc_serv;

		/*
		 * Add callback request to callback list. The callback
		 * service sleeps on the sv_cb_waitq waiting for new
		 * requests. Wake it up after adding enqueing the
		 * request.
		 */
		dprintk("RPC: add callback request to list\n");
		spin_lock(&bc_serv->sv_cb_lock);
		list_add(&req->rq_bc_list, &bc_serv->sv_cb_list);
		spin_unlock(&bc_serv->sv_cb_lock);
		wake_up(&bc_serv->sv_cb_waitq);
	}

	req->rq_private_buf.len = transport->tcp_copied;
	if (!(transport->tcp_flags & TCP_RCV_COPY_DATA))
		xprt_complete_bc_request(req, transport->tcp_copied);
	spin_unlock(&xprt->transport_lock);

	return 0;
}
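Note on the xprtsock.c and backchannel_rqst.c changes above: a callback that arrives split across several TCP segments must be matched back to the same preallocated rpc_rqst on every read pass, by XID and connection cookie, instead of consuming a fresh slot per segment. The sketch below illustrates only that lookup policy; the struct and function names are hypothetical, while the real code works on struct rpc_rqst under xprt->bc_pa_lock.

	#include <stddef.h>
	#include <stdint.h>

	/* Hypothetical slot-table entry; the kernel code uses struct rpc_rqst
	 * fields rq_xid and rq_connect_cookie instead. */
	struct bc_slot {
		uint32_t xid;
		uint32_t connect_cookie;
		int	 in_use;
	};

	/*
	 * Match an incoming callback fragment to its slot: a partially
	 * received callback keeps its XID for the lifetime of the
	 * connection, so reuse that slot; otherwise claim a free one.
	 * Returns NULL when the slot table has overflowed.
	 */
	static struct bc_slot *bc_lookup(struct bc_slot *tbl, size_t n,
					 uint32_t xid, uint32_t cookie)
	{
		size_t i;

		for (i = 0; i < n; i++)
			if (tbl[i].in_use && tbl[i].connect_cookie == cookie &&
			    tbl[i].xid == xid)
				return &tbl[i];	/* continuation of a callback */
		for (i = 0; i < n; i++)
			if (!tbl[i].in_use) {
				tbl[i].in_use = 1;	/* first fragment: claim a slot */
				tbl[i].xid = xid;
				tbl[i].connect_cookie = cookie;
				return &tbl[i];
			}
		return NULL;			/* callback slot table overflowed */
	}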