SUNRPC: Fix a race in rpc_info_open

There is a race between rpc_info_open and rpc_release_client()
in that nothing stops a process from opening the file after
the clnt->cl_kref goes to zero.

Fix this by using atomic_inc_not_zero()...

Reported-by: J. Bruce Fields <bfields@redhat.com>
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
Cc: stable@kernel.org
Author: Trond Myklebust <Trond.Myklebust@netapp.com>
Date:   2010-09-12 19:55:25 -04:00
commit 006abe887c
parent 5a67657a2e

3 changed files with 21 additions and 21 deletions
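The core of the fix is the standard "take a reference only if the count is
still nonzero" lookup pattern. Below is a rough userspace model of the idea
using plain C11 atomics; get_ref_not_zero and lookup_client are illustrative
names, not kernel API.

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct client {
        atomic_int count;       /* plays the role of clnt->cl_count */
};

/* Userspace analogue of the kernel's atomic_inc_not_zero(): bump the
 * count only if at least one reference still exists. */
static bool get_ref_not_zero(atomic_int *count)
{
        int old = atomic_load(count);

        while (old != 0) {
                /* CAS retries until we either bump a nonzero count or
                 * observe that it has dropped to zero. */
                if (atomic_compare_exchange_weak(count, &old, old + 1))
                        return true;
        }
        return false;   /* object is already being torn down */
}

/* A plain unconditional increment here is the bug being fixed: it can
 * "resurrect" a client whose last reference is already gone and whose
 * teardown is in progress. */
static struct client *lookup_client(struct client *clnt)
{
        if (clnt != NULL && get_ref_not_zero(&clnt->count))
                return clnt;    /* freeing cannot complete under us */
        return NULL;
}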

include/linux/sunrpc/clnt.h

@@ -30,7 +30,7 @@ struct rpc_inode;
  * The high-level client handle
  */
 struct rpc_clnt {
-        struct kref             cl_kref;        /* Number of references */
+        atomic_t                cl_count;       /* Number of references */
         struct list_head        cl_clients;     /* Global list of clients */
         struct list_head        cl_tasks;       /* List of tasks */
         spinlock_t              cl_lock;        /* spinlock */

net/sunrpc/clnt.c

@@ -226,7 +226,7 @@ static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args, stru
                         goto out_no_principal;
         }
 
-        kref_init(&clnt->cl_kref);
+        atomic_set(&clnt->cl_count, 1);
 
         err = rpc_setup_pipedir(clnt, program->pipe_dir_name);
         if (err < 0)
@@ -390,14 +390,14 @@ rpc_clone_client(struct rpc_clnt *clnt)
                 if (new->cl_principal == NULL)
                         goto out_no_principal;
         }
-        kref_init(&new->cl_kref);
+        atomic_set(&new->cl_count, 1);
         err = rpc_setup_pipedir(new, clnt->cl_program->pipe_dir_name);
         if (err != 0)
                 goto out_no_path;
         if (new->cl_auth)
                 atomic_inc(&new->cl_auth->au_count);
         xprt_get(clnt->cl_xprt);
-        kref_get(&clnt->cl_kref);
+        atomic_inc(&clnt->cl_count);
         rpc_register_client(new);
         rpciod_up();
         return new;
@@ -465,10 +465,8 @@ EXPORT_SYMBOL_GPL(rpc_shutdown_client);
  * Free an RPC client
  */
 static void
-rpc_free_client(struct kref *kref)
+rpc_free_client(struct rpc_clnt *clnt)
 {
-        struct rpc_clnt *clnt = container_of(kref, struct rpc_clnt, cl_kref);
-
         dprintk("RPC:       destroying %s client for %s\n",
                         clnt->cl_protname, clnt->cl_server);
         if (!IS_ERR(clnt->cl_path.dentry)) {
@@ -495,12 +493,10 @@ rpc_free_client(struct kref *kref)
  * Free an RPC client
  */
 static void
-rpc_free_auth(struct kref *kref)
+rpc_free_auth(struct rpc_clnt *clnt)
 {
-        struct rpc_clnt *clnt = container_of(kref, struct rpc_clnt, cl_kref);
-
         if (clnt->cl_auth == NULL) {
-                rpc_free_client(kref);
+                rpc_free_client(clnt);
                 return;
         }
 
@@ -509,10 +505,11 @@ rpc_free_auth(struct kref *kref)
          *       release remaining GSS contexts. This mechanism ensures
          *       that it can do so safely.
          */
-        kref_init(kref);
+        atomic_inc(&clnt->cl_count);
         rpcauth_release(clnt->cl_auth);
         clnt->cl_auth = NULL;
-        kref_put(kref, rpc_free_client);
+        if (atomic_dec_and_test(&clnt->cl_count))
+                rpc_free_client(clnt);
 }
 
 /*
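Note the inverse situation in rpc_free_auth() above: it runs only after
cl_count has already hit zero, so it is the sole owner and may legally revive
the count with a plain atomic_inc() while rpcauth_release() runs (which may
itself send NULL RPC calls that take and drop references), then drop that
temporary self-reference with atomic_dec_and_test(). A minimal userspace
sketch of the same shape; all names here are illustrative, not kernel API.

#include <stdatomic.h>
#include <stdlib.h>

struct client {
        atomic_int count;
        void *auth;
};

static void free_client(struct client *clnt)
{
        free(clnt);     /* final teardown */
}

/* Stand-in for rpcauth_release(); in the kernel this step may take and
 * drop further references on the client. */
static void release_auth(struct client *clnt)
{
        clnt->auth = NULL;
}

/* Called once the last reference is gone, so reviving the count here
 * cannot race with another owner. */
static void free_auth(struct client *clnt)
{
        if (clnt->auth == NULL) {
                free_client(clnt);
                return;
        }
        atomic_fetch_add(&clnt->count, 1);      /* temporary self-reference */
        release_auth(clnt);
        if (atomic_fetch_sub(&clnt->count, 1) == 1)
                free_client(clnt);              /* we held the last reference */
}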
@@ -525,7 +522,8 @@ rpc_release_client(struct rpc_clnt *clnt)
 
         if (list_empty(&clnt->cl_tasks))
                 wake_up(&destroy_wait);
-        kref_put(&clnt->cl_kref, rpc_free_auth);
+        if (atomic_dec_and_test(&clnt->cl_count))
+                rpc_free_auth(clnt);
 }
 
 /**
@@ -588,7 +586,7 @@ void rpc_task_set_client(struct rpc_task *task, struct rpc_clnt *clnt)
         if (clnt != NULL) {
                 rpc_task_release_client(task);
                 task->tk_client = clnt;
-                kref_get(&clnt->cl_kref);
+                atomic_inc(&clnt->cl_count);
                 if (clnt->cl_softrtry)
                         task->tk_flags |= RPC_TASK_SOFT;
                 /* Add to the client's list of all tasks */

net/sunrpc/rpc_pipe.c

@@ -371,21 +371,23 @@ rpc_show_info(struct seq_file *m, void *v)
 static int
 rpc_info_open(struct inode *inode, struct file *file)
 {
-        struct rpc_clnt *clnt;
+        struct rpc_clnt *clnt = NULL;
         int ret = single_open(file, rpc_show_info, NULL);
 
         if (!ret) {
                 struct seq_file *m = file->private_data;
-                mutex_lock(&inode->i_mutex);
-                clnt = RPC_I(inode)->private;
-                if (clnt) {
-                        kref_get(&clnt->cl_kref);
+
+                spin_lock(&file->f_path.dentry->d_lock);
+                if (!d_unhashed(file->f_path.dentry))
+                        clnt = RPC_I(inode)->private;
+                if (clnt != NULL && atomic_inc_not_zero(&clnt->cl_count)) {
+                        spin_unlock(&file->f_path.dentry->d_lock);
                         m->private = clnt;
                 } else {
+                        spin_unlock(&file->f_path.dentry->d_lock);
                         single_release(inode, file);
                         ret = -EINVAL;
                 }
-                mutex_unlock(&inode->i_mutex);
         }
         return ret;
 }
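The ordering in the new rpc_info_open() is what closes the race: the dentry
lock is taken first, the client pointer is read only while the dentry is
still hashed, and the reference is either grabbed or refused before the lock
is dropped on either path. A userspace sketch of that shape, using a pthread
mutex in place of d_lock; all names here are illustrative, not kernel API.

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct client {
        atomic_int count;
};

struct node {
        pthread_mutex_t lock;   /* stands in for dentry->d_lock */
        bool unhashed;          /* stands in for d_unhashed() */
        struct client *private; /* stands in for RPC_I(inode)->private */
};

/* Same try-get primitive as in the first sketch. */
static bool get_ref_not_zero(atomic_int *count)
{
        int old = atomic_load(count);

        while (old != 0)
                if (atomic_compare_exchange_weak(count, &old, old + 1))
                        return true;
        return false;
}

/* Read the pointer and try-get the reference under the same lock that
 * the teardown path holds while unhashing; only then unlock. */
static struct client *open_grab_client(struct node *n)
{
        struct client *clnt = NULL;

        pthread_mutex_lock(&n->lock);
        if (!n->unhashed)
                clnt = n->private;
        if (clnt != NULL && get_ref_not_zero(&clnt->count)) {
                pthread_mutex_unlock(&n->lock);
                return clnt;    /* caller now owns a reference */
        }
        pthread_mutex_unlock(&n->lock);
        return NULL;            /* caller fails the open, e.g. with -EINVAL */
}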