mirror of https://gitee.com/openkylin/linux.git
Merge branch 'devel' into next
This commit is contained in: commit 08cc36cbd1
@@ -14,6 +14,7 @@
#include <linux/sunrpc/svc.h>
#include <linux/lockd/lockd.h>
#include <linux/smp_lock.h>
#include <linux/kthread.h>

#define NLMDBG_FACILITY NLMDBG_CLIENT

@@ -60,7 +61,7 @@ struct nlm_host *nlmclnt_init(const struct nlmclnt_initdata *nlm_init)

host = nlmclnt_lookup_host(nlm_init->address, nlm_init->addrlen,
nlm_init->protocol, nlm_version,
nlm_init->hostname);
nlm_init->hostname, nlm_init->noresvport);
if (host == NULL) {
lockd_down();
return ERR_PTR(-ENOLCK);
@@ -191,11 +192,15 @@ __be32 nlmclnt_grant(const struct sockaddr *addr, const struct nlm_lock *lock)
void
nlmclnt_recovery(struct nlm_host *host)
{
struct task_struct *task;

if (!host->h_reclaiming++) {
nlm_get_host(host);
__module_get(THIS_MODULE);
if (kernel_thread(reclaimer, host, CLONE_FS | CLONE_FILES) < 0)
module_put(THIS_MODULE);
task = kthread_run(reclaimer, host, "%s-reclaim", host->h_name);
if (IS_ERR(task))
printk(KERN_ERR "lockd: unable to spawn reclaimer "
"thread. Locks for %s won't be reclaimed! "
"(%ld)\n", host->h_name, PTR_ERR(task));
}
}

@@ -207,7 +212,6 @@ reclaimer(void *ptr)
struct file_lock *fl, *next;
u32 nsmstate;

daemonize("%s-reclaim", host->h_name);
allow_signal(SIGKILL);

down_write(&host->h_rwsem);
@@ -233,7 +237,12 @@ reclaimer(void *ptr)
list_for_each_entry_safe(fl, next, &host->h_reclaim, fl_u.nfs_fl.list) {
list_del_init(&fl->fl_u.nfs_fl.list);

/* Why are we leaking memory here? --okir */
/*
* sending this thread a SIGKILL will result in any unreclaimed
* locks being removed from the h_granted list. This means that
* the kernel will not attempt to reclaim them again if a new
* reclaimer thread is spawned for this host.
*/
if (signalled())
continue;
if (nlmclnt_reclaim(host, fl) != 0)
@@ -261,5 +270,5 @@ reclaimer(void *ptr)
nlm_release_host(host);
lockd_down();
unlock_kernel();
module_put_and_exit(0);
return 0;
}

@@ -48,6 +48,7 @@ struct nlm_lookup_host_info {
const size_t hostname_len; /* it's length */
const struct sockaddr *src_sap; /* our address (optional) */
const size_t src_len; /* it's length */
const int noresvport; /* use non-priv port */
};

/*
@@ -222,6 +223,7 @@ static struct nlm_host *nlm_lookup_host(struct nlm_lookup_host_info *ni)
host->h_nsmstate = 0; /* real NSM state */
host->h_nsmhandle = nsm;
host->h_server = ni->server;
host->h_noresvport = ni->noresvport;
hlist_add_head(&host->h_hash, chain);
INIT_LIST_HEAD(&host->h_lockowners);
spin_lock_init(&host->h_lock);
@@ -272,6 +274,7 @@ nlm_destroy_host(struct nlm_host *host)
* @protocol: transport protocol to use
* @version: NLM protocol version
* @hostname: '\0'-terminated hostname of server
* @noresvport: 1 if non-privileged port should be used
*
* Returns an nlm_host structure that matches the passed-in
* [server address, transport protocol, NLM version, server hostname].
@@ -281,7 +284,9 @@ nlm_destroy_host(struct nlm_host *host)
struct nlm_host *nlmclnt_lookup_host(const struct sockaddr *sap,
const size_t salen,
const unsigned short protocol,
const u32 version, const char *hostname)
const u32 version,
const char *hostname,
int noresvport)
{
const struct sockaddr source = {
.sa_family = AF_UNSPEC,
@@ -296,6 +301,7 @@ struct nlm_host *nlmclnt_lookup_host(const struct sockaddr *sap,
.hostname_len = strlen(hostname),
.src_sap = &source,
.src_len = sizeof(source),
.noresvport = noresvport,
};

dprintk("lockd: %s(host='%s', vers=%u, proto=%s)\n", __func__,
@@ -417,6 +423,8 @@ nlm_bind_host(struct nlm_host *host)
*/
if (!host->h_server)
args.flags |= RPC_CLNT_CREATE_HARDRTRY;
if (host->h_noresvport)
args.flags |= RPC_CLNT_CREATE_NONPRIVPORT;

clnt = rpc_create(&args);
if (!IS_ERR(clnt))

@@ -45,7 +45,7 @@
static struct svc_program nlmsvc_program;

struct nlmsvc_binding * nlmsvc_ops;
EXPORT_SYMBOL(nlmsvc_ops);
EXPORT_SYMBOL_GPL(nlmsvc_ops);

static DEFINE_MUTEX(nlmsvc_mutex);
static unsigned int nlmsvc_users;
@@ -300,7 +300,7 @@ int lockd_up(void)
mutex_unlock(&nlmsvc_mutex);
return error;
}
EXPORT_SYMBOL(lockd_up);
EXPORT_SYMBOL_GPL(lockd_up);

/*
* Decrement the user count and bring down lockd if we're the last.
@@ -329,7 +329,7 @@ lockd_down(void)
out:
mutex_unlock(&nlmsvc_mutex);
}
EXPORT_SYMBOL(lockd_down);
EXPORT_SYMBOL_GPL(lockd_down);

#ifdef CONFIG_SYSCTL

@@ -16,6 +16,7 @@
#include <linux/mutex.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/sunrpc/svcauth_gss.h>

#include <net/inet_sock.h>

@@ -182,10 +183,34 @@ void nfs_callback_down(void)
mutex_unlock(&nfs_callback_mutex);
}

static int check_gss_callback_principal(struct nfs_client *clp,
struct svc_rqst *rqstp)
{
struct rpc_clnt *r = clp->cl_rpcclient;
char *p = svc_gss_principal(rqstp);

/*
* It might just be a normal user principal, in which case
* userspace won't bother to tell us the name at all.
*/
if (p == NULL)
return SVC_DENIED;

/* Expect a GSS_C_NT_HOSTBASED_NAME like "nfs@serverhostname" */

if (memcmp(p, "nfs@", 4) != 0)
return SVC_DENIED;
p += 4;
if (strcmp(p, r->cl_server) != 0)
return SVC_DENIED;
return SVC_OK;
}

static int nfs_callback_authenticate(struct svc_rqst *rqstp)
{
struct nfs_client *clp;
RPC_IFDEBUG(char buf[RPC_MAX_ADDRBUFLEN]);
int ret = SVC_OK;

/* Don't talk to strangers */
clp = nfs_find_client(svc_addr(rqstp), 4);
@@ -194,21 +219,22 @@ static int nfs_callback_authenticate(struct svc_rqst *rqstp)

dprintk("%s: %s NFSv4 callback!\n", __func__,
svc_print_addr(rqstp, buf, sizeof(buf)));
nfs_put_client(clp);

switch (rqstp->rq_authop->flavour) {
case RPC_AUTH_NULL:
if (rqstp->rq_proc != CB_NULL)
return SVC_DENIED;
ret = SVC_DENIED;
break;
case RPC_AUTH_UNIX:
break;
case RPC_AUTH_GSS:
/* FIXME: RPCSEC_GSS handling? */
ret = check_gss_callback_principal(clp, rqstp);
break;
default:
return SVC_DENIED;
ret = SVC_DENIED;
}
return SVC_OK;
nfs_put_client(clp);
return ret;
}

/*

@@ -143,7 +143,6 @@ static struct nfs_client *nfs_alloc_client(const struct nfs_client_initdata *cl_
clp->cl_proto = cl_init->proto;

#ifdef CONFIG_NFS_V4
init_rwsem(&clp->cl_sem);
INIT_LIST_HEAD(&clp->cl_delegations);
spin_lock_init(&clp->cl_lock);
INIT_DELAYED_WORK(&clp->cl_renewd, nfs4_renew_state);
@@ -224,31 +223,54 @@ void nfs_put_client(struct nfs_client *clp)
}
}

#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
static const struct in6_addr *nfs_map_ipv4_addr(const struct sockaddr *sa, struct in6_addr *addr_mapped)
{
switch (sa->sa_family) {
default:
return NULL;
case AF_INET6:
return &((const struct sockaddr_in6 *)sa)->sin6_addr;
break;
case AF_INET:
ipv6_addr_set_v4mapped(((const struct sockaddr_in *)sa)->sin_addr.s_addr,
addr_mapped);
return addr_mapped;
}
}

static int nfs_sockaddr_match_ipaddr(const struct sockaddr *sa1,
const struct sockaddr *sa2)
{
const struct in6_addr *addr1;
const struct in6_addr *addr2;
struct in6_addr addr1_mapped;
struct in6_addr addr2_mapped;

addr1 = nfs_map_ipv4_addr(sa1, &addr1_mapped);
if (likely(addr1 != NULL)) {
addr2 = nfs_map_ipv4_addr(sa2, &addr2_mapped);
if (likely(addr2 != NULL))
return ipv6_addr_equal(addr1, addr2);
}
return 0;
}
#else
static int nfs_sockaddr_match_ipaddr4(const struct sockaddr_in *sa1,
const struct sockaddr_in *sa2)
{
return sa1->sin_addr.s_addr == sa2->sin_addr.s_addr;
}

static int nfs_sockaddr_match_ipaddr6(const struct sockaddr_in6 *sa1,
const struct sockaddr_in6 *sa2)
{
return ipv6_addr_equal(&sa1->sin6_addr, &sa2->sin6_addr);
}

static int nfs_sockaddr_match_ipaddr(const struct sockaddr *sa1,
const struct sockaddr *sa2)
{
switch (sa1->sa_family) {
case AF_INET:
return nfs_sockaddr_match_ipaddr4((const struct sockaddr_in *)sa1,
(const struct sockaddr_in *)sa2);
case AF_INET6:
return nfs_sockaddr_match_ipaddr6((const struct sockaddr_in6 *)sa1,
(const struct sockaddr_in6 *)sa2);
}
BUG();
if (unlikely(sa1->sa_family != AF_INET || sa2->sa_family != AF_INET))
return 0;
return nfs_sockaddr_match_ipaddr4((const struct sockaddr_in *)sa1,
(const struct sockaddr_in *)sa2);
}
#endif

/*
* Find a client by IP address and protocol version
@@ -270,8 +292,6 @@ struct nfs_client *nfs_find_client(const struct sockaddr *addr, u32 nfsversion)
if (clp->rpc_ops->version != nfsversion)
continue;

if (addr->sa_family != clap->sa_family)
continue;
/* Match only the IP address, not the port number */
if (!nfs_sockaddr_match_ipaddr(addr, clap))
continue;
@@ -305,8 +325,6 @@ struct nfs_client *nfs_find_client_next(struct nfs_client *clp)
if (clp->rpc_ops->version != nfsvers)
continue;

if (sap->sa_family != clap->sa_family)
continue;
/* Match only the IP address, not the port number */
if (!nfs_sockaddr_match_ipaddr(sap, clap))
continue;
@@ -470,7 +488,7 @@ static void nfs_init_timeout_values(struct rpc_timeout *to, int proto,
static int nfs_create_rpc_client(struct nfs_client *clp,
const struct rpc_timeout *timeparms,
rpc_authflavor_t flavor,
int flags)
int discrtry, int noresvport)
{
struct rpc_clnt *clnt = NULL;
struct rpc_create_args args = {
@@ -482,9 +500,13 @@ static int nfs_create_rpc_client(struct nfs_client *clp,
.program = &nfs_program,
.version = clp->rpc_ops->version,
.authflavor = flavor,
.flags = flags,
};

if (discrtry)
args.flags |= RPC_CLNT_CREATE_DISCRTRY;
if (noresvport)
args.flags |= RPC_CLNT_CREATE_NONPRIVPORT;

if (!IS_ERR(clp->cl_rpcclient))
return 0;

@@ -522,6 +544,8 @@ static int nfs_start_lockd(struct nfs_server *server)
.protocol = server->flags & NFS_MOUNT_TCP ?
IPPROTO_TCP : IPPROTO_UDP,
.nfs_version = clp->rpc_ops->version,
.noresvport = server->flags & NFS_MOUNT_NORESVPORT ?
1 : 0,
};

if (nlm_init.nfs_version > 3)
@@ -623,7 +647,8 @@ static int nfs_init_client(struct nfs_client *clp,
* Create a client RPC handle for doing FSSTAT with UNIX auth only
* - RFC 2623, sec 2.3.2
*/
error = nfs_create_rpc_client(clp, timeparms, RPC_AUTH_UNIX, 0);
error = nfs_create_rpc_client(clp, timeparms, RPC_AUTH_UNIX,
0, data->flags & NFS_MOUNT_NORESVPORT);
if (error < 0)
goto error;
nfs_mark_client_ready(clp, NFS_CS_READY);
@@ -965,7 +990,8 @@ struct nfs_server *nfs_create_server(const struct nfs_parsed_mount_data *data,
static int nfs4_init_client(struct nfs_client *clp,
const struct rpc_timeout *timeparms,
const char *ip_addr,
rpc_authflavor_t authflavour)
rpc_authflavor_t authflavour,
int flags)
{
int error;

@@ -979,7 +1005,7 @@ static int nfs4_init_client(struct nfs_client *clp,
clp->rpc_ops = &nfs_v4_clientops;

error = nfs_create_rpc_client(clp, timeparms, authflavour,
RPC_CLNT_CREATE_DISCRTRY);
1, flags & NFS_MOUNT_NORESVPORT);
if (error < 0)
goto error;
memcpy(clp->cl_ipaddr, ip_addr, sizeof(clp->cl_ipaddr));
@@ -1030,7 +1056,8 @@ static int nfs4_set_client(struct nfs_server *server,
error = PTR_ERR(clp);
goto error;
}
error = nfs4_init_client(clp, timeparms, ip_addr, authflavour);
error = nfs4_init_client(clp, timeparms, ip_addr, authflavour,
server->flags);
if (error < 0)
goto error_put;

@@ -1059,6 +1086,10 @@ static int nfs4_init_server(struct nfs_server *server,
nfs_init_timeout_values(&timeparms, data->nfs_server.protocol,
data->timeo, data->retrans);

/* Initialise the client representation from the mount data */
server->flags = data->flags;
server->caps |= NFS_CAP_ATOMIC_OPEN;

/* Get a client record */
error = nfs4_set_client(server,
data->nfs_server.hostname,
@@ -1071,10 +1102,6 @@ static int nfs4_init_server(struct nfs_server *server,
if (error < 0)
goto error;

/* Initialise the client representation from the mount data */
server->flags = data->flags;
server->caps |= NFS_CAP_ATOMIC_OPEN;

if (data->rsize)
server->rsize = nfs_block_size(data->rsize, NULL);
if (data->wsize)
@@ -1177,6 +1204,10 @@ struct nfs_server *nfs4_create_referral_server(struct nfs_clone_mount *data,
parent_server = NFS_SB(data->sb);
parent_client = parent_server->nfs_client;

/* Initialise the client representation from the parent server */
nfs_server_copy_userdata(server, parent_server);
server->caps |= NFS_CAP_ATOMIC_OPEN;

/* Get a client representation.
* Note: NFSv4 always uses TCP, */
error = nfs4_set_client(server, data->hostname,
@@ -1189,10 +1220,6 @@ struct nfs_server *nfs4_create_referral_server(struct nfs_clone_mount *data,
if (error < 0)
goto error;

/* Initialise the client representation from the parent server */
nfs_server_copy_userdata(server, parent_server);
server->caps |= NFS_CAP_ATOMIC_OPEN;

error = nfs_init_server_rpcclient(server, parent_server->client->cl_timeout, data->authflavor);
if (error < 0)
goto error;

@@ -43,6 +43,27 @@ static void nfs_free_delegation(struct nfs_delegation *delegation)
put_rpccred(cred);
}

void nfs_mark_delegation_referenced(struct nfs_delegation *delegation)
{
set_bit(NFS_DELEGATION_REFERENCED, &delegation->flags);
}

int nfs_have_delegation(struct inode *inode, fmode_t flags)
{
struct nfs_delegation *delegation;
int ret = 0;

flags &= FMODE_READ|FMODE_WRITE;
rcu_read_lock();
delegation = rcu_dereference(NFS_I(inode)->delegation);
if (delegation != NULL && (delegation->type & flags) == flags) {
nfs_mark_delegation_referenced(delegation);
ret = 1;
}
rcu_read_unlock();
return ret;
}

static int nfs_delegation_claim_locks(struct nfs_open_context *ctx, struct nfs4_state *state)
{
struct inode *inode = state->inode;
@@ -119,7 +140,7 @@ void nfs_inode_reclaim_delegation(struct inode *inode, struct rpc_cred *cred, st
delegation->maxsize = res->maxsize;
oldcred = delegation->cred;
delegation->cred = get_rpccred(cred);
delegation->flags &= ~NFS_DELEGATION_NEED_RECLAIM;
clear_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags);
NFS_I(inode)->delegation_state = delegation->type;
smp_wmb();
put_rpccred(oldcred);
@@ -134,19 +155,35 @@ static int nfs_do_return_delegation(struct inode *inode, struct nfs_delegation *
return res;
}

static struct inode *nfs_delegation_grab_inode(struct nfs_delegation *delegation)
{
struct inode *inode = NULL;

spin_lock(&delegation->lock);
if (delegation->inode != NULL)
inode = igrab(delegation->inode);
spin_unlock(&delegation->lock);
return inode;
}

static struct nfs_delegation *nfs_detach_delegation_locked(struct nfs_inode *nfsi, const nfs4_stateid *stateid)
{
struct nfs_delegation *delegation = rcu_dereference(nfsi->delegation);

if (delegation == NULL)
goto nomatch;
spin_lock(&delegation->lock);
if (stateid != NULL && memcmp(delegation->stateid.data, stateid->data,
sizeof(delegation->stateid.data)) != 0)
goto nomatch;
goto nomatch_unlock;
list_del_rcu(&delegation->super_list);
delegation->inode = NULL;
nfsi->delegation_state = 0;
rcu_assign_pointer(nfsi->delegation, NULL);
spin_unlock(&delegation->lock);
return delegation;
nomatch_unlock:
spin_unlock(&delegation->lock);
nomatch:
return NULL;
}
@@ -172,6 +209,8 @@ int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct
delegation->change_attr = nfsi->change_attr;
delegation->cred = get_rpccred(cred);
delegation->inode = inode;
delegation->flags = 1<<NFS_DELEGATION_REFERENCED;
spin_lock_init(&delegation->lock);

spin_lock(&clp->cl_lock);
if (rcu_dereference(nfsi->delegation) != NULL) {
@@ -226,21 +265,46 @@ static void nfs_msync_inode(struct inode *inode)
*/
static int __nfs_inode_return_delegation(struct inode *inode, struct nfs_delegation *delegation)
{
struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
struct nfs_inode *nfsi = NFS_I(inode);

nfs_msync_inode(inode);
down_read(&clp->cl_sem);
/* Guard against new delegated open calls */
down_write(&nfsi->rwsem);
nfs_delegation_claim_opens(inode, &delegation->stateid);
up_write(&nfsi->rwsem);
up_read(&clp->cl_sem);
nfs_msync_inode(inode);

return nfs_do_return_delegation(inode, delegation, 1);
}

/*
* Return all delegations that have been marked for return
*/
void nfs_client_return_marked_delegations(struct nfs_client *clp)
{
struct nfs_delegation *delegation;
struct inode *inode;

restart:
rcu_read_lock();
list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) {
if (!test_and_clear_bit(NFS_DELEGATION_RETURN, &delegation->flags))
continue;
inode = nfs_delegation_grab_inode(delegation);
if (inode == NULL)
continue;
spin_lock(&clp->cl_lock);
delegation = nfs_detach_delegation_locked(NFS_I(inode), NULL);
spin_unlock(&clp->cl_lock);
rcu_read_unlock();
if (delegation != NULL)
__nfs_inode_return_delegation(inode, delegation);
iput(inode);
goto restart;
}
rcu_read_unlock();
}

/*
* This function returns the delegation without reclaiming opens
* or protecting against delegation reclaims.
@@ -279,83 +343,55 @@ int nfs_inode_return_delegation(struct inode *inode)
return err;
}

static void nfs_mark_return_delegation(struct nfs_client *clp, struct nfs_delegation *delegation)
{
set_bit(NFS_DELEGATION_RETURN, &delegation->flags);
set_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state);
}

/*
* Return all delegations associated to a super block
*/
void nfs_return_all_delegations(struct super_block *sb)
void nfs_super_return_all_delegations(struct super_block *sb)
{
struct nfs_client *clp = NFS_SB(sb)->nfs_client;
struct nfs_delegation *delegation;
struct inode *inode;

if (clp == NULL)
return;
restart:
rcu_read_lock();
list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) {
if (delegation->inode->i_sb != sb)
continue;
inode = igrab(delegation->inode);
if (inode == NULL)
continue;
spin_lock(&clp->cl_lock);
delegation = nfs_detach_delegation_locked(NFS_I(inode), NULL);
spin_unlock(&clp->cl_lock);
rcu_read_unlock();
if (delegation != NULL)
__nfs_inode_return_delegation(inode, delegation);
iput(inode);
goto restart;
spin_lock(&delegation->lock);
if (delegation->inode != NULL && delegation->inode->i_sb == sb)
set_bit(NFS_DELEGATION_RETURN, &delegation->flags);
spin_unlock(&delegation->lock);
}
rcu_read_unlock();
nfs_client_return_marked_delegations(clp);
}

static void nfs_client_mark_return_all_delegations(struct nfs_client *clp)
{
struct nfs_delegation *delegation;

rcu_read_lock();
list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) {
set_bit(NFS_DELEGATION_RETURN, &delegation->flags);
set_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state);
}
rcu_read_unlock();
}

static int nfs_do_expire_all_delegations(void *ptr)
static void nfs_delegation_run_state_manager(struct nfs_client *clp)
{
struct nfs_client *clp = ptr;
struct nfs_delegation *delegation;
struct inode *inode;

allow_signal(SIGKILL);
restart:
if (test_bit(NFS4CLNT_STATE_RECOVER, &clp->cl_state) != 0)
goto out;
if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) == 0)
goto out;
rcu_read_lock();
list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) {
inode = igrab(delegation->inode);
if (inode == NULL)
continue;
spin_lock(&clp->cl_lock);
delegation = nfs_detach_delegation_locked(NFS_I(inode), NULL);
spin_unlock(&clp->cl_lock);
rcu_read_unlock();
if (delegation)
__nfs_inode_return_delegation(inode, delegation);
iput(inode);
goto restart;
}
rcu_read_unlock();
out:
nfs_put_client(clp);
module_put_and_exit(0);
if (test_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state))
nfs4_schedule_state_manager(clp);
}

void nfs_expire_all_delegations(struct nfs_client *clp)
{
struct task_struct *task;

__module_get(THIS_MODULE);
atomic_inc(&clp->cl_count);
task = kthread_run(nfs_do_expire_all_delegations, clp,
"%s-delegreturn",
rpc_peeraddr2str(clp->cl_rpcclient,
RPC_DISPLAY_ADDR));
if (!IS_ERR(task))
return;
nfs_put_client(clp);
module_put(THIS_MODULE);
nfs_client_mark_return_all_delegations(clp);
nfs_delegation_run_state_manager(clp);
}

/*
@@ -363,68 +399,29 @@ void nfs_expire_all_delegations(struct nfs_client *clp)
*/
void nfs_handle_cb_pathdown(struct nfs_client *clp)
{
struct nfs_delegation *delegation;
struct inode *inode;

if (clp == NULL)
return;
restart:
nfs_client_mark_return_all_delegations(clp);
}

static void nfs_client_mark_return_unreferenced_delegations(struct nfs_client *clp)
{
struct nfs_delegation *delegation;

rcu_read_lock();
list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) {
inode = igrab(delegation->inode);
if (inode == NULL)
if (test_and_clear_bit(NFS_DELEGATION_REFERENCED, &delegation->flags))
continue;
spin_lock(&clp->cl_lock);
delegation = nfs_detach_delegation_locked(NFS_I(inode), NULL);
spin_unlock(&clp->cl_lock);
rcu_read_unlock();
if (delegation != NULL)
__nfs_inode_return_delegation(inode, delegation);
iput(inode);
goto restart;
set_bit(NFS_DELEGATION_RETURN, &delegation->flags);
set_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state);
}
rcu_read_unlock();
}

struct recall_threadargs {
struct inode *inode;
struct nfs_client *clp;
const nfs4_stateid *stateid;

struct completion started;
int result;
};

static int recall_thread(void *data)
void nfs_expire_unreferenced_delegations(struct nfs_client *clp)
{
struct recall_threadargs *args = (struct recall_threadargs *)data;
struct inode *inode = igrab(args->inode);
struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
struct nfs_inode *nfsi = NFS_I(inode);
struct nfs_delegation *delegation;

daemonize("nfsv4-delegreturn");

nfs_msync_inode(inode);
down_read(&clp->cl_sem);
down_write(&nfsi->rwsem);
spin_lock(&clp->cl_lock);
delegation = nfs_detach_delegation_locked(nfsi, args->stateid);
if (delegation != NULL)
args->result = 0;
else
args->result = -ENOENT;
spin_unlock(&clp->cl_lock);
complete(&args->started);
nfs_delegation_claim_opens(inode, args->stateid);
up_write(&nfsi->rwsem);
up_read(&clp->cl_sem);
nfs_msync_inode(inode);

if (delegation != NULL)
nfs_do_return_delegation(inode, delegation, 1);
iput(inode);
module_put_and_exit(0);
nfs_client_mark_return_unreferenced_delegations(clp);
nfs_delegation_run_state_manager(clp);
}

/*
@@ -432,22 +429,20 @@ static int recall_thread(void *data)
*/
int nfs_async_inode_return_delegation(struct inode *inode, const nfs4_stateid *stateid)
{
struct recall_threadargs data = {
.inode = inode,
.stateid = stateid,
};
int status;
struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
struct nfs_delegation *delegation;

init_completion(&data.started);
__module_get(THIS_MODULE);
status = kernel_thread(recall_thread, &data, CLONE_KERNEL);
if (status < 0)
goto out_module_put;
wait_for_completion(&data.started);
return data.result;
out_module_put:
module_put(THIS_MODULE);
return status;
rcu_read_lock();
delegation = rcu_dereference(NFS_I(inode)->delegation);
if (delegation == NULL || memcmp(delegation->stateid.data, stateid->data,
sizeof(delegation->stateid.data)) != 0) {
rcu_read_unlock();
return -ENOENT;
}
nfs_mark_return_delegation(clp, delegation);
rcu_read_unlock();
nfs_delegation_run_state_manager(clp);
return 0;
}

/*
@@ -459,10 +454,14 @@ struct inode *nfs_delegation_find_inode(struct nfs_client *clp, const struct nfs
struct inode *res = NULL;
rcu_read_lock();
list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) {
if (nfs_compare_fh(fhandle, &NFS_I(delegation->inode)->fh) == 0) {
spin_lock(&delegation->lock);
if (delegation->inode != NULL &&
nfs_compare_fh(fhandle, &NFS_I(delegation->inode)->fh) == 0) {
res = igrab(delegation->inode);
break;
}
spin_unlock(&delegation->lock);
if (res != NULL)
break;
}
rcu_read_unlock();
return res;
@@ -476,7 +475,7 @@ void nfs_delegation_mark_reclaim(struct nfs_client *clp)
struct nfs_delegation *delegation;
rcu_read_lock();
list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list)
delegation->flags |= NFS_DELEGATION_NEED_RECLAIM;
set_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags);
rcu_read_unlock();
}

@@ -486,17 +485,22 @@ void nfs_delegation_mark_reclaim(struct nfs_client *clp)
void nfs_delegation_reap_unclaimed(struct nfs_client *clp)
{
struct nfs_delegation *delegation;
struct inode *inode;
restart:
rcu_read_lock();
list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) {
if ((delegation->flags & NFS_DELEGATION_NEED_RECLAIM) == 0)
if (test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags) == 0)
continue;
inode = nfs_delegation_grab_inode(delegation);
if (inode == NULL)
continue;
spin_lock(&clp->cl_lock);
delegation = nfs_detach_delegation_locked(NFS_I(delegation->inode), NULL);
delegation = nfs_detach_delegation_locked(NFS_I(inode), NULL);
spin_unlock(&clp->cl_lock);
rcu_read_unlock();
if (delegation != NULL)
nfs_free_delegation(delegation);
iput(inode);
goto restart;
}
rcu_read_unlock();

@@ -17,14 +17,20 @@ struct nfs_delegation {
struct rpc_cred *cred;
struct inode *inode;
nfs4_stateid stateid;
int type;
#define NFS_DELEGATION_NEED_RECLAIM 1
long flags;
fmode_t type;
loff_t maxsize;
__u64 change_attr;
unsigned long flags;
spinlock_t lock;
struct rcu_head rcu;
};

enum {
NFS_DELEGATION_NEED_RECLAIM = 0,
NFS_DELEGATION_RETURN,
NFS_DELEGATION_REFERENCED,
};

int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res);
void nfs_inode_reclaim_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res);
int nfs_inode_return_delegation(struct inode *inode);
@@ -32,9 +38,11 @@ int nfs_async_inode_return_delegation(struct inode *inode, const nfs4_stateid *s
void nfs_inode_return_delegation_noreclaim(struct inode *inode);

struct inode *nfs_delegation_find_inode(struct nfs_client *clp, const struct nfs_fh *fhandle);
void nfs_return_all_delegations(struct super_block *sb);
void nfs_super_return_all_delegations(struct super_block *sb);
void nfs_expire_all_delegations(struct nfs_client *clp);
void nfs_expire_unreferenced_delegations(struct nfs_client *clp);
void nfs_handle_cb_pathdown(struct nfs_client *clp);
void nfs_client_return_marked_delegations(struct nfs_client *clp);

void nfs_delegation_mark_reclaim(struct nfs_client *clp);
void nfs_delegation_reap_unclaimed(struct nfs_client *clp);
@@ -45,22 +53,11 @@ int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state
int nfs4_lock_delegation_recall(struct nfs4_state *state, struct file_lock *fl);
int nfs4_copy_delegation_stateid(nfs4_stateid *dst, struct inode *inode);

static inline int nfs_have_delegation(struct inode *inode, int flags)
{
struct nfs_delegation *delegation;
int ret = 0;

flags &= FMODE_READ|FMODE_WRITE;
rcu_read_lock();
delegation = rcu_dereference(NFS_I(inode)->delegation);
if (delegation != NULL && (delegation->type & flags) == flags)
ret = 1;
rcu_read_unlock();
return ret;
}
void nfs_mark_delegation_referenced(struct nfs_delegation *delegation);
int nfs_have_delegation(struct inode *inode, fmode_t flags);

#else
static inline int nfs_have_delegation(struct inode *inode, int flags)
static inline int nfs_have_delegation(struct inode *inode, fmode_t flags)
{
return 0;
}

fs/nfs/dir.c
@@ -799,6 +799,9 @@ static int nfs_lookup_revalidate(struct dentry * dentry, struct nameidata *nd)
goto out_bad;
}

if (nfs_have_delegation(inode, FMODE_READ))
goto out_set_verifier;

/* Force a full look up iff the parent directory has changed */
if (!nfs_is_exclusive_create(dir, nd) && nfs_check_verifier(dir, dentry)) {
if (nfs_lookup_verify_inode(inode, nd))
@@ -817,6 +820,7 @@ static int nfs_lookup_revalidate(struct dentry * dentry, struct nameidata *nd)
if ((error = nfs_refresh_inode(inode, &fattr)) != 0)
goto out_bad;

out_set_verifier:
nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
out_valid:
dput(parent);
@@ -973,7 +977,7 @@ struct dentry_operations nfs4_dentry_operations = {
* Use intent information to determine whether we need to substitute
* the NFSv4-style stateful OPEN for the LOOKUP call
*/
static int is_atomic_open(struct inode *dir, struct nameidata *nd)
static int is_atomic_open(struct nameidata *nd)
{
if (nd == NULL || nfs_lookup_check_intent(nd, LOOKUP_OPEN) == 0)
return 0;
@@ -996,7 +1000,7 @@ static struct dentry *nfs_atomic_lookup(struct inode *dir, struct dentry *dentry
dir->i_sb->s_id, dir->i_ino, dentry->d_name.name);

/* Check that we are indeed trying to open this file */
if (!is_atomic_open(dir, nd))
if (!is_atomic_open(nd))
goto no_open;

if (dentry->d_name.len > NFS_SERVER(dir)->namelen) {
@@ -1047,10 +1051,10 @@ static int nfs_open_revalidate(struct dentry *dentry, struct nameidata *nd)
struct inode *dir;
int openflags, ret = 0;

if (!is_atomic_open(nd))
goto no_open;
parent = dget_parent(dentry);
dir = parent->d_inode;
if (!is_atomic_open(dir, nd))
goto no_open;
/* We can't create new files in nfs_open_revalidate(), so we
* optimize away revalidation of negative dentries.
*/
@@ -1062,11 +1066,11 @@ static int nfs_open_revalidate(struct dentry *dentry, struct nameidata *nd)

/* NFS only supports OPEN on regular files */
if (!S_ISREG(inode->i_mode))
goto no_open;
goto no_open_dput;
openflags = nd->intent.open.flags;
/* We cannot do exclusive creation on a positive dentry */
if ((openflags & (O_CREAT|O_EXCL)) == (O_CREAT|O_EXCL))
goto no_open;
goto no_open_dput;
/* We can't create new files, or truncate existing ones here */
openflags &= ~(O_CREAT|O_TRUNC);

@@ -1081,10 +1085,9 @@ static int nfs_open_revalidate(struct dentry *dentry, struct nameidata *nd)
if (!ret)
d_drop(dentry);
return ret;
no_open:
no_open_dput:
dput(parent);
if (inode != NULL && nfs_have_delegation(inode, FMODE_READ))
return 1;
no_open:
return nfs_lookup_revalidate(dentry, nd);
}
#endif /* CONFIG_NFSV4 */
@@ -1794,7 +1797,8 @@ static int nfs_access_get_cached(struct inode *inode, struct rpc_cred *cred, str
cache = nfs_access_search_rbtree(inode, cred);
if (cache == NULL)
goto out;
if (!time_in_range(jiffies, cache->jiffies, cache->jiffies + nfsi->attrtimeo))
if (!nfs_have_delegation(inode, FMODE_READ) &&
!time_in_range_open(jiffies, cache->jiffies, cache->jiffies + nfsi->attrtimeo))
goto out_stale;
res->jiffies = cache->jiffies;
res->cred = cache->cred;

@@ -592,7 +592,7 @@ static void nfs_file_set_open_context(struct file *filp, struct nfs_open_context
/*
* Given an inode, search for an open context with the desired characteristics
*/
struct nfs_open_context *nfs_find_open_context(struct inode *inode, struct rpc_cred *cred, int mode)
struct nfs_open_context *nfs_find_open_context(struct inode *inode, struct rpc_cred *cred, fmode_t mode)
{
struct nfs_inode *nfsi = NFS_I(inode);
struct nfs_open_context *pos, *ctx = NULL;
@@ -712,14 +712,7 @@ int nfs_attribute_timeout(struct inode *inode)

if (nfs_have_delegation(inode, FMODE_READ))
return 0;
/*
* Special case: if the attribute timeout is set to 0, then always
* treat the cache as having expired (unless holding
* a delegation).
*/
if (nfsi->attrtimeo == 0)
return 1;
return !time_in_range(jiffies, nfsi->read_cache_jiffies, nfsi->read_cache_jiffies + nfsi->attrtimeo);
return !time_in_range_open(jiffies, nfsi->read_cache_jiffies, nfsi->read_cache_jiffies + nfsi->attrtimeo);
}

/**
@@ -1182,7 +1175,7 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
nfsi->attrtimeo_timestamp = now;
nfsi->attr_gencount = nfs_inc_attr_generation_counter();
} else {
if (!time_in_range(now, nfsi->attrtimeo_timestamp, nfsi->attrtimeo_timestamp + nfsi->attrtimeo)) {
if (!time_in_range_open(now, nfsi->attrtimeo_timestamp, nfsi->attrtimeo_timestamp + nfsi->attrtimeo)) {
if ((nfsi->attrtimeo <<= 1) > NFS_MAXATTRTIMEO(inode))
nfsi->attrtimeo = NFS_MAXATTRTIMEO(inode);
nfsi->attrtimeo_timestamp = now;

@@ -63,6 +63,20 @@ struct nfs_parsed_mount_data {
struct security_mnt_opts lsm_opts;
};

/* mount_clnt.c */
struct nfs_mount_request {
struct sockaddr *sap;
size_t salen;
char *hostname;
char *dirpath;
u32 version;
unsigned short protocol;
struct nfs_fh *fh;
int noresvport;
};

extern int nfs_mount(struct nfs_mount_request *info);

/* client.c */
extern struct rpc_program nfs_program;

@@ -29,47 +29,43 @@ struct mnt_fhstatus {

/**
* nfs_mount - Obtain an NFS file handle for the given host and path
* @addr: pointer to server's address
* @len: size of server's address
* @hostname: name of server host, or NULL
* @path: pointer to string containing export path to mount
* @version: mount version to use for this request
* @protocol: transport protocol to use for thie request
* @fh: pointer to location to place returned file handle
* @info: pointer to mount request arguments
*
* Uses default timeout parameters specified by underlying transport.
*/
int nfs_mount(struct sockaddr *addr, size_t len, char *hostname, char *path,
int version, int protocol, struct nfs_fh *fh)
int nfs_mount(struct nfs_mount_request *info)
{
struct mnt_fhstatus result = {
.fh = fh
.fh = info->fh
};
struct rpc_message msg = {
.rpc_argp = path,
.rpc_argp = info->dirpath,
.rpc_resp = &result,
};
struct rpc_create_args args = {
.protocol = protocol,
.address = addr,
.addrsize = len,
.servername = hostname,
.protocol = info->protocol,
.address = info->sap,
.addrsize = info->salen,
.servername = info->hostname,
.program = &mnt_program,
.version = version,
.version = info->version,
.authflavor = RPC_AUTH_UNIX,
.flags = 0,
};
struct rpc_clnt *mnt_clnt;
int status;

dprintk("NFS: sending MNT request for %s:%s\n",
(hostname ? hostname : "server"), path);
(info->hostname ? info->hostname : "server"),
info->dirpath);

if (info->noresvport)
args.flags |= RPC_CLNT_CREATE_NONPRIVPORT;

mnt_clnt = rpc_create(&args);
if (IS_ERR(mnt_clnt))
goto out_clnt_err;

if (version == NFS_MNT3_VERSION)
if (info->version == NFS_MNT3_VERSION)
msg.rpc_proc = &mnt_clnt->cl_procinfo[MOUNTPROC3_MNT];
else
msg.rpc_proc = &mnt_clnt->cl_procinfo[MNTPROC_MNT];

@@ -38,8 +38,12 @@ struct idmap;
((err) != NFSERR_NOFILEHANDLE))

enum nfs4_client_state {
NFS4CLNT_STATE_RECOVER = 0,
NFS4CLNT_MANAGER_RUNNING = 0,
NFS4CLNT_CHECK_LEASE,
NFS4CLNT_LEASE_EXPIRED,
NFS4CLNT_RECLAIM_REBOOT,
NFS4CLNT_RECLAIM_NOGRACE,
NFS4CLNT_DELEGRETURN,
};

/*
@@ -90,12 +94,18 @@ struct nfs4_state_owner {

spinlock_t so_lock;
atomic_t so_count;
unsigned long so_flags;
struct list_head so_states;
struct list_head so_delegations;
struct nfs_seqid_counter so_seqid;
struct rpc_sequence so_sequence;
};

enum {
NFS_OWNER_RECLAIM_REBOOT,
NFS_OWNER_RECLAIM_NOGRACE
};

/*
* struct nfs4_state maintains the client-side state for a given
* (state_owner,inode) tuple (OPEN) or state_owner (LOCK).
@@ -128,6 +138,8 @@ enum {
NFS_O_RDONLY_STATE, /* OPEN stateid has read-only state */
NFS_O_WRONLY_STATE, /* OPEN stateid has write-only state */
NFS_O_RDWR_STATE, /* OPEN stateid has read/write state */
NFS_STATE_RECLAIM_REBOOT, /* OPEN stateid server rebooted */
NFS_STATE_RECLAIM_NOGRACE, /* OPEN stateid needs to recover state */
};

struct nfs4_state {
@@ -149,7 +161,7 @@ struct nfs4_state {
unsigned int n_rdonly; /* Number of read-only references */
unsigned int n_wronly; /* Number of write-only references */
unsigned int n_rdwr; /* Number of read/write references */
int state; /* State on the server (R,W, or RW) */
fmode_t state; /* State on the server (R,W, or RW) */
atomic_t count;
};

@@ -157,9 +169,12 @@ struct nfs4_state {
struct nfs4_exception {
long timeout;
int retry;
struct nfs4_state *state;
};

struct nfs4_state_recovery_ops {
int owner_flag_bit;
int state_flag_bit;
int (*recover_open)(struct nfs4_state_owner *, struct nfs4_state *);
int (*recover_lock)(struct nfs4_state *, struct file_lock *);
};
@@ -174,7 +189,6 @@ extern ssize_t nfs4_listxattr(struct dentry *, char *, size_t);

/* nfs4proc.c */
extern int nfs4_map_errors(int err);
extern int nfs4_proc_setclientid(struct nfs_client *, u32, unsigned short, struct rpc_cred *);
extern int nfs4_proc_setclientid_confirm(struct nfs_client *, struct rpc_cred *);
extern int nfs4_proc_async_renew(struct nfs_client *, struct rpc_cred *);
@@ -187,7 +201,7 @@ extern int nfs4_proc_fs_locations(struct inode *dir, const struct qstr *name,
struct nfs4_fs_locations *fs_locations, struct page *page);

extern struct nfs4_state_recovery_ops nfs4_reboot_recovery_ops;
extern struct nfs4_state_recovery_ops nfs4_network_partition_recovery_ops;
extern struct nfs4_state_recovery_ops nfs4_nograce_recovery_ops;

extern const u32 nfs4_fattr_bitmap[2];
extern const u32 nfs4_statfs_bitmap[2];
@@ -202,16 +216,18 @@ extern void nfs4_kill_renewd(struct nfs_client *);
extern void nfs4_renew_state(struct work_struct *);

/* nfs4state.c */
struct rpc_cred *nfs4_get_renew_cred(struct nfs_client *clp);
struct rpc_cred *nfs4_get_renew_cred_locked(struct nfs_client *clp);

extern struct nfs4_state_owner * nfs4_get_state_owner(struct nfs_server *, struct rpc_cred *);
extern void nfs4_put_state_owner(struct nfs4_state_owner *);
extern struct nfs4_state * nfs4_get_open_state(struct inode *, struct nfs4_state_owner *);
extern void nfs4_put_open_state(struct nfs4_state *);
extern void nfs4_close_state(struct path *, struct nfs4_state *, mode_t);
extern void nfs4_close_sync(struct path *, struct nfs4_state *, mode_t);
extern void nfs4_state_set_mode_locked(struct nfs4_state *, mode_t);
extern void nfs4_close_state(struct path *, struct nfs4_state *, fmode_t);
extern void nfs4_close_sync(struct path *, struct nfs4_state *, fmode_t);
extern void nfs4_state_set_mode_locked(struct nfs4_state *, fmode_t);
extern void nfs4_schedule_state_recovery(struct nfs_client *);
extern void nfs4_schedule_state_manager(struct nfs_client *);
extern int nfs4_state_mark_reclaim_nograce(struct nfs_client *clp, struct nfs4_state *state);
extern void nfs4_put_lock_state(struct nfs4_lock_state *lsp);
extern int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl);
extern void nfs4_copy_stateid(nfs4_stateid *, struct nfs4_state *, fl_owner_t);

@@ -62,14 +62,12 @@
struct nfs4_opendata;
static int _nfs4_proc_open(struct nfs4_opendata *data);
static int nfs4_do_fsinfo(struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *);
static int nfs4_async_handle_error(struct rpc_task *, const struct nfs_server *);
static int nfs4_handle_exception(const struct nfs_server *server, int errorcode, struct nfs4_exception *exception);
static int nfs4_wait_clnt_recover(struct rpc_clnt *clnt, struct nfs_client *clp);
static int nfs4_async_handle_error(struct rpc_task *, const struct nfs_server *, struct nfs4_state *);
static int _nfs4_proc_lookup(struct inode *dir, const struct qstr *name, struct nfs_fh *fhandle, struct nfs_fattr *fattr);
static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr);

/* Prevent leaks of NFSv4 errors into userland */
int nfs4_map_errors(int err)
static int nfs4_map_errors(int err)
{
if (err < -1000) {
dprintk("%s could not handle NFSv4 error %d\n",
@@ -195,6 +193,83 @@ static void nfs4_setup_readdir(u64 cookie, __be32 *verifier, struct dentry *dent
kunmap_atomic(start, KM_USER0);
}

static int nfs4_wait_bit_killable(void *word)
{
if (fatal_signal_pending(current))
return -ERESTARTSYS;
schedule();
return 0;
}

static int nfs4_wait_clnt_recover(struct nfs_client *clp)
{
int res;

might_sleep();

res = wait_on_bit(&clp->cl_state, NFS4CLNT_MANAGER_RUNNING,
nfs4_wait_bit_killable, TASK_KILLABLE);
return res;
}

static int nfs4_delay(struct rpc_clnt *clnt, long *timeout)
{
int res = 0;

might_sleep();

if (*timeout <= 0)
*timeout = NFS4_POLL_RETRY_MIN;
if (*timeout > NFS4_POLL_RETRY_MAX)
*timeout = NFS4_POLL_RETRY_MAX;
schedule_timeout_killable(*timeout);
if (fatal_signal_pending(current))
res = -ERESTARTSYS;
*timeout <<= 1;
return res;
}

/* This is the error handling routine for processes that are allowed
* to sleep.
*/
static int nfs4_handle_exception(const struct nfs_server *server, int errorcode, struct nfs4_exception *exception)
{
struct nfs_client *clp = server->nfs_client;
struct nfs4_state *state = exception->state;
int ret = errorcode;

exception->retry = 0;
switch(errorcode) {
case 0:
return 0;
case -NFS4ERR_ADMIN_REVOKED:
case -NFS4ERR_BAD_STATEID:
case -NFS4ERR_OPENMODE:
if (state == NULL)
break;
nfs4_state_mark_reclaim_nograce(clp, state);
case -NFS4ERR_STALE_CLIENTID:
case -NFS4ERR_STALE_STATEID:
case -NFS4ERR_EXPIRED:
nfs4_schedule_state_recovery(clp);
ret = nfs4_wait_clnt_recover(clp);
if (ret == 0)
exception->retry = 1;
break;
case -NFS4ERR_FILE_OPEN:
case -NFS4ERR_GRACE:
case -NFS4ERR_DELAY:
ret = nfs4_delay(server->client, &exception->timeout);
if (ret != 0)
break;
case -NFS4ERR_OLD_STATEID:
exception->retry = 1;
}
/* We failed to handle the error */
return nfs4_map_errors(ret);
}


static void renew_lease(const struct nfs_server *server, unsigned long timestamp)
{
struct nfs_client *clp = server->nfs_client;
@@ -248,7 +323,7 @@ static void nfs4_init_opendata_res(struct nfs4_opendata *p)
}

static struct nfs4_opendata *nfs4_opendata_alloc(struct path *path,
struct nfs4_state_owner *sp, int flags,
struct nfs4_state_owner *sp, fmode_t fmode, int flags,
const struct iattr *attrs)
{
struct dentry *parent = dget_parent(path->dentry);
@@ -268,7 +343,8 @@ static struct nfs4_opendata *nfs4_opendata_alloc(struct path *path,
p->owner = sp;
atomic_inc(&sp->so_count);
p->o_arg.fh = NFS_FH(dir);
p->o_arg.open_flags = flags,
p->o_arg.open_flags = flags;
p->o_arg.fmode = fmode & (FMODE_READ|FMODE_WRITE);
p->o_arg.clientid = server->nfs_client->cl_clientid;
p->o_arg.id = sp->so_owner_id.id;
p->o_arg.name = &p->path.dentry->d_name;
@@ -324,10 +400,13 @@ static int nfs4_wait_for_completion_rpc_task(struct rpc_task *task)
return ret;
}

static int can_open_cached(struct nfs4_state *state, int mode)
static int can_open_cached(struct nfs4_state *state, fmode_t mode, int open_mode)
{
int ret = 0;
switch (mode & (FMODE_READ|FMODE_WRITE|O_EXCL)) {

if (open_mode & O_EXCL)
goto out;
switch (mode & (FMODE_READ|FMODE_WRITE)) {
case FMODE_READ:
ret |= test_bit(NFS_O_RDONLY_STATE, &state->flags) != 0;
break;
@@ -337,21 +416,23 @@ static int can_open_cached(struct nfs4_state *state, int mode)
case FMODE_READ|FMODE_WRITE:
ret |= test_bit(NFS_O_RDWR_STATE, &state->flags) != 0;
}
out:
return ret;
}

static int can_open_delegated(struct nfs_delegation *delegation, mode_t open_flags)
static int can_open_delegated(struct nfs_delegation *delegation, fmode_t fmode)
{
if ((delegation->type & open_flags) != open_flags)
if ((delegation->type & fmode) != fmode)
return 0;
if (delegation->flags & NFS_DELEGATION_NEED_RECLAIM)
if (test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags))
return 0;
nfs_mark_delegation_referenced(delegation);
return 1;
}

static void update_open_stateflags(struct nfs4_state *state, mode_t open_flags)
static void update_open_stateflags(struct nfs4_state *state, fmode_t fmode)
{
switch (open_flags) {
switch (fmode) {
case FMODE_WRITE:
state->n_wronly++;
break;
@@ -361,15 +442,15 @@ static void update_open_stateflags(struct nfs4_state *state, mode_t open_flags)
case FMODE_READ|FMODE_WRITE:
state->n_rdwr++;
}
nfs4_state_set_mode_locked(state, state->state | open_flags);
nfs4_state_set_mode_locked(state, state->state | fmode);
}

static void nfs_set_open_stateid_locked(struct nfs4_state *state, nfs4_stateid *stateid, int open_flags)
static void nfs_set_open_stateid_locked(struct nfs4_state *state, nfs4_stateid *stateid, fmode_t fmode)
{
if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
memcpy(state->stateid.data, stateid->data, sizeof(state->stateid.data));
memcpy(state->open_stateid.data, stateid->data, sizeof(state->open_stateid.data));
switch (open_flags) {
switch (fmode) {
case FMODE_READ:
set_bit(NFS_O_RDONLY_STATE, &state->flags);
break;
@@ -381,16 +462,15 @@ static void nfs_set_open_stateid_locked(struct nfs4_state *state, nfs4_stateid *
}
}

static void nfs_set_open_stateid(struct nfs4_state *state, nfs4_stateid *stateid, int open_flags)
static void nfs_set_open_stateid(struct nfs4_state *state, nfs4_stateid *stateid, fmode_t fmode)
{
write_seqlock(&state->seqlock);
nfs_set_open_stateid_locked(state, stateid, open_flags);
nfs_set_open_stateid_locked(state, stateid, fmode);
write_sequnlock(&state->seqlock);
}

static void update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_stateid, nfs4_stateid *deleg_stateid, int open_flags)
static void __update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_stateid, const nfs4_stateid *deleg_stateid, fmode_t fmode)
{
open_flags &= (FMODE_READ|FMODE_WRITE);
/*
* Protect the call to nfs4_state_set_mode_locked and
* serialise the stateid update
@@ -401,20 +481,60 @@ static void update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_sta
set_bit(NFS_DELEGATED_STATE, &state->flags);
}
if (open_stateid != NULL)
nfs_set_open_stateid_locked(state, open_stateid, open_flags);
nfs_set_open_stateid_locked(state, open_stateid, fmode);
write_sequnlock(&state->seqlock);
spin_lock(&state->owner->so_lock);
update_open_stateflags(state, open_flags);
update_open_stateflags(state, fmode);
spin_unlock(&state->owner->so_lock);
}

static void nfs4_return_incompatible_delegation(struct inode *inode, mode_t open_flags)
static int update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_stateid, nfs4_stateid *delegation, fmode_t fmode)
{
struct nfs_inode *nfsi = NFS_I(state->inode);
struct nfs_delegation *deleg_cur;
int ret = 0;

fmode &= (FMODE_READ|FMODE_WRITE);

rcu_read_lock();
deleg_cur = rcu_dereference(nfsi->delegation);
if (deleg_cur == NULL)
goto no_delegation;

spin_lock(&deleg_cur->lock);
if (nfsi->delegation != deleg_cur ||
(deleg_cur->type & fmode) != fmode)
goto no_delegation_unlock;

if (delegation == NULL)
delegation = &deleg_cur->stateid;
else if (memcmp(deleg_cur->stateid.data, delegation->data, NFS4_STATEID_SIZE) != 0)
goto no_delegation_unlock;

nfs_mark_delegation_referenced(deleg_cur);
__update_open_stateid(state, open_stateid, &deleg_cur->stateid, fmode);
ret = 1;
no_delegation_unlock:
spin_unlock(&deleg_cur->lock);
no_delegation:
rcu_read_unlock();

if (!ret && open_stateid != NULL) {
__update_open_stateid(state, open_stateid, NULL, fmode);
ret = 1;
}

return ret;
}


static void nfs4_return_incompatible_delegation(struct inode *inode, fmode_t fmode)
{
struct nfs_delegation *delegation;

rcu_read_lock();
delegation = rcu_dereference(NFS_I(inode)->delegation);
if (delegation == NULL || (delegation->type & open_flags) == open_flags) {
if (delegation == NULL || (delegation->type & fmode) == fmode) {
rcu_read_unlock();
return;
}
@@ -427,27 +547,28 @@ static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata)
struct nfs4_state *state = opendata->state;
struct nfs_inode *nfsi = NFS_I(state->inode);
struct nfs_delegation *delegation;
int open_mode = opendata->o_arg.open_flags & (FMODE_READ|FMODE_WRITE|O_EXCL);
int open_mode = opendata->o_arg.open_flags & O_EXCL;
fmode_t fmode = opendata->o_arg.fmode;
nfs4_stateid stateid;
int ret = -EAGAIN;

rcu_read_lock();
delegation = rcu_dereference(nfsi->delegation);
for (;;) {
if (can_open_cached(state, open_mode)) {
if (can_open_cached(state, fmode, open_mode)) {
spin_lock(&state->owner->so_lock);
if (can_open_cached(state, open_mode)) {
update_open_stateflags(state, open_mode);
if (can_open_cached(state, fmode, open_mode)) {
update_open_stateflags(state, fmode);
spin_unlock(&state->owner->so_lock);
rcu_read_unlock();
goto out_return_state;
}
spin_unlock(&state->owner->so_lock);
}
if (delegation == NULL)
break;
if (!can_open_delegated(delegation, open_mode))
rcu_read_lock();
delegation = rcu_dereference(nfsi->delegation);
if (delegation == NULL ||
!can_open_delegated(delegation, fmode)) {
rcu_read_unlock();
break;
}
/* Save the delegation */
memcpy(stateid.data, delegation->stateid.data, sizeof(stateid.data));
rcu_read_unlock();
@@ -455,19 +576,11 @@ static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata)
if (ret != 0)
goto out;
ret = -EAGAIN;
rcu_read_lock();
delegation = rcu_dereference(nfsi->delegation);
/* If no delegation, try a cached open */
if (delegation == NULL)
continue;
/* Is the delegation still valid? */
if (memcmp(stateid.data, delegation->stateid.data, sizeof(stateid.data)) != 0)
continue;
rcu_read_unlock();
update_open_stateid(state, NULL, &stateid, open_mode);
goto out_return_state;

/* Try to update the stateid using the delegation */
if (update_open_stateid(state, NULL, &stateid, fmode))
goto out_return_state;
}
rcu_read_unlock();
out:
return ERR_PTR(ret);
out_return_state:
@@ -480,7 +593,6 @@ static struct nfs4_state *nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data
struct inode *inode;
struct nfs4_state *state = NULL;
struct nfs_delegation *delegation;
nfs4_stateid *deleg_stateid = NULL;
int ret;

if (!data->rpc_done) {
@@ -507,7 +619,7 @@ static struct nfs4_state *nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data
if (delegation)
delegation_flags = delegation->flags;
rcu_read_unlock();
if (!(delegation_flags & NFS_DELEGATION_NEED_RECLAIM))
if ((delegation_flags & 1UL<<NFS_DELEGATION_NEED_RECLAIM) == 0)
nfs_inode_set_delegation(state->inode,
data->owner->so_cred,
&data->o_res);
@@ -516,12 +628,9 @@ static struct nfs4_state *nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data
data->owner->so_cred,
&data->o_res);
}
rcu_read_lock();
delegation = rcu_dereference(NFS_I(inode)->delegation);
if (delegation != NULL)
deleg_stateid = &delegation->stateid;
update_open_stateid(state, &data->o_res.stateid, deleg_stateid, data->o_arg.open_flags);
rcu_read_unlock();

update_open_stateid(state, &data->o_res.stateid, NULL,
data->o_arg.fmode);
iput(inode);
out:
return state;
@@ -552,7 +661,7 @@ static struct nfs4_opendata *nfs4_open_recoverdata_alloc(struct nfs_open_context
{
struct nfs4_opendata *opendata;

opendata = nfs4_opendata_alloc(&ctx->path, state->owner, 0, NULL);
opendata = nfs4_opendata_alloc(&ctx->path, state->owner, 0, 0, NULL);
if (opendata == NULL)
return ERR_PTR(-ENOMEM);
opendata->state = state;
@@ -560,12 +669,13 @@ static struct nfs4_opendata *nfs4_open_recoverdata_alloc(struct nfs_open_context
return opendata;
}

static int nfs4_open_recover_helper(struct nfs4_opendata *opendata, mode_t openflags, struct nfs4_state **res)
static int nfs4_open_recover_helper(struct nfs4_opendata *opendata, fmode_t fmode, struct nfs4_state **res)
|
||||
{
|
||||
struct nfs4_state *newstate;
|
||||
int ret;
|
||||
|
||||
opendata->o_arg.open_flags = openflags;
|
||||
opendata->o_arg.open_flags = 0;
|
||||
opendata->o_arg.fmode = fmode;
|
||||
memset(&opendata->o_res, 0, sizeof(opendata->o_res));
|
||||
memset(&opendata->c_res, 0, sizeof(opendata->c_res));
|
||||
nfs4_init_opendata_res(opendata);
|
||||
|
@ -575,7 +685,7 @@ static int nfs4_open_recover_helper(struct nfs4_opendata *opendata, mode_t openf
|
|||
newstate = nfs4_opendata_to_nfs4_state(opendata);
|
||||
if (IS_ERR(newstate))
|
||||
return PTR_ERR(newstate);
|
||||
nfs4_close_state(&opendata->path, newstate, openflags);
|
||||
nfs4_close_state(&opendata->path, newstate, fmode);
|
||||
*res = newstate;
|
||||
return 0;
|
||||
}
|
||||
|
@ -631,7 +741,7 @@ static int _nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state
|
|||
{
|
||||
struct nfs_delegation *delegation;
|
||||
struct nfs4_opendata *opendata;
|
||||
int delegation_type = 0;
|
||||
fmode_t delegation_type = 0;
|
||||
int status;
|
||||
|
||||
opendata = nfs4_open_recoverdata_alloc(ctx, state);
|
||||
|
@ -641,7 +751,7 @@ static int _nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state
|
|||
opendata->o_arg.fh = NFS_FH(state->inode);
|
||||
rcu_read_lock();
|
||||
delegation = rcu_dereference(NFS_I(state->inode)->delegation);
|
||||
if (delegation != NULL && (delegation->flags & NFS_DELEGATION_NEED_RECLAIM) != 0)
|
||||
if (delegation != NULL && test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags) != 0)
|
||||
delegation_type = delegation->type;
|
||||
rcu_read_unlock();
|
||||
opendata->o_arg.u.delegation_type = delegation_type;
|
||||
|
@ -744,7 +854,7 @@ static void nfs4_open_confirm_release(void *calldata)
|
|||
goto out_free;
|
||||
state = nfs4_opendata_to_nfs4_state(data);
|
||||
if (!IS_ERR(state))
|
||||
nfs4_close_state(&data->path, state, data->o_arg.open_flags);
|
||||
nfs4_close_state(&data->path, state, data->o_arg.fmode);
|
||||
out_free:
|
||||
nfs4_opendata_put(data);
|
||||
}
|
||||
|
@ -808,12 +918,12 @@ static void nfs4_open_prepare(struct rpc_task *task, void *calldata)
|
|||
if (data->state != NULL) {
|
||||
struct nfs_delegation *delegation;
|
||||
|
||||
if (can_open_cached(data->state, data->o_arg.open_flags & (FMODE_READ|FMODE_WRITE|O_EXCL)))
|
||||
if (can_open_cached(data->state, data->o_arg.fmode, data->o_arg.open_flags))
|
||||
goto out_no_action;
|
||||
rcu_read_lock();
|
||||
delegation = rcu_dereference(NFS_I(data->state->inode)->delegation);
|
||||
if (delegation != NULL &&
|
||||
(delegation->flags & NFS_DELEGATION_NEED_RECLAIM) == 0) {
|
||||
test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags) == 0) {
|
||||
rcu_read_unlock();
|
||||
goto out_no_action;
|
||||
}
|
||||
|
@ -877,7 +987,7 @@ static void nfs4_open_release(void *calldata)
|
|||
goto out_free;
|
||||
state = nfs4_opendata_to_nfs4_state(data);
|
||||
if (!IS_ERR(state))
|
||||
nfs4_close_state(&data->path, state, data->o_arg.open_flags);
|
||||
nfs4_close_state(&data->path, state, data->o_arg.fmode);
|
||||
out_free:
|
||||
nfs4_opendata_put(data);
|
||||
}
|
||||
|
@ -955,10 +1065,11 @@ static int nfs4_recover_expired_lease(struct nfs_server *server)
|
|||
int ret;
|
||||
|
||||
for (;;) {
|
||||
ret = nfs4_wait_clnt_recover(server->client, clp);
|
||||
ret = nfs4_wait_clnt_recover(clp);
|
||||
if (ret != 0)
|
||||
return ret;
|
||||
if (!test_and_clear_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state))
|
||||
if (!test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) &&
|
||||
!test_bit(NFS4CLNT_CHECK_LEASE,&clp->cl_state))
|
||||
break;
|
||||
nfs4_schedule_state_recovery(clp);
|
||||
}
|
||||
|
@ -993,8 +1104,9 @@ static inline int nfs4_do_open_expired(struct nfs_open_context *ctx, struct nfs4
|
|||
|
||||
do {
|
||||
err = _nfs4_open_expired(ctx, state);
|
||||
if (err == -NFS4ERR_DELAY)
|
||||
nfs4_handle_exception(server, err, &exception);
|
||||
if (err != -NFS4ERR_DELAY)
|
||||
break;
|
||||
nfs4_handle_exception(server, err, &exception);
|
||||
} while (exception.retry);
|
||||
return err;
|
||||
}
|
||||
|
@ -1031,12 +1143,11 @@ static inline void nfs4_exclusive_attrset(struct nfs4_opendata *opendata, struct
|
|||
/*
|
||||
* Returns a referenced nfs4_state
|
||||
*/
|
||||
static int _nfs4_do_open(struct inode *dir, struct path *path, int flags, struct iattr *sattr, struct rpc_cred *cred, struct nfs4_state **res)
|
||||
static int _nfs4_do_open(struct inode *dir, struct path *path, fmode_t fmode, int flags, struct iattr *sattr, struct rpc_cred *cred, struct nfs4_state **res)
|
||||
{
|
||||
struct nfs4_state_owner *sp;
|
||||
struct nfs4_state *state = NULL;
|
||||
struct nfs_server *server = NFS_SERVER(dir);
|
||||
struct nfs_client *clp = server->nfs_client;
|
||||
struct nfs4_opendata *opendata;
|
||||
int status;
|
||||
|
||||
|
@ -1050,12 +1161,11 @@ static int _nfs4_do_open(struct inode *dir, struct path *path, int flags, struct
|
|||
if (status != 0)
|
||||
goto err_put_state_owner;
|
||||
if (path->dentry->d_inode != NULL)
|
||||
nfs4_return_incompatible_delegation(path->dentry->d_inode, flags & (FMODE_READ|FMODE_WRITE));
|
||||
down_read(&clp->cl_sem);
|
||||
nfs4_return_incompatible_delegation(path->dentry->d_inode, fmode);
|
||||
status = -ENOMEM;
|
||||
opendata = nfs4_opendata_alloc(path, sp, flags, sattr);
|
||||
opendata = nfs4_opendata_alloc(path, sp, fmode, flags, sattr);
|
||||
if (opendata == NULL)
|
||||
goto err_release_rwsem;
|
||||
goto err_put_state_owner;
|
||||
|
||||
if (path->dentry->d_inode != NULL)
|
||||
opendata->state = nfs4_get_open_state(path->dentry->d_inode, sp);
|
||||
|
@ -1073,13 +1183,10 @@ static int _nfs4_do_open(struct inode *dir, struct path *path, int flags, struct
|
|||
goto err_opendata_put;
|
||||
nfs4_opendata_put(opendata);
|
||||
nfs4_put_state_owner(sp);
|
||||
up_read(&clp->cl_sem);
|
||||
*res = state;
|
||||
return 0;
|
||||
err_opendata_put:
|
||||
nfs4_opendata_put(opendata);
|
||||
err_release_rwsem:
|
||||
up_read(&clp->cl_sem);
|
||||
err_put_state_owner:
|
||||
nfs4_put_state_owner(sp);
|
||||
out_err:
|
||||
|
@ -1088,14 +1195,14 @@ static int _nfs4_do_open(struct inode *dir, struct path *path, int flags, struct
|
|||
}
|
||||
|
||||
|
||||
static struct nfs4_state *nfs4_do_open(struct inode *dir, struct path *path, int flags, struct iattr *sattr, struct rpc_cred *cred)
|
||||
static struct nfs4_state *nfs4_do_open(struct inode *dir, struct path *path, fmode_t fmode, int flags, struct iattr *sattr, struct rpc_cred *cred)
|
||||
{
|
||||
struct nfs4_exception exception = { };
|
||||
struct nfs4_state *res;
|
||||
int status;
|
||||
|
||||
do {
|
||||
status = _nfs4_do_open(dir, path, flags, sattr, cred, &res);
|
||||
status = _nfs4_do_open(dir, path, fmode, flags, sattr, cred, &res);
|
||||
if (status == 0)
|
||||
break;
|
||||
/* NOTE: BAD_SEQID means the server and client disagree about the
|
||||
|
@ -1230,10 +1337,13 @@ static void nfs4_close_done(struct rpc_task *task, void *data)
|
|||
renew_lease(server, calldata->timestamp);
|
||||
break;
|
||||
case -NFS4ERR_STALE_STATEID:
|
||||
case -NFS4ERR_OLD_STATEID:
|
||||
case -NFS4ERR_BAD_STATEID:
|
||||
case -NFS4ERR_EXPIRED:
|
||||
break;
|
||||
if (calldata->arg.fmode == 0)
|
||||
break;
|
||||
default:
|
||||
if (nfs4_async_handle_error(task, server) == -EAGAIN) {
|
||||
if (nfs4_async_handle_error(task, server, state) == -EAGAIN) {
|
||||
rpc_restart_call(task);
|
||||
return;
|
||||
}
|
||||
|
@ -1272,10 +1382,10 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data)
|
|||
nfs_fattr_init(calldata->res.fattr);
|
||||
if (test_bit(NFS_O_RDONLY_STATE, &state->flags) != 0) {
|
||||
task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_DOWNGRADE];
|
||||
calldata->arg.open_flags = FMODE_READ;
|
||||
calldata->arg.fmode = FMODE_READ;
|
||||
} else if (test_bit(NFS_O_WRONLY_STATE, &state->flags) != 0) {
|
||||
task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_DOWNGRADE];
|
||||
calldata->arg.open_flags = FMODE_WRITE;
|
||||
calldata->arg.fmode = FMODE_WRITE;
|
||||
}
|
||||
calldata->timestamp = jiffies;
|
||||
rpc_call_start(task);
|
||||
|
@ -1328,6 +1438,7 @@ int nfs4_do_close(struct path *path, struct nfs4_state *state, int wait)
|
|||
calldata->arg.seqid = nfs_alloc_seqid(&state->owner->so_seqid);
|
||||
if (calldata->arg.seqid == NULL)
|
||||
goto out_free_calldata;
|
||||
calldata->arg.fmode = 0;
|
||||
calldata->arg.bitmask = server->attr_bitmask;
|
||||
calldata->res.fattr = &calldata->fattr;
|
||||
calldata->res.seqid = calldata->arg.seqid;
|
||||
|
@ -1354,13 +1465,13 @@ int nfs4_do_close(struct path *path, struct nfs4_state *state, int wait)
|
|||
return status;
|
||||
}
|
||||
|
||||
static int nfs4_intent_set_file(struct nameidata *nd, struct path *path, struct nfs4_state *state)
|
||||
static int nfs4_intent_set_file(struct nameidata *nd, struct path *path, struct nfs4_state *state, fmode_t fmode)
|
||||
{
|
||||
struct file *filp;
|
||||
int ret;
|
||||
|
||||
/* If the open_intent is for execute, we have an extra check to make */
|
||||
if (nd->intent.open.flags & FMODE_EXEC) {
|
||||
if (fmode & FMODE_EXEC) {
|
||||
ret = nfs_may_open(state->inode,
|
||||
state->owner->so_cred,
|
||||
nd->intent.open.flags);
|
||||
|
@ -1376,7 +1487,7 @@ static int nfs4_intent_set_file(struct nameidata *nd, struct path *path, struct
|
|||
}
|
||||
ret = PTR_ERR(filp);
|
||||
out_close:
|
||||
nfs4_close_sync(path, state, nd->intent.open.flags);
|
||||
nfs4_close_sync(path, state, fmode & (FMODE_READ|FMODE_WRITE));
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -1392,6 +1503,7 @@ nfs4_atomic_open(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
|
|||
struct rpc_cred *cred;
|
||||
struct nfs4_state *state;
|
||||
struct dentry *res;
|
||||
fmode_t fmode = nd->intent.open.flags & (FMODE_READ | FMODE_WRITE | FMODE_EXEC);
|
||||
|
||||
if (nd->flags & LOOKUP_CREATE) {
|
||||
attr.ia_mode = nd->intent.open.create_mode;
|
||||
|
@ -1409,7 +1521,7 @@ nfs4_atomic_open(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
|
|||
parent = dentry->d_parent;
|
||||
/* Protect against concurrent sillydeletes */
|
||||
nfs_block_sillyrename(parent);
|
||||
state = nfs4_do_open(dir, &path, nd->intent.open.flags, &attr, cred);
|
||||
state = nfs4_do_open(dir, &path, fmode, nd->intent.open.flags, &attr, cred);
|
||||
put_rpccred(cred);
|
||||
if (IS_ERR(state)) {
|
||||
if (PTR_ERR(state) == -ENOENT) {
|
||||
|
@ -1424,7 +1536,7 @@ nfs4_atomic_open(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
|
|||
path.dentry = res;
|
||||
nfs_set_verifier(path.dentry, nfs_save_change_attribute(dir));
|
||||
nfs_unblock_sillyrename(parent);
|
||||
nfs4_intent_set_file(nd, &path, state);
|
||||
nfs4_intent_set_file(nd, &path, state, fmode);
|
||||
return res;
|
||||
}
|
||||
|
||||
|
@ -1437,11 +1549,12 @@ nfs4_open_revalidate(struct inode *dir, struct dentry *dentry, int openflags, st
|
|||
};
|
||||
struct rpc_cred *cred;
|
||||
struct nfs4_state *state;
|
||||
fmode_t fmode = openflags & (FMODE_READ | FMODE_WRITE);
|
||||
|
||||
cred = rpc_lookup_cred();
|
||||
if (IS_ERR(cred))
|
||||
return PTR_ERR(cred);
|
||||
state = nfs4_do_open(dir, &path, openflags, NULL, cred);
|
||||
state = nfs4_do_open(dir, &path, fmode, openflags, NULL, cred);
|
||||
put_rpccred(cred);
|
||||
if (IS_ERR(state)) {
|
||||
switch (PTR_ERR(state)) {
|
||||
|
@ -1458,10 +1571,10 @@ nfs4_open_revalidate(struct inode *dir, struct dentry *dentry, int openflags, st
|
|||
}
|
||||
if (state->inode == dentry->d_inode) {
|
||||
nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
|
||||
nfs4_intent_set_file(nd, &path, state);
|
||||
nfs4_intent_set_file(nd, &path, state, fmode);
|
||||
return 1;
|
||||
}
|
||||
nfs4_close_sync(&path, state, openflags);
|
||||
nfs4_close_sync(&path, state, fmode);
|
||||
out_drop:
|
||||
d_drop(dentry);
|
||||
return 0;
|
||||
|
@ -1887,6 +2000,7 @@ nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
|
|||
};
|
||||
struct nfs4_state *state;
|
||||
struct rpc_cred *cred;
|
||||
fmode_t fmode = flags & (FMODE_READ | FMODE_WRITE);
|
||||
int status = 0;
|
||||
|
||||
cred = rpc_lookup_cred();
|
||||
|
@ -1894,7 +2008,7 @@ nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
|
|||
status = PTR_ERR(cred);
|
||||
goto out;
|
||||
}
|
||||
state = nfs4_do_open(dir, &path, flags, sattr, cred);
|
||||
state = nfs4_do_open(dir, &path, fmode, flags, sattr, cred);
|
||||
d_drop(dentry);
|
||||
if (IS_ERR(state)) {
|
||||
status = PTR_ERR(state);
|
||||
|
@ -1910,9 +2024,9 @@ nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
|
|||
nfs_post_op_update_inode(state->inode, &fattr);
|
||||
}
|
||||
if (status == 0 && (nd->flags & LOOKUP_OPEN) != 0)
|
||||
status = nfs4_intent_set_file(nd, &path, state);
|
||||
status = nfs4_intent_set_file(nd, &path, state, fmode);
|
||||
else
|
||||
nfs4_close_sync(&path, state, flags);
|
||||
nfs4_close_sync(&path, state, fmode);
|
||||
out_putcred:
|
||||
put_rpccred(cred);
|
||||
out:
|
||||
|
@ -1974,7 +2088,7 @@ static int nfs4_proc_unlink_done(struct rpc_task *task, struct inode *dir)
|
|||
{
|
||||
struct nfs_removeres *res = task->tk_msg.rpc_resp;
|
||||
|
||||
if (nfs4_async_handle_error(task, res->server) == -EAGAIN)
|
||||
if (nfs4_async_handle_error(task, res->server, NULL) == -EAGAIN)
|
||||
return 0;
|
||||
update_changeattr(dir, &res->cinfo);
|
||||
nfs_post_op_update_inode(dir, &res->dir_attr);
|
||||
|
@ -2402,7 +2516,7 @@ static int nfs4_read_done(struct rpc_task *task, struct nfs_read_data *data)
|
|||
{
|
||||
struct nfs_server *server = NFS_SERVER(data->inode);
|
||||
|
||||
if (nfs4_async_handle_error(task, server) == -EAGAIN) {
|
||||
if (nfs4_async_handle_error(task, server, data->args.context->state) == -EAGAIN) {
|
||||
rpc_restart_call(task);
|
||||
return -EAGAIN;
|
||||
}
|
||||
|
@ -2423,7 +2537,7 @@ static int nfs4_write_done(struct rpc_task *task, struct nfs_write_data *data)
|
|||
{
|
||||
struct inode *inode = data->inode;
|
||||
|
||||
if (nfs4_async_handle_error(task, NFS_SERVER(inode)) == -EAGAIN) {
|
||||
if (nfs4_async_handle_error(task, NFS_SERVER(inode), data->args.context->state) == -EAGAIN) {
|
||||
rpc_restart_call(task);
|
||||
return -EAGAIN;
|
||||
}
|
||||
|
@ -2449,7 +2563,7 @@ static int nfs4_commit_done(struct rpc_task *task, struct nfs_write_data *data)
|
|||
{
|
||||
struct inode *inode = data->inode;
|
||||
|
||||
if (nfs4_async_handle_error(task, NFS_SERVER(inode)) == -EAGAIN) {
|
||||
if (nfs4_async_handle_error(task, NFS_SERVER(inode), NULL) == -EAGAIN) {
|
||||
rpc_restart_call(task);
|
||||
return -EAGAIN;
|
||||
}
|
||||
|
@ -2742,19 +2856,25 @@ static int nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen
}

static int
nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server)
nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server, struct nfs4_state *state)
{
struct nfs_client *clp = server->nfs_client;

if (!clp || task->tk_status >= 0)
return 0;
switch(task->tk_status) {
case -NFS4ERR_ADMIN_REVOKED:
case -NFS4ERR_BAD_STATEID:
case -NFS4ERR_OPENMODE:
if (state == NULL)
break;
nfs4_state_mark_reclaim_nograce(clp, state);
case -NFS4ERR_STALE_CLIENTID:
case -NFS4ERR_STALE_STATEID:
case -NFS4ERR_EXPIRED:
rpc_sleep_on(&clp->cl_rpcwaitq, task, NULL);
nfs4_schedule_state_recovery(clp);
if (test_bit(NFS4CLNT_STATE_RECOVER, &clp->cl_state) == 0)
if (test_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) == 0)
rpc_wake_up_queued_task(&clp->cl_rpcwaitq, task);
task->tk_status = 0;
return -EAGAIN;
|
||||
|
@ -2772,79 +2892,6 @@ nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int nfs4_wait_bit_killable(void *word)
|
||||
{
|
||||
if (fatal_signal_pending(current))
|
||||
return -ERESTARTSYS;
|
||||
schedule();
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int nfs4_wait_clnt_recover(struct rpc_clnt *clnt, struct nfs_client *clp)
|
||||
{
|
||||
int res;
|
||||
|
||||
might_sleep();
|
||||
|
||||
rwsem_acquire(&clp->cl_sem.dep_map, 0, 0, _RET_IP_);
|
||||
|
||||
res = wait_on_bit(&clp->cl_state, NFS4CLNT_STATE_RECOVER,
|
||||
nfs4_wait_bit_killable, TASK_KILLABLE);
|
||||
|
||||
rwsem_release(&clp->cl_sem.dep_map, 1, _RET_IP_);
|
||||
return res;
|
||||
}
|
||||
|
||||
static int nfs4_delay(struct rpc_clnt *clnt, long *timeout)
|
||||
{
|
||||
int res = 0;
|
||||
|
||||
might_sleep();
|
||||
|
||||
if (*timeout <= 0)
|
||||
*timeout = NFS4_POLL_RETRY_MIN;
|
||||
if (*timeout > NFS4_POLL_RETRY_MAX)
|
||||
*timeout = NFS4_POLL_RETRY_MAX;
|
||||
schedule_timeout_killable(*timeout);
|
||||
if (fatal_signal_pending(current))
|
||||
res = -ERESTARTSYS;
|
||||
*timeout <<= 1;
|
||||
return res;
|
||||
}
|
||||
|
||||
/* This is the error handling routine for processes that are allowed
|
||||
* to sleep.
|
||||
*/
|
||||
static int nfs4_handle_exception(const struct nfs_server *server, int errorcode, struct nfs4_exception *exception)
|
||||
{
|
||||
struct nfs_client *clp = server->nfs_client;
|
||||
int ret = errorcode;
|
||||
|
||||
exception->retry = 0;
|
||||
switch(errorcode) {
|
||||
case 0:
|
||||
return 0;
|
||||
case -NFS4ERR_STALE_CLIENTID:
|
||||
case -NFS4ERR_STALE_STATEID:
|
||||
case -NFS4ERR_EXPIRED:
|
||||
nfs4_schedule_state_recovery(clp);
|
||||
ret = nfs4_wait_clnt_recover(server->client, clp);
|
||||
if (ret == 0)
|
||||
exception->retry = 1;
|
||||
break;
|
||||
case -NFS4ERR_FILE_OPEN:
|
||||
case -NFS4ERR_GRACE:
|
||||
case -NFS4ERR_DELAY:
|
||||
ret = nfs4_delay(server->client, &exception->timeout);
|
||||
if (ret != 0)
|
||||
break;
|
||||
case -NFS4ERR_OLD_STATEID:
|
||||
exception->retry = 1;
|
||||
}
|
||||
/* We failed to handle the error */
|
||||
return nfs4_map_errors(ret);
|
||||
}
|
||||
|
||||
int nfs4_proc_setclientid(struct nfs_client *clp, u32 program, unsigned short port, struct rpc_cred *cred)
|
||||
{
|
||||
nfs4_verifier sc_verifier;
|
||||
|
@ -2916,7 +2963,6 @@ static int _nfs4_proc_setclientid_confirm(struct nfs_client *clp, struct rpc_cre
|
|||
spin_lock(&clp->cl_lock);
|
||||
clp->cl_lease_time = fsinfo.lease_time * HZ;
|
||||
clp->cl_last_renewal = now;
|
||||
clear_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
|
||||
spin_unlock(&clp->cl_lock);
|
||||
}
|
||||
return status;
|
||||
|
@ -3074,7 +3120,6 @@ static int _nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock
|
|||
struct nfs4_lock_state *lsp;
|
||||
int status;
|
||||
|
||||
down_read(&clp->cl_sem);
|
||||
arg.lock_owner.clientid = clp->cl_clientid;
|
||||
status = nfs4_set_lock_state(state, request);
|
||||
if (status != 0)
|
||||
|
@ -3091,7 +3136,6 @@ static int _nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock
|
|||
}
|
||||
request->fl_ops->fl_release_private(request);
|
||||
out:
|
||||
up_read(&clp->cl_sem);
|
||||
return status;
|
||||
}
|
||||
|
||||
|
@ -3181,11 +3225,13 @@ static void nfs4_locku_done(struct rpc_task *task, void *data)
|
|||
sizeof(calldata->lsp->ls_stateid.data));
|
||||
renew_lease(calldata->server, calldata->timestamp);
|
||||
break;
|
||||
case -NFS4ERR_BAD_STATEID:
|
||||
case -NFS4ERR_OLD_STATEID:
|
||||
case -NFS4ERR_STALE_STATEID:
|
||||
case -NFS4ERR_EXPIRED:
|
||||
break;
|
||||
default:
|
||||
if (nfs4_async_handle_error(task, calldata->server) == -EAGAIN)
|
||||
if (nfs4_async_handle_error(task, calldata->server, NULL) == -EAGAIN)
|
||||
rpc_restart_call(task);
|
||||
}
|
||||
}
|
||||
|
@ -3248,6 +3294,7 @@ static struct rpc_task *nfs4_do_unlck(struct file_lock *fl,
|
|||
|
||||
static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *request)
|
||||
{
|
||||
struct nfs_inode *nfsi = NFS_I(state->inode);
|
||||
struct nfs_seqid *seqid;
|
||||
struct nfs4_lock_state *lsp;
|
||||
struct rpc_task *task;
|
||||
|
@ -3257,8 +3304,12 @@ static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *
|
|||
status = nfs4_set_lock_state(state, request);
|
||||
/* Unlock _before_ we do the RPC call */
|
||||
request->fl_flags |= FL_EXISTS;
|
||||
if (do_vfs_lock(request->fl_file, request) == -ENOENT)
|
||||
down_read(&nfsi->rwsem);
|
||||
if (do_vfs_lock(request->fl_file, request) == -ENOENT) {
|
||||
up_read(&nfsi->rwsem);
|
||||
goto out;
|
||||
}
|
||||
up_read(&nfsi->rwsem);
|
||||
if (status != 0)
|
||||
goto out;
|
||||
/* Is this a delegated lock? */
|
||||
|
@ -3484,7 +3535,7 @@ static int nfs4_lock_expired(struct nfs4_state *state, struct file_lock *request
|
|||
|
||||
static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
|
||||
{
|
||||
struct nfs_client *clp = state->owner->so_client;
|
||||
struct nfs_inode *nfsi = NFS_I(state->inode);
|
||||
unsigned char fl_flags = request->fl_flags;
|
||||
int status;
|
||||
|
||||
|
@ -3496,19 +3547,13 @@ static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock
|
|||
status = do_vfs_lock(request->fl_file, request);
|
||||
if (status < 0)
|
||||
goto out;
|
||||
down_read(&clp->cl_sem);
|
||||
down_read(&nfsi->rwsem);
|
||||
if (test_bit(NFS_DELEGATED_STATE, &state->flags)) {
|
||||
struct nfs_inode *nfsi = NFS_I(state->inode);
|
||||
/* Yes: cache locks! */
|
||||
down_read(&nfsi->rwsem);
|
||||
/* ...but avoid races with delegation recall... */
|
||||
if (test_bit(NFS_DELEGATED_STATE, &state->flags)) {
|
||||
request->fl_flags = fl_flags & ~FL_SLEEP;
|
||||
status = do_vfs_lock(request->fl_file, request);
|
||||
up_read(&nfsi->rwsem);
|
||||
goto out_unlock;
|
||||
}
|
||||
up_read(&nfsi->rwsem);
|
||||
request->fl_flags = fl_flags & ~FL_SLEEP;
|
||||
status = do_vfs_lock(request->fl_file, request);
|
||||
goto out_unlock;
|
||||
}
|
||||
status = _nfs4_do_setlk(state, cmd, request, 0);
|
||||
if (status != 0)
|
||||
|
@ -3518,7 +3563,7 @@ static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock
|
|||
if (do_vfs_lock(request->fl_file, request) < 0)
|
||||
printk(KERN_WARNING "%s: VFS is out of sync with lock manager!\n", __func__);
|
||||
out_unlock:
|
||||
up_read(&clp->cl_sem);
|
||||
up_read(&nfsi->rwsem);
|
||||
out:
|
||||
request->fl_flags = fl_flags;
|
||||
return status;
|
||||
|
@ -3664,11 +3709,15 @@ int nfs4_proc_fs_locations(struct inode *dir, const struct qstr *name,
|
|||
}
|
||||
|
||||
struct nfs4_state_recovery_ops nfs4_reboot_recovery_ops = {
|
||||
.owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT,
|
||||
.state_flag_bit = NFS_STATE_RECLAIM_REBOOT,
|
||||
.recover_open = nfs4_open_reclaim,
|
||||
.recover_lock = nfs4_lock_reclaim,
|
||||
};
|
||||
|
||||
struct nfs4_state_recovery_ops nfs4_network_partition_recovery_ops = {
|
||||
struct nfs4_state_recovery_ops nfs4_nograce_recovery_ops = {
|
||||
.owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE,
|
||||
.state_flag_bit = NFS_STATE_RECLAIM_NOGRACE,
|
||||
.recover_open = nfs4_open_expired,
|
||||
.recover_lock = nfs4_lock_expired,
|
||||
};
|
||||
|
|
|
@ -65,7 +65,6 @@ nfs4_renew_state(struct work_struct *work)
|
|||
long lease, timeout;
|
||||
unsigned long last, now;
|
||||
|
||||
down_read(&clp->cl_sem);
|
||||
dprintk("%s: start\n", __func__);
|
||||
/* Are there any active superblocks? */
|
||||
if (list_empty(&clp->cl_superblocks))
|
||||
|
@ -77,17 +76,19 @@ nfs4_renew_state(struct work_struct *work)
|
|||
timeout = (2 * lease) / 3 + (long)last - (long)now;
|
||||
/* Are we close to a lease timeout? */
|
||||
if (time_after(now, last + lease/3)) {
|
||||
cred = nfs4_get_renew_cred(clp);
|
||||
if (cred == NULL) {
|
||||
set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
|
||||
spin_unlock(&clp->cl_lock);
|
||||
nfs_expire_all_delegations(clp);
|
||||
goto out;
|
||||
}
|
||||
cred = nfs4_get_renew_cred_locked(clp);
|
||||
spin_unlock(&clp->cl_lock);
|
||||
/* Queue an asynchronous RENEW. */
|
||||
nfs4_proc_async_renew(clp, cred);
|
||||
put_rpccred(cred);
|
||||
if (cred == NULL) {
|
||||
if (list_empty(&clp->cl_delegations)) {
|
||||
set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
|
||||
goto out;
|
||||
}
|
||||
nfs_expire_all_delegations(clp);
|
||||
} else {
|
||||
/* Queue an asynchronous RENEW. */
|
||||
nfs4_proc_async_renew(clp, cred);
|
||||
put_rpccred(cred);
|
||||
}
|
||||
timeout = (2 * lease) / 3;
|
||||
spin_lock(&clp->cl_lock);
|
||||
} else
|
||||
|
@ -100,12 +101,11 @@ nfs4_renew_state(struct work_struct *work)
|
|||
cancel_delayed_work(&clp->cl_renewd);
|
||||
schedule_delayed_work(&clp->cl_renewd, timeout);
|
||||
spin_unlock(&clp->cl_lock);
|
||||
nfs_expire_unreferenced_delegations(clp);
|
||||
out:
|
||||
up_read(&clp->cl_sem);
|
||||
dprintk("%s: done\n", __func__);
|
||||
}
|
||||
|
||||
/* Must be called with clp->cl_sem locked for writes */
|
||||
void
|
||||
nfs4_schedule_state_renewal(struct nfs_client *clp)
|
||||
{
|
||||
|
|
|
@ -71,14 +71,12 @@ static int nfs4_init_client(struct nfs_client *clp, struct rpc_cred *cred)
|
|||
return status;
|
||||
}
|
||||
|
||||
static struct rpc_cred *nfs4_get_machine_cred(struct nfs_client *clp)
|
||||
static struct rpc_cred *nfs4_get_machine_cred_locked(struct nfs_client *clp)
|
||||
{
|
||||
struct rpc_cred *cred = NULL;
|
||||
|
||||
spin_lock(&clp->cl_lock);
|
||||
if (clp->cl_machine_cred != NULL)
|
||||
cred = get_rpccred(clp->cl_machine_cred);
|
||||
spin_unlock(&clp->cl_lock);
|
||||
return cred;
|
||||
}
|
||||
|
||||
|
@ -94,7 +92,7 @@ static void nfs4_clear_machine_cred(struct nfs_client *clp)
|
|||
put_rpccred(cred);
|
||||
}
|
||||
|
||||
struct rpc_cred *nfs4_get_renew_cred(struct nfs_client *clp)
|
||||
struct rpc_cred *nfs4_get_renew_cred_locked(struct nfs_client *clp)
|
||||
{
|
||||
struct nfs4_state_owner *sp;
|
||||
struct rb_node *pos;
|
||||
|
@ -110,13 +108,24 @@ struct rpc_cred *nfs4_get_renew_cred(struct nfs_client *clp)
|
|||
return cred;
|
||||
}
|
||||
|
||||
static struct rpc_cred *nfs4_get_renew_cred(struct nfs_client *clp)
|
||||
{
|
||||
struct rpc_cred *cred;
|
||||
|
||||
spin_lock(&clp->cl_lock);
|
||||
cred = nfs4_get_renew_cred_locked(clp);
|
||||
spin_unlock(&clp->cl_lock);
|
||||
return cred;
|
||||
}
|
||||
|
||||
static struct rpc_cred *nfs4_get_setclientid_cred(struct nfs_client *clp)
|
||||
{
|
||||
struct nfs4_state_owner *sp;
|
||||
struct rb_node *pos;
|
||||
struct rpc_cred *cred;
|
||||
|
||||
cred = nfs4_get_machine_cred(clp);
|
||||
spin_lock(&clp->cl_lock);
|
||||
cred = nfs4_get_machine_cred_locked(clp);
|
||||
if (cred != NULL)
|
||||
goto out;
|
||||
pos = rb_first(&clp->cl_state_owners);
|
||||
|
@ -125,6 +134,7 @@ static struct rpc_cred *nfs4_get_setclientid_cred(struct nfs_client *clp)
|
|||
cred = get_rpccred(sp->so_cred);
|
||||
}
|
||||
out:
|
||||
spin_unlock(&clp->cl_lock);
|
||||
return cred;
|
||||
}
|
||||
|
||||
|
@ -295,10 +305,6 @@ nfs4_drop_state_owner(struct nfs4_state_owner *sp)
|
|||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Note: must be called with clp->cl_sem held in order to prevent races
|
||||
* with reboot recovery!
|
||||
*/
|
||||
struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *server, struct rpc_cred *cred)
|
||||
{
|
||||
struct nfs_client *clp = server->nfs_client;
|
||||
|
@ -327,10 +333,6 @@ struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *server, struct
|
|||
return sp;
|
||||
}
|
||||
|
||||
/*
|
||||
* Must be called with clp->cl_sem held in order to avoid races
|
||||
* with state recovery...
|
||||
*/
|
||||
void nfs4_put_state_owner(struct nfs4_state_owner *sp)
|
||||
{
|
||||
struct nfs_client *clp = sp->so_client;
|
||||
|
@ -361,18 +363,18 @@ nfs4_alloc_open_state(void)
|
|||
}
|
||||
|
||||
void
|
||||
nfs4_state_set_mode_locked(struct nfs4_state *state, mode_t mode)
|
||||
nfs4_state_set_mode_locked(struct nfs4_state *state, fmode_t fmode)
|
||||
{
|
||||
if (state->state == mode)
|
||||
if (state->state == fmode)
|
||||
return;
|
||||
/* NB! List reordering - see the reclaim code for why. */
|
||||
if ((mode & FMODE_WRITE) != (state->state & FMODE_WRITE)) {
|
||||
if (mode & FMODE_WRITE)
|
||||
if ((fmode & FMODE_WRITE) != (state->state & FMODE_WRITE)) {
|
||||
if (fmode & FMODE_WRITE)
|
||||
list_move(&state->open_states, &state->owner->so_states);
|
||||
else
|
||||
list_move_tail(&state->open_states, &state->owner->so_states);
|
||||
}
|
||||
state->state = mode;
|
||||
state->state = fmode;
|
||||
}
|
||||
|
||||
static struct nfs4_state *
|
||||
|
@ -432,10 +434,6 @@ nfs4_get_open_state(struct inode *inode, struct nfs4_state_owner *owner)
|
|||
return state;
|
||||
}
|
||||
|
||||
/*
|
||||
* Beware! Caller must be holding exactly one
|
||||
* reference to clp->cl_sem!
|
||||
*/
|
||||
void nfs4_put_open_state(struct nfs4_state *state)
|
||||
{
|
||||
struct inode *inode = state->inode;
|
||||
|
@ -456,16 +454,16 @@ void nfs4_put_open_state(struct nfs4_state *state)
|
|||
/*
|
||||
* Close the current file.
|
||||
*/
|
||||
static void __nfs4_close(struct path *path, struct nfs4_state *state, mode_t mode, int wait)
|
||||
static void __nfs4_close(struct path *path, struct nfs4_state *state, fmode_t fmode, int wait)
|
||||
{
|
||||
struct nfs4_state_owner *owner = state->owner;
|
||||
int call_close = 0;
|
||||
int newstate;
|
||||
fmode_t newstate;
|
||||
|
||||
atomic_inc(&owner->so_count);
|
||||
/* Protect against nfs4_find_state() */
|
||||
spin_lock(&owner->so_lock);
|
||||
switch (mode & (FMODE_READ | FMODE_WRITE)) {
|
||||
switch (fmode & (FMODE_READ | FMODE_WRITE)) {
|
||||
case FMODE_READ:
|
||||
state->n_rdonly--;
|
||||
break;
|
||||
|
@ -500,14 +498,14 @@ static void __nfs4_close(struct path *path, struct nfs4_state *state, mode_t mod
|
|||
nfs4_do_close(path, state, wait);
|
||||
}
|
||||
|
||||
void nfs4_close_state(struct path *path, struct nfs4_state *state, mode_t mode)
|
||||
void nfs4_close_state(struct path *path, struct nfs4_state *state, fmode_t fmode)
|
||||
{
|
||||
__nfs4_close(path, state, mode, 0);
|
||||
__nfs4_close(path, state, fmode, 0);
|
||||
}
|
||||
|
||||
void nfs4_close_sync(struct path *path, struct nfs4_state *state, mode_t mode)
|
||||
void nfs4_close_sync(struct path *path, struct nfs4_state *state, fmode_t fmode)
|
||||
{
|
||||
__nfs4_close(path, state, mode, 1);
|
||||
__nfs4_close(path, state, fmode, 1);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -568,7 +566,6 @@ static void nfs4_free_lock_state(struct nfs4_lock_state *lsp)
|
|||
* Return a compatible lock_state. If no initialized lock_state structure
|
||||
* exists, return an uninitialized one.
|
||||
*
|
||||
* The caller must be holding clp->cl_sem
|
||||
*/
|
||||
static struct nfs4_lock_state *nfs4_get_lock_state(struct nfs4_state *state, fl_owner_t owner)
|
||||
{
|
||||
|
@ -770,32 +767,34 @@ int nfs_wait_on_sequence(struct nfs_seqid *seqid, struct rpc_task *task)
|
|||
return status;
|
||||
}
|
||||
|
||||
static int reclaimer(void *);
static int nfs4_run_state_manager(void *);

static inline void nfs4_clear_recover_bit(struct nfs_client *clp)
static void nfs4_clear_state_manager_bit(struct nfs_client *clp)
{
smp_mb__before_clear_bit();
clear_bit(NFS4CLNT_STATE_RECOVER, &clp->cl_state);
clear_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state);
smp_mb__after_clear_bit();
wake_up_bit(&clp->cl_state, NFS4CLNT_STATE_RECOVER);
wake_up_bit(&clp->cl_state, NFS4CLNT_MANAGER_RUNNING);
rpc_wake_up(&clp->cl_rpcwaitq);
}

/*
* State recovery routine
* Schedule the nfs_client asynchronous state management routine
*/
static void nfs4_recover_state(struct nfs_client *clp)
void nfs4_schedule_state_manager(struct nfs_client *clp)
{
struct task_struct *task;

if (test_and_set_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) != 0)
return;
__module_get(THIS_MODULE);
atomic_inc(&clp->cl_count);
task = kthread_run(reclaimer, clp, "%s-reclaim",
task = kthread_run(nfs4_run_state_manager, clp, "%s-manager",
rpc_peeraddr2str(clp->cl_rpcclient,
RPC_DISPLAY_ADDR));
if (!IS_ERR(task))
return;
nfs4_clear_recover_bit(clp);
nfs4_clear_state_manager_bit(clp);
nfs_put_client(clp);
module_put(THIS_MODULE);
}
|
||||
|
@ -807,16 +806,42 @@ void nfs4_schedule_state_recovery(struct nfs_client *clp)
|
|||
{
|
||||
if (!clp)
|
||||
return;
|
||||
if (test_and_set_bit(NFS4CLNT_STATE_RECOVER, &clp->cl_state) == 0)
|
||||
nfs4_recover_state(clp);
|
||||
if (!test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state))
|
||||
set_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state);
|
||||
nfs4_schedule_state_manager(clp);
|
||||
}
|
||||
|
||||
static int nfs4_reclaim_locks(struct nfs4_state_recovery_ops *ops, struct nfs4_state *state)
|
||||
static int nfs4_state_mark_reclaim_reboot(struct nfs_client *clp, struct nfs4_state *state)
|
||||
{
|
||||
|
||||
set_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags);
|
||||
/* Don't recover state that expired before the reboot */
|
||||
if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags)) {
|
||||
clear_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags);
|
||||
return 0;
|
||||
}
|
||||
set_bit(NFS_OWNER_RECLAIM_REBOOT, &state->owner->so_flags);
|
||||
set_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state);
|
||||
return 1;
|
||||
}
|
||||
|
||||
int nfs4_state_mark_reclaim_nograce(struct nfs_client *clp, struct nfs4_state *state)
|
||||
{
|
||||
set_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags);
|
||||
clear_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags);
|
||||
set_bit(NFS_OWNER_RECLAIM_NOGRACE, &state->owner->so_flags);
|
||||
set_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state);
|
||||
return 1;
|
||||
}
|
||||
|
||||
static int nfs4_reclaim_locks(struct nfs4_state *state, const struct nfs4_state_recovery_ops *ops)
|
||||
{
|
||||
struct inode *inode = state->inode;
|
||||
struct nfs_inode *nfsi = NFS_I(inode);
|
||||
struct file_lock *fl;
|
||||
int status = 0;
|
||||
|
||||
down_write(&nfsi->rwsem);
|
||||
for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
|
||||
if (!(fl->fl_flags & (FL_POSIX|FL_FLOCK)))
|
||||
continue;
|
||||
|
@ -839,12 +864,14 @@ static int nfs4_reclaim_locks(struct nfs4_state_recovery_ops *ops, struct nfs4_s
|
|||
goto out_err;
|
||||
}
|
||||
}
|
||||
up_write(&nfsi->rwsem);
|
||||
return 0;
|
||||
out_err:
|
||||
up_write(&nfsi->rwsem);
|
||||
return status;
|
||||
}
|
||||
|
||||
static int nfs4_reclaim_open_state(struct nfs4_state_recovery_ops *ops, struct nfs4_state_owner *sp)
|
||||
static int nfs4_reclaim_open_state(struct nfs4_state_owner *sp, const struct nfs4_state_recovery_ops *ops)
|
||||
{
|
||||
struct nfs4_state *state;
|
||||
struct nfs4_lock_state *lock;
|
||||
|
@ -858,28 +885,34 @@ static int nfs4_reclaim_open_state(struct nfs4_state_recovery_ops *ops, struct n
|
|||
* recovering after a network partition or a reboot from a
|
||||
* server that doesn't support a grace period.
|
||||
*/
|
||||
restart:
|
||||
spin_lock(&sp->so_lock);
|
||||
list_for_each_entry(state, &sp->so_states, open_states) {
|
||||
if (!test_and_clear_bit(ops->state_flag_bit, &state->flags))
|
||||
continue;
|
||||
if (state->state == 0)
|
||||
continue;
|
||||
atomic_inc(&state->count);
|
||||
spin_unlock(&sp->so_lock);
|
||||
status = ops->recover_open(sp, state);
|
||||
if (status >= 0) {
|
||||
status = nfs4_reclaim_locks(ops, state);
|
||||
if (status < 0)
|
||||
goto out_err;
|
||||
list_for_each_entry(lock, &state->lock_states, ls_locks) {
|
||||
if (!(lock->ls_flags & NFS_LOCK_INITIALIZED))
|
||||
printk("%s: Lock reclaim failed!\n",
|
||||
status = nfs4_reclaim_locks(state, ops);
|
||||
if (status >= 0) {
|
||||
list_for_each_entry(lock, &state->lock_states, ls_locks) {
|
||||
if (!(lock->ls_flags & NFS_LOCK_INITIALIZED))
|
||||
printk("%s: Lock reclaim failed!\n",
|
||||
__func__);
|
||||
}
|
||||
nfs4_put_open_state(state);
|
||||
goto restart;
|
||||
}
|
||||
continue;
|
||||
}
|
||||
switch (status) {
|
||||
default:
|
||||
printk(KERN_ERR "%s: unhandled error %d. Zeroing state\n",
|
||||
__func__, status);
|
||||
case -ENOENT:
|
||||
case -NFS4ERR_RECLAIM_BAD:
|
||||
case -NFS4ERR_RECLAIM_CONFLICT:
|
||||
case -ESTALE:
|
||||
/*
|
||||
* Open state on this file cannot be recovered
|
||||
* All we can do is revert to using the zero stateid.
|
||||
|
@ -889,84 +922,176 @@ static int nfs4_reclaim_open_state(struct nfs4_state_recovery_ops *ops, struct n
|
|||
/* Mark the file as being 'closed' */
|
||||
state->state = 0;
|
||||
break;
|
||||
case -NFS4ERR_RECLAIM_BAD:
|
||||
case -NFS4ERR_RECLAIM_CONFLICT:
|
||||
nfs4_state_mark_reclaim_nograce(sp->so_client, state);
|
||||
break;
|
||||
case -NFS4ERR_EXPIRED:
|
||||
case -NFS4ERR_NO_GRACE:
|
||||
nfs4_state_mark_reclaim_nograce(sp->so_client, state);
|
||||
case -NFS4ERR_STALE_CLIENTID:
|
||||
goto out_err;
|
||||
}
|
||||
nfs4_put_open_state(state);
|
||||
goto restart;
|
||||
}
|
||||
spin_unlock(&sp->so_lock);
|
||||
return 0;
|
||||
out_err:
|
||||
nfs4_put_open_state(state);
|
||||
return status;
|
||||
}
|
||||
|
||||
static void nfs4_state_mark_reclaim(struct nfs_client *clp)
|
||||
static void nfs4_clear_open_state(struct nfs4_state *state)
|
||||
{
|
||||
struct nfs4_lock_state *lock;
|
||||
|
||||
clear_bit(NFS_DELEGATED_STATE, &state->flags);
|
||||
clear_bit(NFS_O_RDONLY_STATE, &state->flags);
|
||||
clear_bit(NFS_O_WRONLY_STATE, &state->flags);
|
||||
clear_bit(NFS_O_RDWR_STATE, &state->flags);
|
||||
list_for_each_entry(lock, &state->lock_states, ls_locks) {
|
||||
lock->ls_seqid.flags = 0;
|
||||
lock->ls_flags &= ~NFS_LOCK_INITIALIZED;
|
||||
}
|
||||
}
|
||||
|
||||
static void nfs4_state_mark_reclaim_helper(struct nfs_client *clp, int (*mark_reclaim)(struct nfs_client *clp, struct nfs4_state *state))
|
||||
{
|
||||
struct nfs4_state_owner *sp;
|
||||
struct rb_node *pos;
|
||||
struct nfs4_state *state;
|
||||
struct nfs4_lock_state *lock;
|
||||
|
||||
/* Reset all sequence ids to zero */
|
||||
for (pos = rb_first(&clp->cl_state_owners); pos != NULL; pos = rb_next(pos)) {
|
||||
sp = rb_entry(pos, struct nfs4_state_owner, so_client_node);
|
||||
sp->so_seqid.counter = 0;
|
||||
sp->so_seqid.flags = 0;
|
||||
spin_lock(&sp->so_lock);
|
||||
list_for_each_entry(state, &sp->so_states, open_states) {
|
||||
clear_bit(NFS_DELEGATED_STATE, &state->flags);
|
||||
clear_bit(NFS_O_RDONLY_STATE, &state->flags);
|
||||
clear_bit(NFS_O_WRONLY_STATE, &state->flags);
|
||||
clear_bit(NFS_O_RDWR_STATE, &state->flags);
|
||||
list_for_each_entry(lock, &state->lock_states, ls_locks) {
|
||||
lock->ls_seqid.counter = 0;
|
||||
lock->ls_seqid.flags = 0;
|
||||
lock->ls_flags &= ~NFS_LOCK_INITIALIZED;
|
||||
}
|
||||
if (mark_reclaim(clp, state))
|
||||
nfs4_clear_open_state(state);
|
||||
}
|
||||
spin_unlock(&sp->so_lock);
|
||||
}
|
||||
}
|
||||
|
||||
static int reclaimer(void *ptr)
|
||||
static void nfs4_state_start_reclaim_reboot(struct nfs_client *clp)
|
||||
{
|
||||
/* Mark all delegations for reclaim */
|
||||
nfs_delegation_mark_reclaim(clp);
|
||||
nfs4_state_mark_reclaim_helper(clp, nfs4_state_mark_reclaim_reboot);
|
||||
}
|
||||
|
||||
static void nfs4_state_end_reclaim_reboot(struct nfs_client *clp)
|
||||
{
|
||||
struct nfs_client *clp = ptr;
|
||||
struct nfs4_state_owner *sp;
|
||||
struct rb_node *pos;
|
||||
struct nfs4_state_recovery_ops *ops;
|
||||
struct rpc_cred *cred;
|
||||
struct nfs4_state *state;
|
||||
|
||||
if (!test_and_clear_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state))
|
||||
return;
|
||||
|
||||
for (pos = rb_first(&clp->cl_state_owners); pos != NULL; pos = rb_next(pos)) {
|
||||
sp = rb_entry(pos, struct nfs4_state_owner, so_client_node);
|
||||
spin_lock(&sp->so_lock);
|
||||
list_for_each_entry(state, &sp->so_states, open_states) {
|
||||
if (!test_and_clear_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags))
|
||||
continue;
|
||||
nfs4_state_mark_reclaim_nograce(clp, state);
|
||||
}
|
||||
spin_unlock(&sp->so_lock);
|
||||
}
|
||||
|
||||
nfs_delegation_reap_unclaimed(clp);
|
||||
}
|
||||
|
||||
static void nfs_delegation_clear_all(struct nfs_client *clp)
|
||||
{
|
||||
nfs_delegation_mark_reclaim(clp);
|
||||
nfs_delegation_reap_unclaimed(clp);
|
||||
}
|
||||
|
||||
static void nfs4_state_start_reclaim_nograce(struct nfs_client *clp)
|
||||
{
|
||||
nfs_delegation_clear_all(clp);
|
||||
nfs4_state_mark_reclaim_helper(clp, nfs4_state_mark_reclaim_nograce);
|
||||
}
|
||||
|
||||
static void nfs4_state_end_reclaim_nograce(struct nfs_client *clp)
|
||||
{
|
||||
clear_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state);
|
||||
}
|
||||
|
||||
static void nfs4_recovery_handle_error(struct nfs_client *clp, int error)
|
||||
{
|
||||
switch (error) {
|
||||
case -NFS4ERR_CB_PATH_DOWN:
|
||||
nfs_handle_cb_pathdown(clp);
|
||||
break;
|
||||
case -NFS4ERR_STALE_CLIENTID:
|
||||
case -NFS4ERR_LEASE_MOVED:
|
||||
set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
|
||||
nfs4_state_start_reclaim_reboot(clp);
|
||||
break;
|
||||
case -NFS4ERR_EXPIRED:
|
||||
set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
|
||||
nfs4_state_start_reclaim_nograce(clp);
|
||||
}
|
||||
}
|
||||
|
||||
static int nfs4_do_reclaim(struct nfs_client *clp, const struct nfs4_state_recovery_ops *ops)
|
||||
{
|
||||
struct rb_node *pos;
|
||||
int status = 0;
|
||||
|
||||
allow_signal(SIGKILL);
|
||||
|
||||
/* Ensure exclusive access to NFSv4 state */
|
||||
down_write(&clp->cl_sem);
|
||||
/* Are there any NFS mounts out there? */
|
||||
if (list_empty(&clp->cl_superblocks))
|
||||
goto out;
|
||||
restart_loop:
|
||||
ops = &nfs4_network_partition_recovery_ops;
|
||||
/* Are there any open files on this volume? */
|
||||
cred = nfs4_get_renew_cred(clp);
|
||||
if (cred != NULL) {
|
||||
/* Yes there are: try to renew the old lease */
|
||||
status = nfs4_proc_renew(clp, cred);
|
||||
put_rpccred(cred);
|
||||
switch (status) {
|
||||
case 0:
|
||||
case -NFS4ERR_CB_PATH_DOWN:
|
||||
goto out;
|
||||
case -NFS4ERR_STALE_CLIENTID:
|
||||
case -NFS4ERR_LEASE_MOVED:
|
||||
ops = &nfs4_reboot_recovery_ops;
|
||||
restart:
|
||||
spin_lock(&clp->cl_lock);
|
||||
for (pos = rb_first(&clp->cl_state_owners); pos != NULL; pos = rb_next(pos)) {
|
||||
struct nfs4_state_owner *sp = rb_entry(pos, struct nfs4_state_owner, so_client_node);
|
||||
if (!test_and_clear_bit(ops->owner_flag_bit, &sp->so_flags))
|
||||
continue;
|
||||
atomic_inc(&sp->so_count);
|
||||
spin_unlock(&clp->cl_lock);
|
||||
status = nfs4_reclaim_open_state(sp, ops);
|
||||
if (status < 0) {
|
||||
set_bit(ops->owner_flag_bit, &sp->so_flags);
|
||||
nfs4_put_state_owner(sp);
|
||||
nfs4_recovery_handle_error(clp, status);
|
||||
return status;
|
||||
}
|
||||
} else {
|
||||
/* "reboot" to ensure we clear all state on the server */
|
||||
clp->cl_boot_time = CURRENT_TIME;
|
||||
nfs4_put_state_owner(sp);
|
||||
goto restart;
|
||||
}
|
||||
/* We're going to have to re-establish a clientid */
|
||||
nfs4_state_mark_reclaim(clp);
|
||||
status = -ENOENT;
|
||||
spin_unlock(&clp->cl_lock);
|
||||
return status;
|
||||
}
|
||||
|
||||
static int nfs4_check_lease(struct nfs_client *clp)
|
||||
{
|
||||
struct rpc_cred *cred;
|
||||
int status = -NFS4ERR_EXPIRED;
|
||||
|
||||
/* Is the client already known to have an expired lease? */
|
||||
if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state))
|
||||
return 0;
|
||||
cred = nfs4_get_renew_cred(clp);
|
||||
if (cred == NULL) {
|
||||
cred = nfs4_get_setclientid_cred(clp);
|
||||
if (cred == NULL)
|
||||
goto out;
|
||||
}
|
||||
status = nfs4_proc_renew(clp, cred);
|
||||
put_rpccred(cred);
|
||||
out:
|
||||
nfs4_recovery_handle_error(clp, status);
|
||||
return status;
|
||||
}
|
||||
|
||||
static int nfs4_reclaim_lease(struct nfs_client *clp)
|
||||
{
|
||||
struct rpc_cred *cred;
|
||||
int status = -ENOENT;
|
||||
|
||||
cred = nfs4_get_setclientid_cred(clp);
|
||||
if (cred != NULL) {
|
||||
status = nfs4_init_client(clp, cred);
|
||||
|
@ -974,42 +1099,90 @@ static int reclaimer(void *ptr)
|
|||
/* Handle case where the user hasn't set up machine creds */
|
||||
if (status == -EACCES && cred == clp->cl_machine_cred) {
|
||||
nfs4_clear_machine_cred(clp);
|
||||
goto restart_loop;
|
||||
status = -EAGAIN;
|
||||
}
|
||||
}
|
||||
if (status)
|
||||
goto out_error;
|
||||
/* Mark all delegations for reclaim */
|
||||
nfs_delegation_mark_reclaim(clp);
|
||||
/* Note: list is protected by exclusive lock on cl->cl_sem */
|
||||
for (pos = rb_first(&clp->cl_state_owners); pos != NULL; pos = rb_next(pos)) {
|
||||
sp = rb_entry(pos, struct nfs4_state_owner, so_client_node);
|
||||
status = nfs4_reclaim_open_state(ops, sp);
|
||||
if (status < 0) {
|
||||
if (status == -NFS4ERR_NO_GRACE) {
|
||||
ops = &nfs4_network_partition_recovery_ops;
|
||||
status = nfs4_reclaim_open_state(ops, sp);
|
||||
return status;
|
||||
}
|
||||
|
||||
static void nfs4_state_manager(struct nfs_client *clp)
|
||||
{
|
||||
int status = 0;
|
||||
|
||||
/* Ensure exclusive access to NFSv4 state */
|
||||
for(;;) {
|
||||
if (test_and_clear_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state)) {
|
||||
/* We're going to have to re-establish a clientid */
|
||||
status = nfs4_reclaim_lease(clp);
|
||||
if (status) {
|
||||
set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
|
||||
if (status == -EAGAIN)
|
||||
continue;
|
||||
goto out_error;
|
||||
}
|
||||
if (status == -NFS4ERR_STALE_CLIENTID)
|
||||
goto restart_loop;
|
||||
if (status == -NFS4ERR_EXPIRED)
|
||||
goto restart_loop;
|
||||
clear_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state);
|
||||
}
|
||||
|
||||
if (test_and_clear_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state)) {
|
||||
status = nfs4_check_lease(clp);
|
||||
if (status != 0)
|
||||
continue;
|
||||
}
|
||||
|
||||
/* First recover reboot state... */
|
||||
if (test_and_clear_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state)) {
|
||||
status = nfs4_do_reclaim(clp, &nfs4_reboot_recovery_ops);
|
||||
if (status == -NFS4ERR_STALE_CLIENTID)
|
||||
continue;
|
||||
nfs4_state_end_reclaim_reboot(clp);
|
||||
continue;
|
||||
}
|
||||
|
||||
/* Now recover expired state... */
|
||||
if (test_and_clear_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state)) {
|
||||
status = nfs4_do_reclaim(clp, &nfs4_nograce_recovery_ops);
|
||||
if (status < 0) {
|
||||
set_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state);
|
||||
if (status == -NFS4ERR_STALE_CLIENTID)
|
||||
continue;
|
||||
if (status == -NFS4ERR_EXPIRED)
|
||||
continue;
|
||||
goto out_error;
|
||||
} else
|
||||
nfs4_state_end_reclaim_nograce(clp);
|
||||
continue;
|
||||
}
|
||||
|
||||
if (test_and_clear_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state)) {
|
||||
nfs_client_return_marked_delegations(clp);
|
||||
continue;
|
||||
}
|
||||
|
||||
nfs4_clear_state_manager_bit(clp);
|
||||
/* Did we race with an attempt to give us more work? */
|
||||
if (clp->cl_state == 0)
|
||||
break;
|
||||
if (test_and_set_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) != 0)
|
||||
break;
|
||||
}
|
||||
nfs_delegation_reap_unclaimed(clp);
|
||||
out:
|
||||
up_write(&clp->cl_sem);
|
||||
if (status == -NFS4ERR_CB_PATH_DOWN)
|
||||
nfs_handle_cb_pathdown(clp);
|
||||
nfs4_clear_recover_bit(clp);
|
||||
return;
|
||||
out_error:
|
||||
printk(KERN_WARNING "Error: state manager failed on NFSv4 server %s"
|
||||
" with error %d\n", clp->cl_hostname, -status);
|
||||
if (test_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state))
|
||||
nfs4_state_end_reclaim_reboot(clp);
|
||||
nfs4_clear_state_manager_bit(clp);
|
||||
}
|
||||
|
||||
static int nfs4_run_state_manager(void *ptr)
|
||||
{
|
||||
struct nfs_client *clp = ptr;
|
||||
|
||||
allow_signal(SIGKILL);
|
||||
nfs4_state_manager(clp);
|
||||
nfs_put_client(clp);
|
||||
module_put_and_exit(0);
|
||||
return 0;
|
||||
out_error:
|
||||
printk(KERN_WARNING "Error: state recovery failed on NFSv4 server %s"
|
||||
" with error %d\n", clp->cl_hostname, -status);
|
||||
set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
|
||||
goto out;
|
||||
}
|
||||
|
||||
/*
|
||||
|
|
fs/nfs/nfs4xdr.c (1217 lines changed): diff suppressed because it is too large
|
@ -86,6 +86,8 @@
#include <net/ipconfig.h>
#include <linux/parser.h>

#include "internal.h"

/* Define this to allow debugging output */
#undef NFSROOT_DEBUG
#define NFSDBG_FACILITY NFSDBG_ROOT
@ -100,7 +102,7 @@ static char nfs_root_name[256] __initdata = "";
static __be32 servaddr __initdata = 0;

/* Name of directory to mount */
static char nfs_path[NFS_MAXPATHLEN] __initdata = { 0, };
static char nfs_export_path[NFS_MAXPATHLEN] __initdata = { 0, };

/* NFS-related data */
static struct nfs_mount_data nfs_data __initdata = { 0, };/* NFS mount info */
|
||||
|
@ -312,7 +314,7 @@ static int __init root_nfs_name(char *name)
|
|||
printk(KERN_ERR "Root-NFS: Pathname for remote directory too long.\n");
|
||||
return -1;
|
||||
}
|
||||
sprintf(nfs_path, buf, cp);
|
||||
sprintf(nfs_export_path, buf, cp);
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
@ -340,7 +342,7 @@ static int __init root_nfs_addr(void)
|
|||
static void __init root_nfs_print(void)
|
||||
{
|
||||
printk(KERN_NOTICE "Root-NFS: Mounting %s on server %s as root\n",
|
||||
nfs_path, nfs_data.hostname);
|
||||
nfs_export_path, nfs_data.hostname);
|
||||
printk(KERN_NOTICE "Root-NFS: rsize = %d, wsize = %d, timeo = %d, retrans = %d\n",
|
||||
nfs_data.rsize, nfs_data.wsize, nfs_data.timeo, nfs_data.retrans);
|
||||
printk(KERN_NOTICE "Root-NFS: acreg (min,max) = (%d,%d), acdir (min,max) = (%d,%d)\n",
|
||||
|
@ -485,18 +487,23 @@ static int __init root_nfs_get_handle(void)
|
|||
{
|
||||
struct nfs_fh fh;
|
||||
struct sockaddr_in sin;
|
||||
struct nfs_mount_request request = {
|
||||
.sap = (struct sockaddr *)&sin,
|
||||
.salen = sizeof(sin),
|
||||
.dirpath = nfs_export_path,
|
||||
.version = (nfs_data.flags & NFS_MOUNT_VER3) ?
|
||||
NFS_MNT3_VERSION : NFS_MNT_VERSION,
|
||||
.protocol = (nfs_data.flags & NFS_MOUNT_TCP) ?
|
||||
XPRT_TRANSPORT_TCP : XPRT_TRANSPORT_UDP,
|
||||
.fh = &fh,
|
||||
};
|
||||
int status;
|
||||
int protocol = (nfs_data.flags & NFS_MOUNT_TCP) ?
|
||||
XPRT_TRANSPORT_TCP : XPRT_TRANSPORT_UDP;
|
||||
int version = (nfs_data.flags & NFS_MOUNT_VER3) ?
|
||||
NFS_MNT3_VERSION : NFS_MNT_VERSION;
|
||||
|
||||
set_sockaddr(&sin, servaddr, htons(mount_port));
|
||||
status = nfs_mount((struct sockaddr *) &sin, sizeof(sin), NULL,
|
||||
nfs_path, version, protocol, &fh);
|
||||
status = nfs_mount(&request);
|
||||
if (status < 0)
|
||||
printk(KERN_ERR "Root-NFS: Server returned error %d "
|
||||
"while mounting %s\n", status, nfs_path);
|
||||
"while mounting %s\n", status, nfs_export_path);
|
||||
else {
|
||||
nfs_data.root.size = fh.size;
|
||||
memcpy(nfs_data.root.data, fh.data, fh.size);
|
||||
|
|
|
@ -533,12 +533,6 @@ readpage_async_filler(void *data, struct page *page)
unsigned int len;
int error;

error = nfs_wb_page(inode, page);
if (error)
goto out_unlock;
if (PageUptodate(page))
goto out_unlock;

len = nfs_page_length(page);
if (len == 0)
return nfs_return_empty_page(page);
|
||||
|
|
|
@ -75,6 +75,7 @@ enum {
Opt_acl, Opt_noacl,
Opt_rdirplus, Opt_nordirplus,
Opt_sharecache, Opt_nosharecache,
Opt_resvport, Opt_noresvport,

/* Mount options that take integer arguments */
Opt_port,
@ -129,6 +130,8 @@ static const match_table_t nfs_mount_option_tokens = {
{ Opt_nordirplus, "nordirplus" },
{ Opt_sharecache, "sharecache" },
{ Opt_nosharecache, "nosharecache" },
{ Opt_resvport, "resvport" },
{ Opt_noresvport, "noresvport" },

{ Opt_port, "port=%u" },
{ Opt_rsize, "rsize=%u" },
@ -512,7 +515,8 @@ static void nfs_show_mount_options(struct seq_file *m, struct nfs_server *nfss,
{ NFS_MOUNT_NONLM, ",nolock", "" },
{ NFS_MOUNT_NOACL, ",noacl", "" },
{ NFS_MOUNT_NORDIRPLUS, ",nordirplus", "" },
{ NFS_MOUNT_UNSHARED, ",nosharecache", ""},
{ NFS_MOUNT_UNSHARED, ",nosharecache", "" },
{ NFS_MOUNT_NORESVPORT, ",noresvport", "" },
{ 0, NULL, NULL }
};
const struct proc_nfs_info *nfs_infop;
@ -1033,6 +1037,12 @@ static int nfs_parse_mount_options(char *raw,
case Opt_nosharecache:
mnt->flags |= NFS_MOUNT_UNSHARED;
break;
case Opt_resvport:
mnt->flags &= ~NFS_MOUNT_NORESVPORT;
break;
case Opt_noresvport:
mnt->flags |= NFS_MOUNT_NORESVPORT;
break;
|
||||
|
||||
/*
|
||||
* options that take numeric values
|
||||
|
@ -1327,8 +1337,14 @@ static int nfs_parse_mount_options(char *raw,
|
|||
static int nfs_try_mount(struct nfs_parsed_mount_data *args,
|
||||
struct nfs_fh *root_fh)
|
||||
{
|
||||
struct sockaddr *sap = (struct sockaddr *)&args->mount_server.address;
|
||||
char *hostname;
|
||||
struct nfs_mount_request request = {
|
||||
.sap = (struct sockaddr *)
|
||||
&args->mount_server.address,
|
||||
.dirpath = args->nfs_server.export_path,
|
||||
.protocol = args->mount_server.protocol,
|
||||
.fh = root_fh,
|
||||
.noresvport = args->flags & NFS_MOUNT_NORESVPORT,
|
||||
};
|
||||
int status;
|
||||
|
||||
if (args->mount_server.version == 0) {
|
||||
|
@ -1337,42 +1353,38 @@ static int nfs_try_mount(struct nfs_parsed_mount_data *args,
|
|||
else
|
||||
args->mount_server.version = NFS_MNT_VERSION;
|
||||
}
|
||||
request.version = args->mount_server.version;
|
||||
|
||||
if (args->mount_server.hostname)
|
||||
hostname = args->mount_server.hostname;
|
||||
request.hostname = args->mount_server.hostname;
|
||||
else
|
||||
hostname = args->nfs_server.hostname;
|
||||
request.hostname = args->nfs_server.hostname;
|
||||
|
||||
/*
|
||||
* Construct the mount server's address.
|
||||
*/
|
||||
if (args->mount_server.address.ss_family == AF_UNSPEC) {
|
||||
memcpy(sap, &args->nfs_server.address,
|
||||
memcpy(request.sap, &args->nfs_server.address,
|
||||
args->nfs_server.addrlen);
|
||||
args->mount_server.addrlen = args->nfs_server.addrlen;
|
||||
}
|
||||
request.salen = args->mount_server.addrlen;
|
||||
|
||||
/*
|
||||
* autobind will be used if mount_server.port == 0
|
||||
*/
|
||||
nfs_set_port(sap, args->mount_server.port);
|
||||
nfs_set_port(request.sap, args->mount_server.port);
|
||||
|
||||
/*
|
||||
* Now ask the mount server to map our export path
|
||||
* to a file handle.
|
||||
*/
|
||||
status = nfs_mount(sap,
|
||||
args->mount_server.addrlen,
|
||||
hostname,
|
||||
args->nfs_server.export_path,
|
||||
args->mount_server.version,
|
||||
args->mount_server.protocol,
|
||||
root_fh);
|
||||
status = nfs_mount(&request);
|
||||
if (status == 0)
|
||||
return 0;
|
||||
|
||||
dfprintk(MOUNT, "NFS: unable to mount server %s, error %d\n",
|
||||
hostname, status);
|
||||
request.hostname, status);
|
||||
return status;
|
||||
}
|
||||
|
||||
|
@ -2419,7 +2431,7 @@ static void nfs4_kill_super(struct super_block *sb)
|
|||
{
|
||||
struct nfs_server *server = NFS_SB(sb);
|
||||
|
||||
nfs_return_all_delegations(sb);
|
||||
nfs_super_return_all_delegations(sb);
|
||||
kill_anon_super(sb);
|
||||
|
||||
nfs4_renewd_prepare_shutdown(server);
|
||||
|
|
|
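Opt_resvport and Opt_noresvport above only toggle one flag bit in mnt->flags. A minimal user-space sketch (hypothetical helper name; the flag value mirrors NFS_MOUNT_NORESVPORT and is not the kernel parser itself) of the same set/clear pattern:

    #include <stdio.h>
    #include <string.h>

    #define DEMO_MOUNT_NORESVPORT 0x40000  /* mirrors NFS_MOUNT_NORESVPORT */

    /* Clear the bit for "resvport", set it for "noresvport". */
    static unsigned long parse_port_opt(unsigned long flags, const char *opt)
    {
        if (strcmp(opt, "resvport") == 0)
            flags &= ~DEMO_MOUNT_NORESVPORT;   /* default: bind to a privileged port */
        else if (strcmp(opt, "noresvport") == 0)
            flags |= DEMO_MOUNT_NORESVPORT;    /* allow a non-privileged source port */
        return flags;
    }

    int main(void)
    {
        unsigned long flags = 0;

        flags = parse_port_opt(flags, "noresvport");
        printf("noresvport set: %d\n", !!(flags & DEMO_MOUNT_NORESVPORT));
        flags = parse_port_opt(flags, "resvport");
        printf("noresvport set: %d\n", !!(flags & DEMO_MOUNT_NORESVPORT));
        return 0;
    }

This prints 1 then 0, matching the mutually exclusive pair shown in the option-parsing switch above.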
@@ -29,8 +29,8 @@

MODULE_LICENSE("GPL");

EXPORT_SYMBOL(nfsacl_encode);
EXPORT_SYMBOL(nfsacl_decode);
EXPORT_SYMBOL_GPL(nfsacl_encode);
EXPORT_SYMBOL_GPL(nfsacl_decode);

struct nfsacl_encode_desc {
    struct xdr_array2_desc desc;
@@ -358,6 +358,7 @@ static struct rpc_program cb_program = {
    .nrvers = ARRAY_SIZE(nfs_cb_version),
    .version = nfs_cb_version,
    .stats = &cb_stats,
    .pipe_dir_name = "/nfsd4_cb",
};

/* Reference counting, callback cleanup, etc., all look racy as heck.
@@ -382,8 +383,9 @@ static int do_probe_callback(void *data)
        .program = &cb_program,
        .prognumber = cb->cb_prog,
        .version = nfs_cb_version[1]->number,
        .authflavor = RPC_AUTH_UNIX, /* XXX: need AUTH_GSS... */
        .authflavor = clp->cl_flavor,
        .flags = (RPC_CLNT_CREATE_NOPING | RPC_CLNT_CREATE_QUIET),
        .client_name = clp->cl_principal,
    };
    struct rpc_message msg = {
        .rpc_proc = &nfs4_cb_procedures[NFSPROC4_CLNT_CB_NULL],
@@ -392,6 +394,11 @@ static int do_probe_callback(void *data)
    struct rpc_clnt *client;
    int status;

    if (!clp->cl_principal && (clp->cl_flavor >= RPC_AUTH_GSS_KRB5)) {
        status = nfserr_cb_path_down;
        goto out_err;
    }

    /* Initialize address */
    memset(&addr, 0, sizeof(addr));
    addr.sin_family = AF_INET;
@@ -54,6 +54,7 @@
#include <linux/mutex.h>
#include <linux/lockd/bind.h>
#include <linux/module.h>
#include <linux/sunrpc/svcauth_gss.h>

#define NFSDDBG_FACILITY NFSDDBG_PROC

@@ -377,6 +378,7 @@ free_client(struct nfs4_client *clp)
    shutdown_callback_client(clp);
    if (clp->cl_cred.cr_group_info)
        put_group_info(clp->cl_cred.cr_group_info);
    kfree(clp->cl_principal);
    kfree(clp->cl_name.data);
    kfree(clp);
}
@@ -696,6 +698,7 @@ nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
    unsigned int strhashval;
    struct nfs4_client *conf, *unconf, *new;
    __be32 status;
    char *princ;
    char dname[HEXDIR_LEN];

    if (!check_name(clname))
@@ -783,6 +786,15 @@ nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
    }
    copy_verf(new, &clverifier);
    new->cl_addr = sin->sin_addr.s_addr;
    new->cl_flavor = rqstp->rq_flavor;
    princ = svc_gss_principal(rqstp);
    if (princ) {
        new->cl_principal = kstrdup(princ, GFP_KERNEL);
        if (new->cl_principal == NULL) {
            free_client(new);
            goto out;
        }
    }
    copy_cred(&new->cl_cred, &rqstp->rq_cred);
    gen_confirm(new);
    gen_callback(new, setclid);
@@ -115,10 +115,20 @@ static inline u64 get_jiffies_64(void)
     ((long)(a) - (long)(b) >= 0))
#define time_before_eq(a,b) time_after_eq(b,a)

/*
 * Calculate whether a is in the range of [b, c].
 */
#define time_in_range(a,b,c) \
    (time_after_eq(a,b) && \
     time_before_eq(a,c))

/*
 * Calculate whether a is in the range of [b, c).
 */
#define time_in_range_open(a,b,c) \
    (time_after_eq(a,b) && \
     time_before(a,c))

/* Same as above, but does so with platform independent 64bit types.
 * These must be used when utilizing jiffies_64 (i.e. return value of
 * get_jiffies_64() */
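The new time_in_range_open() differs from time_in_range() only at the upper bound: the closed form accepts a == c, the half-open form rejects it. A minimal user-space sketch (plain C copies of the wrap-safe macros above; not kernel code) showing the difference:

    #include <stdio.h>

    /* Wrap-safe comparisons, modelled on the jiffies helpers. */
    #define time_after_eq(a,b)  ((long)(a) - (long)(b) >= 0)
    #define time_before_eq(a,b) time_after_eq(b,a)
    #define time_after(a,b)     ((long)(b) - (long)(a) < 0)
    #define time_before(a,b)    time_after(b,a)

    #define time_in_range(a,b,c)      (time_after_eq(a,b) && time_before_eq(a,c))
    #define time_in_range_open(a,b,c) (time_after_eq(a,b) && time_before(a,c))

    int main(void)
    {
        unsigned long b = 100, c = 200, a = 200;

        /* a == c: inside [b, c], outside [b, c) */
        printf("closed: %d, half-open: %d\n",
               time_in_range(a, b, c), time_in_range_open(a, b, c));
        return 0;
    }

This prints "closed: 1, half-open: 0", which is the behavioural distinction the rpcauth_prune_expired() hunk further down switches to when it adopts time_in_range_open().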
@@ -41,6 +41,7 @@ struct nlmclnt_initdata {
    size_t addrlen;
    unsigned short protocol;
    u32 nfs_version;
    int noresvport;
};

/*
@@ -49,6 +49,7 @@ struct nlm_host {
    unsigned short h_proto; /* transport proto */
    unsigned short h_reclaiming : 1,
            h_server : 1, /* server side, not client side */
            h_noresvport : 1,
            h_inuse : 1;
    wait_queue_head_t h_gracewait; /* wait while reclaiming */
    struct rw_semaphore h_rwsem; /* Reboot recovery lock */
@@ -220,7 +221,8 @@ struct nlm_host *nlmclnt_lookup_host(const struct sockaddr *sap,
            const size_t salen,
            const unsigned short protocol,
            const u32 version,
            const char *hostname);
            const char *hostname,
            int noresvport);
struct nlm_host *nlmsvc_lookup_host(const struct svc_rqst *rqstp,
            const char *hostname,
            const size_t hostname_len);
@@ -83,7 +83,7 @@ struct nfs_open_context {
    struct rpc_cred *cred;
    struct nfs4_state *state;
    fl_owner_t lockowner;
    int mode;
    fmode_t mode;

    unsigned long flags;
#define NFS_CONTEXT_ERROR_WRITE (0)
@@ -130,7 +130,10 @@ struct nfs_inode {
     *
     * We need to revalidate the cached attrs for this inode if
     *
     * jiffies - read_cache_jiffies > attrtimeo
     * jiffies - read_cache_jiffies >= attrtimeo
     *
     * Please note the comparison is greater than or equal
     * so that zero timeout values can be specified.
     */
    unsigned long read_cache_jiffies;
    unsigned long attrtimeo;
@@ -180,7 +183,7 @@ struct nfs_inode {
    /* NFSv4 state */
    struct list_head open_states;
    struct nfs_delegation *delegation;
    int delegation_state;
    fmode_t delegation_state;
    struct rw_semaphore rwsem;
#endif /* CONFIG_NFS_V4*/
    struct inode vfs_inode;
@@ -342,7 +345,7 @@ extern int nfs_setattr(struct dentry *, struct iattr *);
extern void nfs_setattr_update_inode(struct inode *inode, struct iattr *attr);
extern struct nfs_open_context *get_nfs_open_context(struct nfs_open_context *ctx);
extern void put_nfs_open_context(struct nfs_open_context *ctx);
extern struct nfs_open_context *nfs_find_open_context(struct inode *inode, struct rpc_cred *cred, int mode);
extern struct nfs_open_context *nfs_find_open_context(struct inode *inode, struct rpc_cred *cred, fmode_t mode);
extern u64 nfs_compat_user_ino64(u64 fileid);
extern void nfs_fattr_init(struct nfs_fattr *fattr);

@@ -532,12 +535,6 @@ static inline void nfs3_forget_cached_acls(struct inode *inode)
}
#endif /* CONFIG_NFS_V3_ACL */

/*
 * linux/fs/mount_clnt.c
 */
extern int nfs_mount(struct sockaddr *, size_t, char *, char *,
            int, int, struct nfs_fh *);

/*
 * inline functions
 */
@@ -42,12 +42,6 @@ struct nfs_client {
    struct rb_root cl_openowner_id;
    struct rb_root cl_lockowner_id;

    /*
     * The following rwsem ensures exclusive access to the server
     * while we recover the state following a lease expiration.
     */
    struct rw_semaphore cl_sem;

    struct list_head cl_delegations;
    struct rb_root cl_state_owners;
    spinlock_t cl_lock;
@@ -45,7 +45,7 @@ struct nfs_mount_data {
    char context[NFS_MAX_CONTEXT_LEN + 1]; /* 6 */
};

/* bits in the flags field */
/* bits in the flags field visible to user space */

#define NFS_MOUNT_SOFT 0x0001 /* 1 */
#define NFS_MOUNT_INTR 0x0002 /* 1 */ /* now unused, but ABI */
@@ -68,5 +68,6 @@ struct nfs_mount_data {
/* The following are for internal use only */
#define NFS_MOUNT_LOOKUP_CACHE_NONEG 0x10000
#define NFS_MOUNT_LOOKUP_CACHE_NONE 0x20000
#define NFS_MOUNT_NORESVPORT 0x40000

#endif
@@ -120,13 +120,14 @@ struct nfs_openargs {
    const struct nfs_fh * fh;
    struct nfs_seqid * seqid;
    int open_flags;
    fmode_t fmode;
    __u64 clientid;
    __u64 id;
    union {
        struct iattr * attrs; /* UNCHECKED, GUARDED */
        nfs4_verifier verifier; /* EXCLUSIVE */
        nfs4_stateid delegation; /* CLAIM_DELEGATE_CUR */
        int delegation_type; /* CLAIM_PREVIOUS */
        fmode_t delegation_type; /* CLAIM_PREVIOUS */
    } u;
    const struct qstr * name;
    const struct nfs_server *server; /* Needed for ID mapping */
@@ -143,7 +144,7 @@ struct nfs_openres {
    struct nfs_fattr * dir_attr;
    struct nfs_seqid * seqid;
    const struct nfs_server *server;
    int delegation_type;
    fmode_t delegation_type;
    nfs4_stateid delegation;
    __u32 do_recall;
    __u64 maxsize;
@@ -171,7 +172,7 @@ struct nfs_closeargs {
    struct nfs_fh * fh;
    nfs4_stateid * stateid;
    struct nfs_seqid * seqid;
    int open_flags;
    fmode_t fmode;
    const u32 * bitmask;
};
@@ -124,6 +124,8 @@ struct nfs4_client {
    nfs4_verifier cl_verifier; /* generated by client */
    time_t cl_time; /* time of last lease renewal */
    __be32 cl_addr; /* client ipaddress */
    u32 cl_flavor; /* setclientid pseudoflavor */
    char *cl_principal; /* setclientid principal name */
    struct svc_cred cl_cred; /* setclientid principal */
    clientid_t cl_clientid; /* generated by server */
    nfs4_verifier cl_confirm; /* generated by server */
@@ -58,6 +58,7 @@ struct rpc_clnt {
    struct rpc_timeout cl_timeout_default;
    struct rpc_program * cl_program;
    char cl_inline_name[32];
    char *cl_principal; /* target to authenticate to */
};

/*
@@ -108,6 +109,7 @@ struct rpc_create_args {
    u32 version;
    rpc_authflavor_t authflavor;
    unsigned long flags;
    char *client_name;
};

/* Values for "flags" field */
@@ -15,6 +15,7 @@ struct rpc_pipe_ops {
    ssize_t (*upcall)(struct file *, struct rpc_pipe_msg *, char __user *, size_t);
    ssize_t (*downcall)(struct file *, const char __user *, size_t);
    void (*release_pipe)(struct inode *);
    int (*open_pipe)(struct inode *);
    void (*destroy_msg)(struct rpc_pipe_msg *);
};
@@ -20,6 +20,7 @@ int gss_svc_init(void);
void gss_svc_shutdown(void);
int svcauth_gss_register_pseudoflavor(u32 pseudoflavor, char * name);
u32 svcauth_gss_flavor(struct auth_domain *dom);
char *svc_gss_principal(struct svc_rqst *);

#endif /* __KERNEL__ */
#endif /* _LINUX_SUNRPC_SVCAUTH_GSS_H */
@@ -36,21 +36,6 @@ struct xdr_netobj {
 */
typedef int (*kxdrproc_t)(void *rqstp, __be32 *data, void *obj);

/*
 * We're still requiring the BKL in the xdr code until it's been
 * more carefully audited, at which point this wrapper will become
 * unnecessary.
 */
static inline int rpc_call_xdrproc(kxdrproc_t xdrproc, void *rqstp, __be32 *data, void *obj)
{
    int ret;

    lock_kernel();
    ret = xdrproc(rqstp, data, obj);
    unlock_kernel();
    return ret;
}

/*
 * Basic structure for transmission/reception of a client XDR message.
 * Features a header (for a linear buffer containing RPC headers
@@ -76,8 +76,7 @@ struct rpc_rqst {
    struct list_head rq_list;

    __u32 * rq_buffer; /* XDR encode buffer */
    size_t rq_bufsize,
        rq_callsize,
    size_t rq_callsize,
        rq_rcvsize;

    struct xdr_buf rq_private_buf; /* The receive buffer
@@ -234,7 +234,7 @@ rpcauth_prune_expired(struct list_head *free, int nr_to_scan)
    list_for_each_entry_safe(cred, next, &cred_unused, cr_lru) {

        /* Enforce a 60 second garbage collection moratorium */
        if (time_in_range(cred->cr_expire, expired, jiffies) &&
        if (time_in_range_open(cred->cr_expire, expired, jiffies) &&
            test_bit(RPCAUTH_CRED_HASHED, &cred->cr_flags) != 0)
            continue;

@@ -515,7 +515,7 @@ rpcauth_wrap_req(struct rpc_task *task, kxdrproc_t encode, void *rqstp,
    if (cred->cr_ops->crwrap_req)
        return cred->cr_ops->crwrap_req(task, encode, rqstp, data, obj);
    /* By default, we encode the arguments normally. */
    return rpc_call_xdrproc(encode, rqstp, data, obj);
    return encode(rqstp, data, obj);
}

int
@@ -530,7 +530,7 @@ rpcauth_unwrap_resp(struct rpc_task *task, kxdrproc_t decode, void *rqstp,
        return cred->cr_ops->crunwrap_resp(task, decode, rqstp,
                data, obj);
    /* By default, we decode the arguments normally. */
    return rpc_call_xdrproc(decode, rqstp, data, obj);
    return decode(rqstp, data, obj);
}

int
@ -72,11 +72,25 @@ struct gss_auth {
|
|||
struct gss_api_mech *mech;
|
||||
enum rpc_gss_svc service;
|
||||
struct rpc_clnt *client;
|
||||
struct dentry *dentry;
|
||||
/*
|
||||
* There are two upcall pipes; dentry[1], named "gssd", is used
|
||||
* for the new text-based upcall; dentry[0] is named after the
|
||||
* mechanism (for example, "krb5") and exists for
|
||||
* backwards-compatibility with older gssd's.
|
||||
*/
|
||||
struct dentry *dentry[2];
|
||||
};
|
||||
|
||||
/* pipe_version >= 0 if and only if someone has a pipe open. */
|
||||
static int pipe_version = -1;
|
||||
static atomic_t pipe_users = ATOMIC_INIT(0);
|
||||
static DEFINE_SPINLOCK(pipe_version_lock);
|
||||
static struct rpc_wait_queue pipe_version_rpc_waitqueue;
|
||||
static DECLARE_WAIT_QUEUE_HEAD(pipe_version_waitqueue);
|
||||
|
||||
static void gss_free_ctx(struct gss_cl_ctx *);
|
||||
static struct rpc_pipe_ops gss_upcall_ops;
|
||||
static struct rpc_pipe_ops gss_upcall_ops_v0;
|
||||
static struct rpc_pipe_ops gss_upcall_ops_v1;
|
||||
|
||||
static inline struct gss_cl_ctx *
|
||||
gss_get_ctx(struct gss_cl_ctx *ctx)
|
||||
|
@ -220,6 +234,7 @@ gss_fill_context(const void *p, const void *end, struct gss_cl_ctx *ctx, struct
|
|||
return p;
|
||||
}
|
||||
|
||||
#define UPCALL_BUF_LEN 128
|
||||
|
||||
struct gss_upcall_msg {
|
||||
atomic_t count;
|
||||
|
@ -227,16 +242,41 @@ struct gss_upcall_msg {
|
|||
struct rpc_pipe_msg msg;
|
||||
struct list_head list;
|
||||
struct gss_auth *auth;
|
||||
struct rpc_inode *inode;
|
||||
struct rpc_wait_queue rpc_waitqueue;
|
||||
wait_queue_head_t waitqueue;
|
||||
struct gss_cl_ctx *ctx;
|
||||
char databuf[UPCALL_BUF_LEN];
|
||||
};
|
||||
|
||||
static int get_pipe_version(void)
|
||||
{
|
||||
int ret;
|
||||
|
||||
spin_lock(&pipe_version_lock);
|
||||
if (pipe_version >= 0) {
|
||||
atomic_inc(&pipe_users);
|
||||
ret = pipe_version;
|
||||
} else
|
||||
ret = -EAGAIN;
|
||||
spin_unlock(&pipe_version_lock);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void put_pipe_version(void)
|
||||
{
|
||||
if (atomic_dec_and_lock(&pipe_users, &pipe_version_lock)) {
|
||||
pipe_version = -1;
|
||||
spin_unlock(&pipe_version_lock);
|
||||
}
|
||||
}
|
||||
|
||||
static void
|
||||
gss_release_msg(struct gss_upcall_msg *gss_msg)
|
||||
{
|
||||
if (!atomic_dec_and_test(&gss_msg->count))
|
||||
return;
|
||||
put_pipe_version();
|
||||
BUG_ON(!list_empty(&gss_msg->list));
|
||||
if (gss_msg->ctx != NULL)
|
||||
gss_put_ctx(gss_msg->ctx);
|
||||
|
@ -266,8 +306,8 @@ __gss_find_upcall(struct rpc_inode *rpci, uid_t uid)
|
|||
static inline struct gss_upcall_msg *
|
||||
gss_add_msg(struct gss_auth *gss_auth, struct gss_upcall_msg *gss_msg)
|
||||
{
|
||||
struct inode *inode = gss_auth->dentry->d_inode;
|
||||
struct rpc_inode *rpci = RPC_I(inode);
|
||||
struct rpc_inode *rpci = gss_msg->inode;
|
||||
struct inode *inode = &rpci->vfs_inode;
|
||||
struct gss_upcall_msg *old;
|
||||
|
||||
spin_lock(&inode->i_lock);
|
||||
|
@ -293,8 +333,7 @@ __gss_unhash_msg(struct gss_upcall_msg *gss_msg)
|
|||
static void
|
||||
gss_unhash_msg(struct gss_upcall_msg *gss_msg)
|
||||
{
|
||||
struct gss_auth *gss_auth = gss_msg->auth;
|
||||
struct inode *inode = gss_auth->dentry->d_inode;
|
||||
struct inode *inode = &gss_msg->inode->vfs_inode;
|
||||
|
||||
if (list_empty(&gss_msg->list))
|
||||
return;
|
||||
|
@ -310,7 +349,7 @@ gss_upcall_callback(struct rpc_task *task)
|
|||
struct gss_cred *gss_cred = container_of(task->tk_msg.rpc_cred,
|
||||
struct gss_cred, gc_base);
|
||||
struct gss_upcall_msg *gss_msg = gss_cred->gc_upcall;
|
||||
struct inode *inode = gss_msg->auth->dentry->d_inode;
|
||||
struct inode *inode = &gss_msg->inode->vfs_inode;
|
||||
|
||||
spin_lock(&inode->i_lock);
|
||||
if (gss_msg->ctx)
|
||||
|
@ -323,22 +362,75 @@ gss_upcall_callback(struct rpc_task *task)
|
|||
gss_release_msg(gss_msg);
|
||||
}
|
||||
|
||||
static void gss_encode_v0_msg(struct gss_upcall_msg *gss_msg)
|
||||
{
|
||||
gss_msg->msg.data = &gss_msg->uid;
|
||||
gss_msg->msg.len = sizeof(gss_msg->uid);
|
||||
}
|
||||
|
||||
static void gss_encode_v1_msg(struct gss_upcall_msg *gss_msg,
|
||||
struct rpc_clnt *clnt, int machine_cred)
|
||||
{
|
||||
char *p = gss_msg->databuf;
|
||||
int len = 0;
|
||||
|
||||
gss_msg->msg.len = sprintf(gss_msg->databuf, "mech=%s uid=%d ",
|
||||
gss_msg->auth->mech->gm_name,
|
||||
gss_msg->uid);
|
||||
p += gss_msg->msg.len;
|
||||
if (clnt->cl_principal) {
|
||||
len = sprintf(p, "target=%s ", clnt->cl_principal);
|
||||
p += len;
|
||||
gss_msg->msg.len += len;
|
||||
}
|
||||
if (machine_cred) {
|
||||
len = sprintf(p, "service=* ");
|
||||
p += len;
|
||||
gss_msg->msg.len += len;
|
||||
} else if (!strcmp(clnt->cl_program->name, "nfs4_cb")) {
|
||||
len = sprintf(p, "service=nfs ");
|
||||
p += len;
|
||||
gss_msg->msg.len += len;
|
||||
}
|
||||
len = sprintf(p, "\n");
|
||||
gss_msg->msg.len += len;
|
||||
|
||||
gss_msg->msg.data = gss_msg->databuf;
|
||||
BUG_ON(gss_msg->msg.len > UPCALL_BUF_LEN);
|
||||
}
|
||||
|
||||
static void gss_encode_msg(struct gss_upcall_msg *gss_msg,
|
||||
struct rpc_clnt *clnt, int machine_cred)
|
||||
{
|
||||
if (pipe_version == 0)
|
||||
gss_encode_v0_msg(gss_msg);
|
||||
else /* pipe_version == 1 */
|
||||
gss_encode_v1_msg(gss_msg, clnt, machine_cred);
|
||||
}
|
||||
|
||||
static inline struct gss_upcall_msg *
|
||||
gss_alloc_msg(struct gss_auth *gss_auth, uid_t uid)
|
||||
gss_alloc_msg(struct gss_auth *gss_auth, uid_t uid, struct rpc_clnt *clnt,
|
||||
int machine_cred)
|
||||
{
|
||||
struct gss_upcall_msg *gss_msg;
|
||||
int vers;
|
||||
|
||||
gss_msg = kzalloc(sizeof(*gss_msg), GFP_NOFS);
|
||||
if (gss_msg != NULL) {
|
||||
INIT_LIST_HEAD(&gss_msg->list);
|
||||
rpc_init_wait_queue(&gss_msg->rpc_waitqueue, "RPCSEC_GSS upcall waitq");
|
||||
init_waitqueue_head(&gss_msg->waitqueue);
|
||||
atomic_set(&gss_msg->count, 1);
|
||||
gss_msg->msg.data = &gss_msg->uid;
|
||||
gss_msg->msg.len = sizeof(gss_msg->uid);
|
||||
gss_msg->uid = uid;
|
||||
gss_msg->auth = gss_auth;
|
||||
if (gss_msg == NULL)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
vers = get_pipe_version();
|
||||
if (vers < 0) {
|
||||
kfree(gss_msg);
|
||||
return ERR_PTR(vers);
|
||||
}
|
||||
gss_msg->inode = RPC_I(gss_auth->dentry[vers]->d_inode);
|
||||
INIT_LIST_HEAD(&gss_msg->list);
|
||||
rpc_init_wait_queue(&gss_msg->rpc_waitqueue, "RPCSEC_GSS upcall waitq");
|
||||
init_waitqueue_head(&gss_msg->waitqueue);
|
||||
atomic_set(&gss_msg->count, 1);
|
||||
gss_msg->uid = uid;
|
||||
gss_msg->auth = gss_auth;
|
||||
gss_encode_msg(gss_msg, clnt, machine_cred);
|
||||
return gss_msg;
|
||||
}
|
||||
|
||||
|
@ -350,16 +442,13 @@ gss_setup_upcall(struct rpc_clnt *clnt, struct gss_auth *gss_auth, struct rpc_cr
|
|||
struct gss_upcall_msg *gss_new, *gss_msg;
|
||||
uid_t uid = cred->cr_uid;
|
||||
|
||||
/* Special case: rpc.gssd assumes that uid == 0 implies machine creds */
|
||||
if (gss_cred->gc_machine_cred != 0)
|
||||
uid = 0;
|
||||
|
||||
gss_new = gss_alloc_msg(gss_auth, uid);
|
||||
if (gss_new == NULL)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
gss_new = gss_alloc_msg(gss_auth, uid, clnt, gss_cred->gc_machine_cred);
|
||||
if (IS_ERR(gss_new))
|
||||
return gss_new;
|
||||
gss_msg = gss_add_msg(gss_auth, gss_new);
|
||||
if (gss_msg == gss_new) {
|
||||
int res = rpc_queue_upcall(gss_auth->dentry->d_inode, &gss_new->msg);
|
||||
struct inode *inode = &gss_new->inode->vfs_inode;
|
||||
int res = rpc_queue_upcall(inode, &gss_new->msg);
|
||||
if (res) {
|
||||
gss_unhash_msg(gss_new);
|
||||
gss_msg = ERR_PTR(res);
|
||||
|
@ -369,6 +458,18 @@ gss_setup_upcall(struct rpc_clnt *clnt, struct gss_auth *gss_auth, struct rpc_cr
|
|||
return gss_msg;
|
||||
}
|
||||
|
||||
static void warn_gssd(void)
|
||||
{
|
||||
static unsigned long ratelimit;
|
||||
unsigned long now = jiffies;
|
||||
|
||||
if (time_after(now, ratelimit)) {
|
||||
printk(KERN_WARNING "RPC: AUTH_GSS upcall timed out.\n"
|
||||
"Please check user daemon is running.\n");
|
||||
ratelimit = now + 15*HZ;
|
||||
}
|
||||
}
|
||||
|
||||
static inline int
|
||||
gss_refresh_upcall(struct rpc_task *task)
|
||||
{
|
||||
|
@ -378,16 +479,25 @@ gss_refresh_upcall(struct rpc_task *task)
|
|||
struct gss_cred *gss_cred = container_of(cred,
|
||||
struct gss_cred, gc_base);
|
||||
struct gss_upcall_msg *gss_msg;
|
||||
struct inode *inode = gss_auth->dentry->d_inode;
|
||||
struct inode *inode;
|
||||
int err = 0;
|
||||
|
||||
dprintk("RPC: %5u gss_refresh_upcall for uid %u\n", task->tk_pid,
|
||||
cred->cr_uid);
|
||||
gss_msg = gss_setup_upcall(task->tk_client, gss_auth, cred);
|
||||
if (IS_ERR(gss_msg) == -EAGAIN) {
|
||||
/* XXX: warning on the first, under the assumption we
|
||||
* shouldn't normally hit this case on a refresh. */
|
||||
warn_gssd();
|
||||
task->tk_timeout = 15*HZ;
|
||||
rpc_sleep_on(&pipe_version_rpc_waitqueue, task, NULL);
|
||||
return 0;
|
||||
}
|
||||
if (IS_ERR(gss_msg)) {
|
||||
err = PTR_ERR(gss_msg);
|
||||
goto out;
|
||||
}
|
||||
inode = &gss_msg->inode->vfs_inode;
|
||||
spin_lock(&inode->i_lock);
|
||||
if (gss_cred->gc_upcall != NULL)
|
||||
rpc_sleep_on(&gss_cred->gc_upcall->rpc_waitqueue, task, NULL);
|
||||
|
@ -414,18 +524,29 @@ gss_refresh_upcall(struct rpc_task *task)
|
|||
static inline int
|
||||
gss_create_upcall(struct gss_auth *gss_auth, struct gss_cred *gss_cred)
|
||||
{
|
||||
struct inode *inode = gss_auth->dentry->d_inode;
|
||||
struct inode *inode;
|
||||
struct rpc_cred *cred = &gss_cred->gc_base;
|
||||
struct gss_upcall_msg *gss_msg;
|
||||
DEFINE_WAIT(wait);
|
||||
int err = 0;
|
||||
|
||||
dprintk("RPC: gss_upcall for uid %u\n", cred->cr_uid);
|
||||
retry:
|
||||
gss_msg = gss_setup_upcall(gss_auth->client, gss_auth, cred);
|
||||
if (PTR_ERR(gss_msg) == -EAGAIN) {
|
||||
err = wait_event_interruptible_timeout(pipe_version_waitqueue,
|
||||
pipe_version >= 0, 15*HZ);
|
||||
if (err)
|
||||
goto out;
|
||||
if (pipe_version < 0)
|
||||
warn_gssd();
|
||||
goto retry;
|
||||
}
|
||||
if (IS_ERR(gss_msg)) {
|
||||
err = PTR_ERR(gss_msg);
|
||||
goto out;
|
||||
}
|
||||
inode = &gss_msg->inode->vfs_inode;
|
||||
for (;;) {
|
||||
prepare_to_wait(&gss_msg->waitqueue, &wait, TASK_INTERRUPTIBLE);
|
||||
spin_lock(&inode->i_lock);
|
||||
|
@ -543,6 +664,38 @@ gss_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
|
|||
return err;
|
||||
}
|
||||
|
||||
static int gss_pipe_open(struct inode *inode, int new_version)
|
||||
{
|
||||
int ret = 0;
|
||||
|
||||
spin_lock(&pipe_version_lock);
|
||||
if (pipe_version < 0) {
|
||||
/* First open of any gss pipe determines the version: */
|
||||
pipe_version = new_version;
|
||||
rpc_wake_up(&pipe_version_rpc_waitqueue);
|
||||
wake_up(&pipe_version_waitqueue);
|
||||
} else if (pipe_version != new_version) {
|
||||
/* Trying to open a pipe of a different version */
|
||||
ret = -EBUSY;
|
||||
goto out;
|
||||
}
|
||||
atomic_inc(&pipe_users);
|
||||
out:
|
||||
spin_unlock(&pipe_version_lock);
|
||||
return ret;
|
||||
|
||||
}
|
||||
|
||||
static int gss_pipe_open_v0(struct inode *inode)
|
||||
{
|
||||
return gss_pipe_open(inode, 0);
|
||||
}
|
||||
|
||||
static int gss_pipe_open_v1(struct inode *inode)
|
||||
{
|
||||
return gss_pipe_open(inode, 1);
|
||||
}
|
||||
|
||||
static void
|
||||
gss_pipe_release(struct inode *inode)
|
||||
{
|
||||
|
@ -562,27 +715,22 @@ gss_pipe_release(struct inode *inode)
|
|||
spin_lock(&inode->i_lock);
|
||||
}
|
||||
spin_unlock(&inode->i_lock);
|
||||
|
||||
put_pipe_version();
|
||||
}
|
||||
|
||||
static void
|
||||
gss_pipe_destroy_msg(struct rpc_pipe_msg *msg)
|
||||
{
|
||||
struct gss_upcall_msg *gss_msg = container_of(msg, struct gss_upcall_msg, msg);
|
||||
static unsigned long ratelimit;
|
||||
|
||||
if (msg->errno < 0) {
|
||||
dprintk("RPC: gss_pipe_destroy_msg releasing msg %p\n",
|
||||
gss_msg);
|
||||
atomic_inc(&gss_msg->count);
|
||||
gss_unhash_msg(gss_msg);
|
||||
if (msg->errno == -ETIMEDOUT) {
|
||||
unsigned long now = jiffies;
|
||||
if (time_after(now, ratelimit)) {
|
||||
printk(KERN_WARNING "RPC: AUTH_GSS upcall timed out.\n"
|
||||
"Please check user daemon is running!\n");
|
||||
ratelimit = now + 15*HZ;
|
||||
}
|
||||
}
|
||||
if (msg->errno == -ETIMEDOUT)
|
||||
warn_gssd();
|
||||
gss_release_msg(gss_msg);
|
||||
}
|
||||
}
|
||||
|
@ -623,20 +771,38 @@ gss_create(struct rpc_clnt *clnt, rpc_authflavor_t flavor)
|
|||
atomic_set(&auth->au_count, 1);
|
||||
kref_init(&gss_auth->kref);
|
||||
|
||||
gss_auth->dentry = rpc_mkpipe(clnt->cl_dentry, gss_auth->mech->gm_name,
|
||||
clnt, &gss_upcall_ops, RPC_PIPE_WAIT_FOR_OPEN);
|
||||
if (IS_ERR(gss_auth->dentry)) {
|
||||
err = PTR_ERR(gss_auth->dentry);
|
||||
/*
|
||||
* Note: if we created the old pipe first, then someone who
|
||||
* examined the directory at the right moment might conclude
|
||||
* that we supported only the old pipe. So we instead create
|
||||
* the new pipe first.
|
||||
*/
|
||||
gss_auth->dentry[1] = rpc_mkpipe(clnt->cl_dentry,
|
||||
"gssd",
|
||||
clnt, &gss_upcall_ops_v1,
|
||||
RPC_PIPE_WAIT_FOR_OPEN);
|
||||
if (IS_ERR(gss_auth->dentry[1])) {
|
||||
err = PTR_ERR(gss_auth->dentry[1]);
|
||||
goto err_put_mech;
|
||||
}
|
||||
|
||||
gss_auth->dentry[0] = rpc_mkpipe(clnt->cl_dentry,
|
||||
gss_auth->mech->gm_name,
|
||||
clnt, &gss_upcall_ops_v0,
|
||||
RPC_PIPE_WAIT_FOR_OPEN);
|
||||
if (IS_ERR(gss_auth->dentry[0])) {
|
||||
err = PTR_ERR(gss_auth->dentry[0]);
|
||||
goto err_unlink_pipe_1;
|
||||
}
|
||||
err = rpcauth_init_credcache(auth);
|
||||
if (err)
|
||||
goto err_unlink_pipe;
|
||||
goto err_unlink_pipe_0;
|
||||
|
||||
return auth;
|
||||
err_unlink_pipe:
|
||||
rpc_unlink(gss_auth->dentry);
|
||||
err_unlink_pipe_0:
|
||||
rpc_unlink(gss_auth->dentry[0]);
|
||||
err_unlink_pipe_1:
|
||||
rpc_unlink(gss_auth->dentry[1]);
|
||||
err_put_mech:
|
||||
gss_mech_put(gss_auth->mech);
|
||||
err_free:
|
||||
|
@ -649,8 +815,8 @@ gss_create(struct rpc_clnt *clnt, rpc_authflavor_t flavor)
|
|||
static void
|
||||
gss_free(struct gss_auth *gss_auth)
|
||||
{
|
||||
rpc_unlink(gss_auth->dentry);
|
||||
gss_auth->dentry = NULL;
|
||||
rpc_unlink(gss_auth->dentry[1]);
|
||||
rpc_unlink(gss_auth->dentry[0]);
|
||||
gss_mech_put(gss_auth->mech);
|
||||
|
||||
kfree(gss_auth);
|
||||
|
@ -693,7 +859,7 @@ gss_destroying_context(struct rpc_cred *cred)
|
|||
struct rpc_task *task;
|
||||
|
||||
if (gss_cred->gc_ctx == NULL ||
|
||||
test_and_clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags) == 0)
|
||||
test_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags) == 0)
|
||||
return 0;
|
||||
|
||||
gss_cred->gc_ctx->gc_proc = RPC_GSS_PROC_DESTROY;
|
||||
|
@ -757,14 +923,12 @@ gss_free_cred_callback(struct rcu_head *head)
|
|||
}
|
||||
|
||||
static void
|
||||
gss_destroy_cred(struct rpc_cred *cred)
|
||||
gss_destroy_nullcred(struct rpc_cred *cred)
|
||||
{
|
||||
struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);
|
||||
struct gss_auth *gss_auth = container_of(cred->cr_auth, struct gss_auth, rpc_auth);
|
||||
struct gss_cl_ctx *ctx = gss_cred->gc_ctx;
|
||||
|
||||
if (gss_destroying_context(cred))
|
||||
return;
|
||||
rcu_assign_pointer(gss_cred->gc_ctx, NULL);
|
||||
call_rcu(&cred->cr_rcu, gss_free_cred_callback);
|
||||
if (ctx)
|
||||
|
@ -772,6 +936,15 @@ gss_destroy_cred(struct rpc_cred *cred)
|
|||
kref_put(&gss_auth->kref, gss_free_callback);
|
||||
}
|
||||
|
||||
static void
|
||||
gss_destroy_cred(struct rpc_cred *cred)
|
||||
{
|
||||
|
||||
if (gss_destroying_context(cred))
|
||||
return;
|
||||
gss_destroy_nullcred(cred);
|
||||
}
|
||||
|
||||
/*
|
||||
* Lookup RPCSEC_GSS cred for the current process
|
||||
*/
|
||||
|
@ -1017,7 +1190,7 @@ gss_wrap_req_integ(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
|
|||
offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base;
|
||||
*p++ = htonl(rqstp->rq_seqno);
|
||||
|
||||
status = rpc_call_xdrproc(encode, rqstp, p, obj);
|
||||
status = encode(rqstp, p, obj);
|
||||
if (status)
|
||||
return status;
|
||||
|
||||
|
@ -1111,7 +1284,7 @@ gss_wrap_req_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
|
|||
offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base;
|
||||
*p++ = htonl(rqstp->rq_seqno);
|
||||
|
||||
status = rpc_call_xdrproc(encode, rqstp, p, obj);
|
||||
status = encode(rqstp, p, obj);
|
||||
if (status)
|
||||
return status;
|
||||
|
||||
|
@ -1170,12 +1343,12 @@ gss_wrap_req(struct rpc_task *task,
|
|||
/* The spec seems a little ambiguous here, but I think that not
|
||||
* wrapping context destruction requests makes the most sense.
|
||||
*/
|
||||
status = rpc_call_xdrproc(encode, rqstp, p, obj);
|
||||
status = encode(rqstp, p, obj);
|
||||
goto out;
|
||||
}
|
||||
switch (gss_cred->gc_service) {
|
||||
case RPC_GSS_SVC_NONE:
|
||||
status = rpc_call_xdrproc(encode, rqstp, p, obj);
|
||||
status = encode(rqstp, p, obj);
|
||||
break;
|
||||
case RPC_GSS_SVC_INTEGRITY:
|
||||
status = gss_wrap_req_integ(cred, ctx, encode,
|
||||
|
@ -1291,7 +1464,7 @@ gss_unwrap_resp(struct rpc_task *task,
|
|||
cred->cr_auth->au_rslack = cred->cr_auth->au_verfsize + (p - savedp)
|
||||
+ (savedlen - head->iov_len);
|
||||
out_decode:
|
||||
status = rpc_call_xdrproc(decode, rqstp, p, obj);
|
||||
status = decode(rqstp, p, obj);
|
||||
out:
|
||||
gss_put_ctx(ctx);
|
||||
dprintk("RPC: %5u gss_unwrap_resp returning %d\n", task->tk_pid,
|
||||
|
@ -1324,7 +1497,7 @@ static const struct rpc_credops gss_credops = {
|
|||
|
||||
static const struct rpc_credops gss_nullops = {
|
||||
.cr_name = "AUTH_GSS",
|
||||
.crdestroy = gss_destroy_cred,
|
||||
.crdestroy = gss_destroy_nullcred,
|
||||
.crbind = rpcauth_generic_bind_cred,
|
||||
.crmatch = gss_match,
|
||||
.crmarshal = gss_marshal,
|
||||
|
@ -1334,10 +1507,19 @@ static const struct rpc_credops gss_nullops = {
|
|||
.crunwrap_resp = gss_unwrap_resp,
|
||||
};
|
||||
|
||||
static struct rpc_pipe_ops gss_upcall_ops = {
|
||||
static struct rpc_pipe_ops gss_upcall_ops_v0 = {
|
||||
.upcall = gss_pipe_upcall,
|
||||
.downcall = gss_pipe_downcall,
|
||||
.destroy_msg = gss_pipe_destroy_msg,
|
||||
.open_pipe = gss_pipe_open_v0,
|
||||
.release_pipe = gss_pipe_release,
|
||||
};
|
||||
|
||||
static struct rpc_pipe_ops gss_upcall_ops_v1 = {
|
||||
.upcall = gss_pipe_upcall,
|
||||
.downcall = gss_pipe_downcall,
|
||||
.destroy_msg = gss_pipe_destroy_msg,
|
||||
.open_pipe = gss_pipe_open_v1,
|
||||
.release_pipe = gss_pipe_release,
|
||||
};
|
||||
|
||||
|
@ -1354,6 +1536,7 @@ static int __init init_rpcsec_gss(void)
|
|||
err = gss_svc_init();
|
||||
if (err)
|
||||
goto out_unregister;
|
||||
rpc_init_wait_queue(&pipe_version_rpc_waitqueue, "gss pipe version");
|
||||
return 0;
|
||||
out_unregister:
|
||||
rpcauth_unregister(&authgss_ops);
|
||||
|
|
|
@ -152,7 +152,7 @@ g_token_size(struct xdr_netobj *mech, unsigned int body_size)
|
|||
return(1 + der_length_size(body_size) + body_size);
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL(g_token_size);
|
||||
EXPORT_SYMBOL_GPL(g_token_size);
|
||||
|
||||
/* fills in a buffer with the token header. The buffer is assumed to
|
||||
be the right size. buf is advanced past the token header */
|
||||
|
@ -167,7 +167,7 @@ g_make_token_header(struct xdr_netobj *mech, int body_size, unsigned char **buf)
|
|||
TWRITE_STR(*buf, mech->data, ((int) mech->len));
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL(g_make_token_header);
|
||||
EXPORT_SYMBOL_GPL(g_make_token_header);
|
||||
|
||||
/*
|
||||
* Given a buffer containing a token, reads and verifies the token,
|
||||
|
@ -231,5 +231,5 @@ g_verify_token_header(struct xdr_netobj *mech, int *body_size,
|
|||
return(ret);
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL(g_verify_token_header);
|
||||
EXPORT_SYMBOL_GPL(g_verify_token_header);
|
||||
|
||||
|
|
|
@ -117,7 +117,7 @@ gss_mech_register(struct gss_api_mech *gm)
|
|||
return 0;
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL(gss_mech_register);
|
||||
EXPORT_SYMBOL_GPL(gss_mech_register);
|
||||
|
||||
void
|
||||
gss_mech_unregister(struct gss_api_mech *gm)
|
||||
|
@ -129,7 +129,7 @@ gss_mech_unregister(struct gss_api_mech *gm)
|
|||
gss_mech_free(gm);
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL(gss_mech_unregister);
|
||||
EXPORT_SYMBOL_GPL(gss_mech_unregister);
|
||||
|
||||
struct gss_api_mech *
|
||||
gss_mech_get(struct gss_api_mech *gm)
|
||||
|
@ -138,7 +138,7 @@ gss_mech_get(struct gss_api_mech *gm)
|
|||
return gm;
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL(gss_mech_get);
|
||||
EXPORT_SYMBOL_GPL(gss_mech_get);
|
||||
|
||||
struct gss_api_mech *
|
||||
gss_mech_get_by_name(const char *name)
|
||||
|
@ -158,7 +158,7 @@ gss_mech_get_by_name(const char *name)
|
|||
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL(gss_mech_get_by_name);
|
||||
EXPORT_SYMBOL_GPL(gss_mech_get_by_name);
|
||||
|
||||
static inline int
|
||||
mech_supports_pseudoflavor(struct gss_api_mech *gm, u32 pseudoflavor)
|
||||
|
@ -191,7 +191,7 @@ gss_mech_get_by_pseudoflavor(u32 pseudoflavor)
|
|||
return gm;
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL(gss_mech_get_by_pseudoflavor);
|
||||
EXPORT_SYMBOL_GPL(gss_mech_get_by_pseudoflavor);
|
||||
|
||||
u32
|
||||
gss_svc_to_pseudoflavor(struct gss_api_mech *gm, u32 service)
|
||||
|
@ -205,7 +205,7 @@ gss_svc_to_pseudoflavor(struct gss_api_mech *gm, u32 service)
|
|||
}
|
||||
return RPC_AUTH_MAXFLAVOR; /* illegal value */
|
||||
}
|
||||
EXPORT_SYMBOL(gss_svc_to_pseudoflavor);
|
||||
EXPORT_SYMBOL_GPL(gss_svc_to_pseudoflavor);
|
||||
|
||||
u32
|
||||
gss_pseudoflavor_to_service(struct gss_api_mech *gm, u32 pseudoflavor)
|
||||
|
@ -219,7 +219,7 @@ gss_pseudoflavor_to_service(struct gss_api_mech *gm, u32 pseudoflavor)
|
|||
return 0;
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL(gss_pseudoflavor_to_service);
|
||||
EXPORT_SYMBOL_GPL(gss_pseudoflavor_to_service);
|
||||
|
||||
char *
|
||||
gss_service_to_auth_domain_name(struct gss_api_mech *gm, u32 service)
|
||||
|
@ -233,7 +233,7 @@ gss_service_to_auth_domain_name(struct gss_api_mech *gm, u32 service)
|
|||
return NULL;
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL(gss_service_to_auth_domain_name);
|
||||
EXPORT_SYMBOL_GPL(gss_service_to_auth_domain_name);
|
||||
|
||||
void
|
||||
gss_mech_put(struct gss_api_mech * gm)
|
||||
|
@ -242,7 +242,7 @@ gss_mech_put(struct gss_api_mech * gm)
|
|||
module_put(gm->gm_owner);
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL(gss_mech_put);
|
||||
EXPORT_SYMBOL_GPL(gss_mech_put);
|
||||
|
||||
/* The mech could probably be determined from the token instead, but it's just
|
||||
* as easy for now to pass it in. */
|
||||
|
|
|
@ -332,6 +332,7 @@ struct rsc {
|
|||
struct svc_cred cred;
|
||||
struct gss_svc_seq_data seqdata;
|
||||
struct gss_ctx *mechctx;
|
||||
char *client_name;
|
||||
};
|
||||
|
||||
static struct cache_head *rsc_table[RSC_HASHMAX];
|
||||
|
@ -346,6 +347,7 @@ static void rsc_free(struct rsc *rsci)
|
|||
gss_delete_sec_context(&rsci->mechctx);
|
||||
if (rsci->cred.cr_group_info)
|
||||
put_group_info(rsci->cred.cr_group_info);
|
||||
kfree(rsci->client_name);
|
||||
}
|
||||
|
||||
static void rsc_put(struct kref *ref)
|
||||
|
@ -383,6 +385,7 @@ rsc_init(struct cache_head *cnew, struct cache_head *ctmp)
|
|||
tmp->handle.data = NULL;
|
||||
new->mechctx = NULL;
|
||||
new->cred.cr_group_info = NULL;
|
||||
new->client_name = NULL;
|
||||
}
|
||||
|
||||
static void
|
||||
|
@ -397,6 +400,8 @@ update_rsc(struct cache_head *cnew, struct cache_head *ctmp)
|
|||
spin_lock_init(&new->seqdata.sd_lock);
|
||||
new->cred = tmp->cred;
|
||||
tmp->cred.cr_group_info = NULL;
|
||||
new->client_name = tmp->client_name;
|
||||
tmp->client_name = NULL;
|
||||
}
|
||||
|
||||
static struct cache_head *
|
||||
|
@ -486,6 +491,15 @@ static int rsc_parse(struct cache_detail *cd,
|
|||
status = gss_import_sec_context(buf, len, gm, &rsci.mechctx);
|
||||
if (status)
|
||||
goto out;
|
||||
|
||||
/* get client name */
|
||||
len = qword_get(&mesg, buf, mlen);
|
||||
if (len > 0) {
|
||||
rsci.client_name = kstrdup(buf, GFP_KERNEL);
|
||||
if (!rsci.client_name)
|
||||
goto out;
|
||||
}
|
||||
|
||||
}
|
||||
rsci.h.expiry_time = expiry;
|
||||
rscp = rsc_update(&rsci, rscp);
|
||||
|
@ -746,7 +760,7 @@ u32 svcauth_gss_flavor(struct auth_domain *dom)
|
|||
return gd->pseudoflavor;
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL(svcauth_gss_flavor);
|
||||
EXPORT_SYMBOL_GPL(svcauth_gss_flavor);
|
||||
|
||||
int
|
||||
svcauth_gss_register_pseudoflavor(u32 pseudoflavor, char * name)
|
||||
|
@ -780,7 +794,7 @@ svcauth_gss_register_pseudoflavor(u32 pseudoflavor, char * name)
|
|||
return stat;
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL(svcauth_gss_register_pseudoflavor);
|
||||
EXPORT_SYMBOL_GPL(svcauth_gss_register_pseudoflavor);
|
||||
|
||||
static inline int
|
||||
read_u32_from_xdr_buf(struct xdr_buf *buf, int base, u32 *obj)
|
||||
|
@ -913,6 +927,16 @@ struct gss_svc_data {
|
|||
struct rsc *rsci;
|
||||
};
|
||||
|
||||
char *svc_gss_principal(struct svc_rqst *rqstp)
|
||||
{
|
||||
struct gss_svc_data *gd = (struct gss_svc_data *)rqstp->rq_auth_data;
|
||||
|
||||
if (gd && gd->rsci)
|
||||
return gd->rsci->client_name;
|
||||
return NULL;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(svc_gss_principal);
|
||||
|
||||
static int
|
||||
svcauth_gss_set_client(struct svc_rqst *rqstp)
|
||||
{
|
||||
|
|
|
@ -197,6 +197,12 @@ static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args, stru
|
|||
|
||||
clnt->cl_rtt = &clnt->cl_rtt_default;
|
||||
rpc_init_rtt(&clnt->cl_rtt_default, clnt->cl_timeout->to_initval);
|
||||
clnt->cl_principal = NULL;
|
||||
if (args->client_name) {
|
||||
clnt->cl_principal = kstrdup(args->client_name, GFP_KERNEL);
|
||||
if (!clnt->cl_principal)
|
||||
goto out_no_principal;
|
||||
}
|
||||
|
||||
kref_init(&clnt->cl_kref);
|
||||
|
||||
|
@ -226,6 +232,8 @@ static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args, stru
|
|||
rpc_put_mount();
|
||||
}
|
||||
out_no_path:
|
||||
kfree(clnt->cl_principal);
|
||||
out_no_principal:
|
||||
rpc_free_iostats(clnt->cl_metrics);
|
||||
out_no_stats:
|
||||
if (clnt->cl_server != clnt->cl_inline_name)
|
||||
|
@ -354,6 +362,11 @@ rpc_clone_client(struct rpc_clnt *clnt)
|
|||
new->cl_metrics = rpc_alloc_iostats(clnt);
|
||||
if (new->cl_metrics == NULL)
|
||||
goto out_no_stats;
|
||||
if (clnt->cl_principal) {
|
||||
new->cl_principal = kstrdup(clnt->cl_principal, GFP_KERNEL);
|
||||
if (new->cl_principal == NULL)
|
||||
goto out_no_principal;
|
||||
}
|
||||
kref_init(&new->cl_kref);
|
||||
err = rpc_setup_pipedir(new, clnt->cl_program->pipe_dir_name);
|
||||
if (err != 0)
|
||||
|
@ -366,6 +379,8 @@ rpc_clone_client(struct rpc_clnt *clnt)
|
|||
rpciod_up();
|
||||
return new;
|
||||
out_no_path:
|
||||
kfree(new->cl_principal);
|
||||
out_no_principal:
|
||||
rpc_free_iostats(new->cl_metrics);
|
||||
out_no_stats:
|
||||
kfree(new);
|
||||
|
@ -417,6 +432,7 @@ rpc_free_client(struct kref *kref)
|
|||
out_free:
|
||||
rpc_unregister_client(clnt);
|
||||
rpc_free_iostats(clnt->cl_metrics);
|
||||
kfree(clnt->cl_principal);
|
||||
clnt->cl_metrics = NULL;
|
||||
xprt_put(clnt->cl_xprt);
|
||||
rpciod_down();
|
||||
|
|
|
@ -113,7 +113,7 @@ rpc_queue_upcall(struct inode *inode, struct rpc_pipe_msg *msg)
|
|||
wake_up(&rpci->waitq);
|
||||
return res;
|
||||
}
|
||||
EXPORT_SYMBOL(rpc_queue_upcall);
|
||||
EXPORT_SYMBOL_GPL(rpc_queue_upcall);
|
||||
|
||||
static inline void
|
||||
rpc_inode_setowner(struct inode *inode, void *private)
|
||||
|
@ -126,13 +126,14 @@ rpc_close_pipes(struct inode *inode)
|
|||
{
|
||||
struct rpc_inode *rpci = RPC_I(inode);
|
||||
struct rpc_pipe_ops *ops;
|
||||
int need_release;
|
||||
|
||||
mutex_lock(&inode->i_mutex);
|
||||
ops = rpci->ops;
|
||||
if (ops != NULL) {
|
||||
LIST_HEAD(free_list);
|
||||
|
||||
spin_lock(&inode->i_lock);
|
||||
need_release = rpci->nreaders != 0 || rpci->nwriters != 0;
|
||||
rpci->nreaders = 0;
|
||||
list_splice_init(&rpci->in_upcall, &free_list);
|
||||
list_splice_init(&rpci->pipe, &free_list);
|
||||
|
@ -141,7 +142,7 @@ rpc_close_pipes(struct inode *inode)
|
|||
spin_unlock(&inode->i_lock);
|
||||
rpc_purge_list(rpci, &free_list, ops->destroy_msg, -EPIPE);
|
||||
rpci->nwriters = 0;
|
||||
if (ops->release_pipe)
|
||||
if (need_release && ops->release_pipe)
|
||||
ops->release_pipe(inode);
|
||||
cancel_delayed_work_sync(&rpci->queue_timeout);
|
||||
}
|
||||
|
@ -169,16 +170,24 @@ static int
|
|||
rpc_pipe_open(struct inode *inode, struct file *filp)
|
||||
{
|
||||
struct rpc_inode *rpci = RPC_I(inode);
|
||||
int first_open;
|
||||
int res = -ENXIO;
|
||||
|
||||
mutex_lock(&inode->i_mutex);
|
||||
if (rpci->ops != NULL) {
|
||||
if (filp->f_mode & FMODE_READ)
|
||||
rpci->nreaders ++;
|
||||
if (filp->f_mode & FMODE_WRITE)
|
||||
rpci->nwriters ++;
|
||||
res = 0;
|
||||
if (rpci->ops == NULL)
|
||||
goto out;
|
||||
first_open = rpci->nreaders == 0 && rpci->nwriters == 0;
|
||||
if (first_open && rpci->ops->open_pipe) {
|
||||
res = rpci->ops->open_pipe(inode);
|
||||
if (res)
|
||||
goto out;
|
||||
}
|
||||
if (filp->f_mode & FMODE_READ)
|
||||
rpci->nreaders++;
|
||||
if (filp->f_mode & FMODE_WRITE)
|
||||
rpci->nwriters++;
|
||||
res = 0;
|
||||
out:
|
||||
mutex_unlock(&inode->i_mutex);
|
||||
return res;
|
||||
}
|
||||
|
@ -188,6 +197,7 @@ rpc_pipe_release(struct inode *inode, struct file *filp)
|
|||
{
|
||||
struct rpc_inode *rpci = RPC_I(inode);
|
||||
struct rpc_pipe_msg *msg;
|
||||
int last_close;
|
||||
|
||||
mutex_lock(&inode->i_mutex);
|
||||
if (rpci->ops == NULL)
|
||||
|
@ -214,7 +224,8 @@ rpc_pipe_release(struct inode *inode, struct file *filp)
|
|||
rpci->ops->destroy_msg, -EAGAIN);
|
||||
}
|
||||
}
|
||||
if (rpci->ops->release_pipe)
|
||||
last_close = rpci->nwriters == 0 && rpci->nreaders == 0;
|
||||
if (last_close && rpci->ops->release_pipe)
|
||||
rpci->ops->release_pipe(inode);
|
||||
out:
|
||||
mutex_unlock(&inode->i_mutex);
|
||||
|
@ -396,6 +407,7 @@ enum {
|
|||
RPCAUTH_nfs,
|
||||
RPCAUTH_portmap,
|
||||
RPCAUTH_statd,
|
||||
RPCAUTH_nfsd4_cb,
|
||||
RPCAUTH_RootEOF
|
||||
};
|
||||
|
||||
|
@ -429,6 +441,10 @@ static struct rpc_filelist files[] = {
|
|||
.name = "statd",
|
||||
.mode = S_IFDIR | S_IRUGO | S_IXUGO,
|
||||
},
|
||||
[RPCAUTH_nfsd4_cb] = {
|
||||
.name = "nfsd4_cb",
|
||||
.mode = S_IFDIR | S_IRUGO | S_IXUGO,
|
||||
},
|
||||
};
|
||||
|
||||
enum {
|
||||
|
@ -748,7 +764,7 @@ rpc_rmdir(struct dentry *dentry)
|
|||
* @name: name of pipe
|
||||
* @private: private data to associate with the pipe, for the caller's use
|
||||
* @ops: operations defining the behavior of the pipe: upcall, downcall,
|
||||
* release_pipe, and destroy_msg.
|
||||
* release_pipe, open_pipe, and destroy_msg.
|
||||
* @flags: rpc_inode flags
|
||||
*
|
||||
* Data is made available for userspace to read by calls to
|
||||
|
@ -808,7 +824,7 @@ rpc_mkpipe(struct dentry *parent, const char *name, void *private, struct rpc_pi
|
|||
-ENOMEM);
|
||||
goto out;
|
||||
}
|
||||
EXPORT_SYMBOL(rpc_mkpipe);
|
||||
EXPORT_SYMBOL_GPL(rpc_mkpipe);
|
||||
|
||||
/**
|
||||
* rpc_unlink - remove a pipe
|
||||
|
@ -839,7 +855,7 @@ rpc_unlink(struct dentry *dentry)
|
|||
dput(parent);
|
||||
return error;
|
||||
}
|
||||
EXPORT_SYMBOL(rpc_unlink);
|
||||
EXPORT_SYMBOL_GPL(rpc_unlink);
|
||||
|
||||
/*
|
||||
* populate the filesystem
|
||||
|
|
|
@ -28,7 +28,7 @@ xdr_encode_netobj(__be32 *p, const struct xdr_netobj *obj)
|
|||
memcpy(p, obj->data, obj->len);
|
||||
return p + XDR_QUADLEN(obj->len);
|
||||
}
|
||||
EXPORT_SYMBOL(xdr_encode_netobj);
|
||||
EXPORT_SYMBOL_GPL(xdr_encode_netobj);
|
||||
|
||||
__be32 *
|
||||
xdr_decode_netobj(__be32 *p, struct xdr_netobj *obj)
|
||||
|
@ -41,7 +41,7 @@ xdr_decode_netobj(__be32 *p, struct xdr_netobj *obj)
|
|||
obj->data = (u8 *) p;
|
||||
return p + XDR_QUADLEN(len);
|
||||
}
|
||||
EXPORT_SYMBOL(xdr_decode_netobj);
|
||||
EXPORT_SYMBOL_GPL(xdr_decode_netobj);
|
||||
|
||||
/**
|
||||
* xdr_encode_opaque_fixed - Encode fixed length opaque data
|
||||
|
@ -71,7 +71,7 @@ __be32 *xdr_encode_opaque_fixed(__be32 *p, const void *ptr, unsigned int nbytes)
|
|||
}
|
||||
return p;
|
||||
}
|
||||
EXPORT_SYMBOL(xdr_encode_opaque_fixed);
|
||||
EXPORT_SYMBOL_GPL(xdr_encode_opaque_fixed);
|
||||
|
||||
/**
|
||||
* xdr_encode_opaque - Encode variable length opaque data
|
||||
|
@ -86,14 +86,14 @@ __be32 *xdr_encode_opaque(__be32 *p, const void *ptr, unsigned int nbytes)
|
|||
*p++ = htonl(nbytes);
|
||||
return xdr_encode_opaque_fixed(p, ptr, nbytes);
|
||||
}
|
||||
EXPORT_SYMBOL(xdr_encode_opaque);
|
||||
EXPORT_SYMBOL_GPL(xdr_encode_opaque);
|
||||
|
||||
__be32 *
|
||||
xdr_encode_string(__be32 *p, const char *string)
|
||||
{
|
||||
return xdr_encode_array(p, string, strlen(string));
|
||||
}
|
||||
EXPORT_SYMBOL(xdr_encode_string);
|
||||
EXPORT_SYMBOL_GPL(xdr_encode_string);
|
||||
|
||||
__be32 *
|
||||
xdr_decode_string_inplace(__be32 *p, char **sp,
|
||||
|
@ -108,7 +108,7 @@ xdr_decode_string_inplace(__be32 *p, char **sp,
|
|||
*sp = (char *) p;
|
||||
return p + XDR_QUADLEN(len);
|
||||
}
|
||||
EXPORT_SYMBOL(xdr_decode_string_inplace);
|
||||
EXPORT_SYMBOL_GPL(xdr_decode_string_inplace);
|
||||
|
||||
void
|
||||
xdr_encode_pages(struct xdr_buf *xdr, struct page **pages, unsigned int base,
|
||||
|
@ -136,7 +136,7 @@ xdr_encode_pages(struct xdr_buf *xdr, struct page **pages, unsigned int base,
|
|||
xdr->buflen += len;
|
||||
xdr->len += len;
|
||||
}
|
||||
EXPORT_SYMBOL(xdr_encode_pages);
|
||||
EXPORT_SYMBOL_GPL(xdr_encode_pages);
|
||||
|
||||
void
|
||||
xdr_inline_pages(struct xdr_buf *xdr, unsigned int offset,
|
||||
|
@ -158,7 +158,7 @@ xdr_inline_pages(struct xdr_buf *xdr, unsigned int offset,
|
|||
|
||||
xdr->buflen += len;
|
||||
}
|
||||
EXPORT_SYMBOL(xdr_inline_pages);
|
||||
EXPORT_SYMBOL_GPL(xdr_inline_pages);
|
||||
|
||||
/*
|
||||
* Helper routines for doing 'memmove' like operations on a struct xdr_buf
|
||||
|
@ -428,7 +428,7 @@ xdr_shift_buf(struct xdr_buf *buf, size_t len)
|
|||
{
|
||||
xdr_shrink_bufhead(buf, len);
|
||||
}
|
||||
EXPORT_SYMBOL(xdr_shift_buf);
|
||||
EXPORT_SYMBOL_GPL(xdr_shift_buf);
|
||||
|
||||
/**
|
||||
* xdr_init_encode - Initialize a struct xdr_stream for sending data.
|
||||
|
@ -465,7 +465,7 @@ void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p)
|
|||
iov->iov_len += len;
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL(xdr_init_encode);
|
||||
EXPORT_SYMBOL_GPL(xdr_init_encode);
|
||||
|
||||
/**
|
||||
* xdr_reserve_space - Reserve buffer space for sending
|
||||
|
@ -492,7 +492,7 @@ __be32 * xdr_reserve_space(struct xdr_stream *xdr, size_t nbytes)
|
|||
xdr->buf->len += nbytes;
|
||||
return p;
|
||||
}
|
||||
EXPORT_SYMBOL(xdr_reserve_space);
|
||||
EXPORT_SYMBOL_GPL(xdr_reserve_space);
|
||||
|
||||
/**
|
||||
* xdr_write_pages - Insert a list of pages into an XDR buffer for sending
|
||||
|
@ -527,7 +527,7 @@ void xdr_write_pages(struct xdr_stream *xdr, struct page **pages, unsigned int b
|
|||
buf->buflen += len;
|
||||
buf->len += len;
|
||||
}
|
||||
EXPORT_SYMBOL(xdr_write_pages);
|
||||
EXPORT_SYMBOL_GPL(xdr_write_pages);
|
||||
|
||||
/**
|
||||
* xdr_init_decode - Initialize an xdr_stream for decoding data.
|
||||
|
@ -547,7 +547,7 @@ void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p)
|
|||
xdr->p = p;
|
||||
xdr->end = (__be32 *)((char *)iov->iov_base + len);
|
||||
}
|
||||
EXPORT_SYMBOL(xdr_init_decode);
|
||||
EXPORT_SYMBOL_GPL(xdr_init_decode);
|
||||
|
||||
/**
|
||||
* xdr_inline_decode - Retrieve non-page XDR data to decode
|
||||
|
@ -569,7 +569,7 @@ __be32 * xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
|
|||
xdr->p = q;
|
||||
return p;
|
||||
}
|
||||
EXPORT_SYMBOL(xdr_inline_decode);
|
||||
EXPORT_SYMBOL_GPL(xdr_inline_decode);
|
||||
|
||||
/**
|
||||
* xdr_read_pages - Ensure page-based XDR data to decode is aligned at current pointer position
|
||||
|
@ -613,7 +613,7 @@ void xdr_read_pages(struct xdr_stream *xdr, unsigned int len)
|
|||
xdr->p = (__be32 *)((char *)iov->iov_base + padding);
|
||||
xdr->end = (__be32 *)((char *)iov->iov_base + end);
|
||||
}
|
||||
EXPORT_SYMBOL(xdr_read_pages);
|
||||
EXPORT_SYMBOL_GPL(xdr_read_pages);
|
||||
|
||||
/**
|
||||
* xdr_enter_page - decode data from the XDR page
|
||||
|
@ -638,7 +638,7 @@ void xdr_enter_page(struct xdr_stream *xdr, unsigned int len)
|
|||
xdr->p = (__be32 *)(kaddr + xdr->buf->page_base);
|
||||
xdr->end = (__be32 *)((char *)xdr->p + len);
|
||||
}
|
||||
EXPORT_SYMBOL(xdr_enter_page);
|
||||
EXPORT_SYMBOL_GPL(xdr_enter_page);
|
||||
|
||||
static struct kvec empty_iov = {.iov_base = NULL, .iov_len = 0};
|
||||
|
||||
|
@ -650,7 +650,7 @@ xdr_buf_from_iov(struct kvec *iov, struct xdr_buf *buf)
|
|||
buf->page_len = 0;
|
||||
buf->buflen = buf->len = iov->iov_len;
|
||||
}
|
||||
EXPORT_SYMBOL(xdr_buf_from_iov);
|
||||
EXPORT_SYMBOL_GPL(xdr_buf_from_iov);
|
||||
|
||||
/* Sets subbuf to the portion of buf of length len beginning base bytes
|
||||
* from the start of buf. Returns -1 if base of length are out of bounds. */
|
||||
|
@ -699,7 +699,7 @@ xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf,
|
|||
return -1;
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(xdr_buf_subsegment);
|
||||
EXPORT_SYMBOL_GPL(xdr_buf_subsegment);
|
||||
|
||||
static void __read_bytes_from_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len)
|
||||
{
|
||||
|
@ -730,7 +730,7 @@ int read_bytes_from_xdr_buf(struct xdr_buf *buf, unsigned int base, void *obj, u
|
|||
__read_bytes_from_xdr_buf(&subbuf, obj, len);
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(read_bytes_from_xdr_buf);
|
||||
EXPORT_SYMBOL_GPL(read_bytes_from_xdr_buf);
|
||||
|
||||
static void __write_bytes_to_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len)
|
||||
{
|
||||
|
@ -774,7 +774,7 @@ xdr_decode_word(struct xdr_buf *buf, unsigned int base, u32 *obj)
|
|||
*obj = ntohl(raw);
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(xdr_decode_word);
|
||||
EXPORT_SYMBOL_GPL(xdr_decode_word);
|
||||
|
||||
int
|
||||
xdr_encode_word(struct xdr_buf *buf, unsigned int base, u32 obj)
|
||||
|
@ -783,7 +783,7 @@ xdr_encode_word(struct xdr_buf *buf, unsigned int base, u32 obj)
|
|||
|
||||
return write_bytes_to_xdr_buf(buf, base, &raw, sizeof(obj));
|
||||
}
|
||||
EXPORT_SYMBOL(xdr_encode_word);
|
||||
EXPORT_SYMBOL_GPL(xdr_encode_word);
|
||||
|
||||
/* If the netobj starting offset bytes from the start of xdr_buf is contained
|
||||
* entirely in the head or the tail, set object to point to it; otherwise
|
||||
|
@ -821,7 +821,7 @@ int xdr_buf_read_netobj(struct xdr_buf *buf, struct xdr_netobj *obj, unsigned in
|
|||
__read_bytes_from_xdr_buf(&subbuf, obj->data, obj->len);
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(xdr_buf_read_netobj);
|
||||
EXPORT_SYMBOL_GPL(xdr_buf_read_netobj);
|
||||
|
||||
/* Returns 0 on success, or else a negative error code. */
|
||||
static int
|
||||
|
@ -1027,7 +1027,7 @@ xdr_decode_array2(struct xdr_buf *buf, unsigned int base,
|
|||
|
||||
return xdr_xcode_array2(buf, base, desc, 0);
|
||||
}
|
||||
EXPORT_SYMBOL(xdr_decode_array2);
|
||||
EXPORT_SYMBOL_GPL(xdr_decode_array2);
|
||||
|
||||
int
|
||||
xdr_encode_array2(struct xdr_buf *buf, unsigned int base,
|
||||
|
@ -1039,7 +1039,7 @@ xdr_encode_array2(struct xdr_buf *buf, unsigned int base,
|
|||
|
||||
return xdr_xcode_array2(buf, base, desc, 1);
|
||||
}
|
||||
EXPORT_SYMBOL(xdr_encode_array2);
|
||||
EXPORT_SYMBOL_GPL(xdr_encode_array2);
|
||||
|
||||
int
|
||||
xdr_process_buf(struct xdr_buf *buf, unsigned int offset, unsigned int len,
|
||||
|
@ -1106,5 +1106,5 @@ xdr_process_buf(struct xdr_buf *buf, unsigned int offset, unsigned int len,
|
|||
out:
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL(xdr_process_buf);
|
||||
EXPORT_SYMBOL_GPL(xdr_process_buf);
|
||||
|
||||
|
|