mirror of https://gitee.com/openkylin/linux.git
svc: Move connection limit checking to its own function
Move the code that poaches connections when the connection limit is hit to a subroutine to make the accept logic path easier to follow. Since this is in the new connection path, it should not be a performance issue.

Signed-off-by: Tom Tucker <tom@opengridcomputing.com>
Acked-by: Neil Brown <neilb@suse.de>
Reviewed-by: Chuck Lever <chuck.lever@oracle.com>
Reviewed-by: Greg Banks <gnb@sgi.com>
Signed-off-by: J. Bruce Fields <bfields@citi.umich.edu>
This commit is contained in:
parent 44a6995b32
commit f9f3cc4fae
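
As orientation for the diff that follows, here is a minimal user-space sketch of the resulting shape, not the kernel code: the only things borrowed from the patch are the helper's name svc_check_conn_limits(), the (threads + 3) * 20 threshold, and the rule that the check runs in the accept path only after a connection was actually accepted. Every structure and function below is an illustrative stand-in, not the kernel's svc_serv/svc_sock.

#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-ins for the kernel structures; only the fields the
 * sketch needs are modelled. */
struct conn {
        int id;
        struct conn *next;              /* newest-first list, like sv_tempsocks */
};

struct server {
        int nrthreads;                  /* plays the role of sv_nrthreads */
        int tmpcnt;                     /* plays the role of sv_tmpcnt */
        struct conn *tempsocks;         /* newest first; the oldest is at the tail */
};

/* Counterpart of the helper the patch introduces: once the connection
 * count exceeds (threads + 3) * 20, pick the oldest connection and drop
 * it.  The kernel selects and requeues the victim socket; the sketch
 * simply unlinks and frees the tail of the list. */
static void svc_check_conn_limits(struct server *serv)
{
        int limit = (serv->nrthreads + 3) * 20;

        if (serv->tmpcnt > limit) {
                struct conn **pp = &serv->tempsocks;

                if (!*pp)
                        return;
                while ((*pp)->next)     /* walk to the oldest entry */
                        pp = &(*pp)->next;
                printf("limit hit (%d > %d): dropping oldest conn %d\n",
                       serv->tmpcnt, limit, (*pp)->id);
                free(*pp);
                *pp = NULL;
                serv->tmpcnt--;
        }
}

/* Accept path shaped like the svc_recv() hunk: the limit check runs only
 * when an accept actually produced a new connection. */
static void accept_one(struct server *serv, int id)
{
        struct conn *c = malloc(sizeof(*c));

        if (!c)
                return;
        c->id = id;
        c->next = serv->tempsocks;
        serv->tempsocks = c;
        serv->tmpcnt++;

        svc_check_conn_limits(serv);    /* mirrors: if (newxpt) svc_check_conn_limits(...) */
}

int main(void)
{
        struct server serv = { .nrthreads = 1, .tmpcnt = 0, .tempsocks = NULL };
        struct conn *c, *next;
        int i;

        /* With one thread the limit is (1 + 3) * 20 = 80, so the 81st
         * accepted connection evicts the oldest one (id 1). */
        for (i = 1; i <= 81; i++)
                accept_one(&serv, i);
        printf("connections remaining: %d\n", serv.tmpcnt);

        for (c = serv.tempsocks; c; c = next) { /* tidy up */
                next = c->next;
                free(c);
        }
        return 0;
}

Built and run, the sketch reports the eviction of connection 1 once the 81st connection arrives, which is the behaviour the patch keeps while only relocating the code.
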
@@ -1105,17 +1105,30 @@ static struct svc_xprt *svc_tcp_accept(struct svc_xprt *xprt)
 
         svc_sock_received(newsvsk);
 
-        /* make sure that we don't have too many active connections.
-         * If we have, something must be dropped.
-         *
-         * There's no point in trying to do random drop here for
-         * DoS prevention. The NFS clients does 1 reconnect in 15
-         * seconds. An attacker can easily beat that.
-         *
-         * The only somewhat efficient mechanism would be if drop
-         * old connections from the same IP first. But right now
-         * we don't even record the client IP in svc_sock.
-         */
+        if (serv->sv_stats)
+                serv->sv_stats->nettcpconn++;
+
+        return &newsvsk->sk_xprt;
+
+failed:
+        sock_release(newsock);
+        return NULL;
+}
+
+/*
+ * Make sure that we don't have too many active connections. If we
+ * have, something must be dropped.
+ *
+ * There's no point in trying to do random drop here for DoS
+ * prevention. The NFS clients does 1 reconnect in 15 seconds. An
+ * attacker can easily beat that.
+ *
+ * The only somewhat efficient mechanism would be if drop old
+ * connections from the same IP first. But right now we don't even
+ * record the client IP in svc_sock.
+ */
+static void svc_check_conn_limits(struct svc_serv *serv)
+{
         if (serv->sv_tmpcnt > (serv->sv_nrthreads+3)*20) {
                 struct svc_sock *svsk = NULL;
                 spin_lock_bh(&serv->sv_lock);
@@ -1123,13 +1136,9 @@ static struct svc_xprt *svc_tcp_accept(struct svc_xprt *xprt)
                         if (net_ratelimit()) {
                                 /* Try to help the admin */
                                 printk(KERN_NOTICE "%s: too many open TCP "
                                        "sockets, consider increasing the "
                                        "number of nfsd threads\n",
                                        serv->sv_name);
-                                printk(KERN_NOTICE
-                                       "%s: last TCP connect from %s\n",
-                                       serv->sv_name, __svc_print_addr(sin,
-                                                        buf, sizeof(buf)));
                         }
                         /*
                          * Always select the oldest socket. It's not fair,
@@ -1147,17 +1156,7 @@ static struct svc_xprt *svc_tcp_accept(struct svc_xprt *xprt)
                         svc_sock_enqueue(svsk);
                         svc_sock_put(svsk);
                 }
-
         }
-
-        if (serv->sv_stats)
-                serv->sv_stats->nettcpconn++;
-
-        return &newsvsk->sk_xprt;
-
-failed:
-        sock_release(newsock);
-        return NULL;
 }
 
 /*
@@ -1574,6 +1573,8 @@ svc_recv(struct svc_rqst *rqstp, long timeout)
         } else if (test_bit(SK_LISTENER, &svsk->sk_flags)) {
                 struct svc_xprt *newxpt;
                 newxpt = svsk->sk_xprt.xpt_ops->xpo_accept(&svsk->sk_xprt);
+                if (newxpt)
+                        svc_check_conn_limits(svsk->sk_server);
                 svc_sock_received(svsk);
         } else {
                 dprintk("svc: server %p, pool %u, socket %p, inuse=%d\n",
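
Two details of the move are worth noting. The threshold itself is carried over unchanged: the server is treated as over its limit once sv_tmpcnt exceeds (sv_nrthreads + 3) * 20, so, for example, an nfsd instance running 8 threads tolerates (8 + 3) * 20 = 220 connections before the oldest socket is selected and queued for close. And because the new helper receives only the svc_serv pointer, the old "%s: last TCP connect from %s" printk, which used the sin and buf locals of svc_tcp_accept, appears to be dropped rather than relocated.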