svc: Move connection limit checking to its own function

Move the code that poaches connections when the connection limit is hit
to a subroutine to make the accept logic path easier to follow. Since this
is in the new connection path, it should not be a performance issue.

Signed-off-by: Tom Tucker <tom@opengridcomputing.com>
Acked-by: Neil Brown <neilb@suse.de>
Reviewed-by: Chuck Lever <chuck.lever@oracle.com>
Reviewed-by: Greg Banks <gnb@sgi.com>
Signed-off-by: J. Bruce Fields <bfields@citi.umich.edu>
This commit is contained in:
Tom Tucker 2007-12-30 21:07:40 -06:00 committed by J. Bruce Fields
parent 44a6995b32
commit f9f3cc4fae
1 changed file with 29 additions and 28 deletions

View File

@@ -1105,17 +1105,30 @@ static struct svc_xprt *svc_tcp_accept(struct svc_xprt *xprt)
 	svc_sock_received(newsvsk);
 
-	/* make sure that we don't have too many active connections.
-	 * If we have, something must be dropped.
-	 *
-	 * There's no point in trying to do random drop here for
-	 * DoS prevention. The NFS clients does 1 reconnect in 15
-	 * seconds. An attacker can easily beat that.
-	 *
-	 * The only somewhat efficient mechanism would be if drop
-	 * old connections from the same IP first. But right now
-	 * we don't even record the client IP in svc_sock.
-	 */
+	if (serv->sv_stats)
+		serv->sv_stats->nettcpconn++;
+
+	return &newsvsk->sk_xprt;
+
+failed:
+	sock_release(newsock);
+	return NULL;
+}
+
+/*
+ * Make sure that we don't have too many active connections. If we
+ * have, something must be dropped.
+ *
+ * There's no point in trying to do random drop here for DoS
+ * prevention. The NFS clients does 1 reconnect in 15 seconds. An
+ * attacker can easily beat that.
+ *
+ * The only somewhat efficient mechanism would be if drop old
+ * connections from the same IP first. But right now we don't even
+ * record the client IP in svc_sock.
+ */
+static void svc_check_conn_limits(struct svc_serv *serv)
+{
 	if (serv->sv_tmpcnt > (serv->sv_nrthreads+3)*20) {
 		struct svc_sock *svsk = NULL;
 		spin_lock_bh(&serv->sv_lock);
@@ -1126,10 +1139,6 @@ static struct svc_xprt *svc_tcp_accept(struct svc_xprt *xprt)
 				       "sockets, consider increasing the "
 				       "number of nfsd threads\n",
 				       serv->sv_name);
-			printk(KERN_NOTICE
-			       "%s: last TCP connect from %s\n",
-			       serv->sv_name, __svc_print_addr(sin,
-							       buf, sizeof(buf)));
 		}
 		/*
 		 * Always select the oldest socket. It's not fair,
@@ -1147,17 +1156,7 @@ static struct svc_xprt *svc_tcp_accept(struct svc_xprt *xprt)
 			svc_sock_enqueue(svsk);
 			svc_sock_put(svsk);
 		}
-
 	}
-
-	if (serv->sv_stats)
-		serv->sv_stats->nettcpconn++;
-
-	return &newsvsk->sk_xprt;
-
-failed:
-	sock_release(newsock);
-	return NULL;
 }
 
 /*
@@ -1574,6 +1573,8 @@ svc_recv(struct svc_rqst *rqstp, long timeout)
 	} else if (test_bit(SK_LISTENER, &svsk->sk_flags)) {
 		struct svc_xprt *newxpt;
 		newxpt = svsk->sk_xprt.xpt_ops->xpo_accept(&svsk->sk_xprt);
+		if (newxpt)
+			svc_check_conn_limits(svsk->sk_server);
 		svc_sock_received(svsk);
 	} else {
 		dprintk("svc: server %p, pool %u, socket %p, inuse=%d\n",