/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Support for INET connection oriented protocols.
 *
 * Authors:	See the TCP sources
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or(at your option) any later version.
 */

#include <linux/module.h>
#include <linux/jhash.h>

#include <net/inet_connection_sock.h>
#include <net/inet_hashtables.h>
#include <net/inet_timewait_sock.h>
#include <net/ip.h>
#include <net/route.h>
#include <net/tcp_states.h>
#include <net/xfrm.h>

#ifdef INET_CSK_DEBUG
const char inet_csk_timer_bug_msg[] = "inet_csk BUG: unknown timer value\n";
EXPORT_SYMBOL(inet_csk_timer_bug_msg);
#endif

/*
 * This struct holds the first and last local port number.
 */
struct local_ports sysctl_local_ports __read_mostly = {
	.lock = __SEQLOCK_UNLOCKED(sysctl_local_ports.lock),
	.range = { 32768, 61000 },
};

unsigned long *sysctl_local_reserved_ports;
EXPORT_SYMBOL(sysctl_local_reserved_ports);

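/* Read the sysctl'd ephemeral port range under its seqlock so callers get a
 * consistent (low, high) pair even if the range is being rewritten
 * concurrently; the loop simply retries on a torn read.
 */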
void inet_get_local_port_range(int *low, int *high)
{
	unsigned int seq;

	do {
		seq = read_seqbegin(&sysctl_local_ports.lock);

		*low = sysctl_local_ports.range[0];
		*high = sysctl_local_ports.range[1];
	} while (read_seqretry(&sysctl_local_ports.lock, seq));
}
EXPORT_SYMBOL(inet_get_local_port_range);

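/* Scan every socket already bound to this bucket's port and report a
 * conflict (non-zero) if @sk cannot share the port with one of them.
 * Sharing is allowed when the sockets are bound to different devices,
 * when their local addresses do not overlap, when both set SO_REUSEADDR
 * and the existing socket is not listening, or when both set SO_REUSEPORT
 * with the same owning uid.  When @relax is false (the port autoselection
 * case) even two SO_REUSEADDR sockets on the same address count as a
 * conflict, so bind(0) keeps looking for a genuinely unique
 * (address, port) pair.
 */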
int inet_csk_bind_conflict(const struct sock *sk,
			   const struct inet_bind_bucket *tb, bool relax)
{
	struct sock *sk2;
	int reuse = sk->sk_reuse;
	int reuseport = sk->sk_reuseport;
	kuid_t uid = sock_i_uid((struct sock *)sk);

	/*
	 * Unlike other sk lookup places we do not check
	 * for sk_net here, since _all_ the socks listed
	 * in tb->owners list belong to the same net - the
	 * one this bucket belongs to.
	 */

	sk_for_each_bound(sk2, &tb->owners) {
		if (sk != sk2 &&
		    !inet_v6_ipv6only(sk2) &&
		    (!sk->sk_bound_dev_if ||
		     !sk2->sk_bound_dev_if ||
		     sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) {
			if ((!reuse || !sk2->sk_reuse ||
			    sk2->sk_state == TCP_LISTEN) &&
			    (!reuseport || !sk2->sk_reuseport ||
			    (sk2->sk_state != TCP_TIME_WAIT &&
			     !uid_eq(uid, sock_i_uid(sk2))))) {
				const __be32 sk2_rcv_saddr = sk_rcv_saddr(sk2);
				if (!sk2_rcv_saddr || !sk_rcv_saddr(sk) ||
				    sk2_rcv_saddr == sk_rcv_saddr(sk))
					break;
			}
			if (!relax && reuse && sk2->sk_reuse &&
			    sk2->sk_state != TCP_LISTEN) {
				const __be32 sk2_rcv_saddr = sk_rcv_saddr(sk2);

				if (!sk2_rcv_saddr || !sk_rcv_saddr(sk) ||
				    sk2_rcv_saddr == sk_rcv_saddr(sk))
					break;
			}
		}
	}
	return sk2 != NULL;
}
EXPORT_SYMBOL_GPL(inet_csk_bind_conflict);

/* Obtain a reference to a local port for the given sock,
 * if snum is zero it means select any available local port.
 */
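/* The port is either the caller-supplied snum or, when snum is zero, one
 * picked from the sysctl'd local port range by the search loop below.
 */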
int inet_csk_get_port(struct sock *sk, unsigned short snum)
{
	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
	struct inet_bind_hashbucket *head;
	struct inet_bind_bucket *tb;
	int ret, attempts = 5;
	struct net *net = sock_net(sk);
	int smallest_size = -1, smallest_rover;
	kuid_t uid = sock_i_uid(sk);

	local_bh_disable();
	if (!snum) {
		int remaining, rover, low, high;

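		/* Autoselection: pick a random starting port in [low, high]
		 * and walk the range once.  Reserved ports are skipped, and
		 * buckets that already permit reuse are remembered
		 * (smallest_rover/smallest_size track the least-loaded one)
		 * as a fallback in case no completely free port is found.
		 */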
again:
		inet_get_local_port_range(&low, &high);
		remaining = (high - low) + 1;
		smallest_rover = rover = net_random() % remaining + low;

		smallest_size = -1;
		do {
			if (inet_is_reserved_local_port(rover))
				goto next_nolock;
			head = &hashinfo->bhash[inet_bhashfn(net, rover,
					hashinfo->bhash_size)];
			spin_lock(&head->lock);
			inet_bind_bucket_for_each(tb, &head->chain)
				if (net_eq(ib_net(tb), net) && tb->port == rover) {
					if (((tb->fastreuse > 0 &&
					      sk->sk_reuse &&
					      sk->sk_state != TCP_LISTEN) ||
					     (tb->fastreuseport > 0 &&
					      sk->sk_reuseport &&
					      uid_eq(tb->fastuid, uid))) &&
					    (tb->num_owners < smallest_size || smallest_size == -1)) {
						smallest_size = tb->num_owners;
						smallest_rover = rover;
						if (atomic_read(&hashinfo->bsockets) > (high - low) + 1 &&
						    !inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb, false)) {
							snum = smallest_rover;
							goto tb_found;
						}
					}
					if (!inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb, false)) {
						snum = rover;
						goto tb_found;
					}
					goto next;
				}
			break;
		next:
			spin_unlock(&head->lock);
		next_nolock:
			if (++rover > high)
				rover = low;
		} while (--remaining > 0);

		/* Exhausted local port range during search?  It is not
		 * possible for us to be holding one of the bind hash
		 * locks if this test triggers, because if 'remaining'
		 * drops to zero, we broke out of the do/while loop at
		 * the top level, not from the 'break;' statement.
		 */
		ret = 1;
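		/* If the whole range was scanned without finding a free port,
		 * fall back to the least-loaded reusable bucket remembered
		 * above rather than failing outright.
		 */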
		if (remaining <= 0) {
			if (smallest_size != -1) {
				snum = smallest_rover;
				goto have_snum;
			}
			goto fail;
		}
		/* OK, here is the one we will use.  HEAD is
		 * non-NULL and we hold its lock.
		 */
		snum = rover;
	} else {
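		/* An explicit port was requested (or chosen as a fallback
		 * above): look up its bind bucket directly; conflict checking
		 * happens below at tb_found.
		 */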
have_snum:
		head = &hashinfo->bhash[inet_bhashfn(net, snum,
				hashinfo->bhash_size)];
		spin_lock(&head->lock);
hlist: drop the node parameter from iterators
I'm not sure why, but the hlist for each entry iterators were conceived
list_for_each_entry(pos, head, member)
The hlist ones were greedy and wanted an extra parameter:
hlist_for_each_entry(tpos, pos, head, member)
Why did they need an extra pos parameter? I'm not quite sure. Not only
they don't really need it, it also prevents the iterator from looking
exactly like the list iterator, which is unfortunate.
Besides the semantic patch, there was some manual work required:
- Fix up the actual hlist iterators in linux/list.h
- Fix up the declaration of other iterators based on the hlist ones.
- A very small amount of places were using the 'node' parameter, this
was modified to use 'obj->member' instead.
- Coccinelle didn't handle the hlist_for_each_entry_safe iterator
properly, so those had to be fixed up manually.
The semantic patch which is mostly the work of Peter Senna Tschudin is here:
@@
iterator name hlist_for_each_entry, hlist_for_each_entry_continue, hlist_for_each_entry_from, hlist_for_each_entry_rcu, hlist_for_each_entry_rcu_bh, hlist_for_each_entry_continue_rcu_bh, for_each_busy_worker, ax25_uid_for_each, ax25_for_each, inet_bind_bucket_for_each, sctp_for_each_hentry, sk_for_each, sk_for_each_rcu, sk_for_each_from, sk_for_each_safe, sk_for_each_bound, hlist_for_each_entry_safe, hlist_for_each_entry_continue_rcu, nr_neigh_for_each, nr_neigh_for_each_safe, nr_node_for_each, nr_node_for_each_safe, for_each_gfn_indirect_valid_sp, for_each_gfn_sp, for_each_host;
type T;
expression a,c,d,e;
identifier b;
statement S;
@@
-T b;
<+... when != b
(
hlist_for_each_entry(a,
- b,
c, d) S
|
hlist_for_each_entry_continue(a,
- b,
c) S
|
hlist_for_each_entry_from(a,
- b,
c) S
|
hlist_for_each_entry_rcu(a,
- b,
c, d) S
|
hlist_for_each_entry_rcu_bh(a,
- b,
c, d) S
|
hlist_for_each_entry_continue_rcu_bh(a,
- b,
c) S
|
for_each_busy_worker(a, c,
- b,
d) S
|
ax25_uid_for_each(a,
- b,
c) S
|
ax25_for_each(a,
- b,
c) S
|
inet_bind_bucket_for_each(a,
- b,
c) S
|
sctp_for_each_hentry(a,
- b,
c) S
|
sk_for_each(a,
- b,
c) S
|
sk_for_each_rcu(a,
- b,
c) S
|
sk_for_each_from
-(a, b)
+(a)
S
+ sk_for_each_from(a) S
|
sk_for_each_safe(a,
- b,
c, d) S
|
sk_for_each_bound(a,
- b,
c) S
|
hlist_for_each_entry_safe(a,
- b,
c, d, e) S
|
hlist_for_each_entry_continue_rcu(a,
- b,
c) S
|
nr_neigh_for_each(a,
- b,
c) S
|
nr_neigh_for_each_safe(a,
- b,
c, d) S
|
nr_node_for_each(a,
- b,
c) S
|
nr_node_for_each_safe(a,
- b,
c, d) S
|
- for_each_gfn_sp(a, c, d, b) S
+ for_each_gfn_sp(a, c, d) S
|
- for_each_gfn_indirect_valid_sp(a, c, d, b) S
+ for_each_gfn_indirect_valid_sp(a, c, d) S
|
for_each_host(a,
- b,
c) S
|
for_each_host_safe(a,
- b,
c, d) S
|
for_each_mesh_entry(a,
- b,
c, d) S
)
...+>
[akpm@linux-foundation.org: drop bogus change from net/ipv4/raw.c]
[akpm@linux-foundation.org: drop bogus hunk from net/ipv6/raw.c]
[akpm@linux-foundation.org: checkpatch fixes]
[akpm@linux-foundation.org: fix warnings]
[akpm@linux-foudnation.org: redo intrusive kvm changes]
Tested-by: Peter Senna Tschudin <peter.senna@gmail.com>
Acked-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Signed-off-by: Sasha Levin <sasha.levin@oracle.com>
Cc: Wu Fengguang <fengguang.wu@intel.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2013-02-28 09:06:00 +08:00
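For reference, a before/after sketch of the conversion the semantic patch above performs; struct foo and foo_list are made-up names used only for illustration.

/* Illustration of the hlist iterator change above.
 *
 * Old form (before this patch) needed a separate hlist_node cursor:
 *
 *	struct foo *f;
 *	struct hlist_node *pos;
 *	hlist_for_each_entry(f, pos, &foo_list, node)
 *		use(f);
 */
#include <linux/list.h>

struct foo {
	int value;
	struct hlist_node node;
};

static HLIST_HEAD(foo_list);

/* New form: the typed pointer is the only cursor, matching
 * list_for_each_entry(pos, head, member).
 */
static int foo_sum(void)
{
	struct foo *f;
	int sum = 0;

	hlist_for_each_entry(f, &foo_list, node)
		sum += f->value;
	return sum;
}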
|
|
|
inet_bind_bucket_for_each(tb, &head->chain)
|
2009-11-26 07:14:13 +08:00
|
|
|
if (net_eq(ib_net(tb), net) && tb->port == snum)
|
2005-08-10 11:11:08 +08:00
|
|
|
goto tb_found;
|
|
|
|
}
|
|
|
|
tb = NULL;
|
|
|
|
goto tb_not_found;
|
|
|
|
tb_found:
|
|
|
|
if (!hlist_empty(&tb->owners)) {
|
2012-04-19 11:39:36 +08:00
|
|
|
if (sk->sk_reuse == SK_FORCE_REUSE)
|
|
|
|
goto success;
|
|
|
|
|
2013-01-22 17:50:24 +08:00
|
|
|
if (((tb->fastreuse > 0 &&
|
|
|
|
sk->sk_reuse && sk->sk_state != TCP_LISTEN) ||
|
|
|
|
(tb->fastreuseport > 0 &&
|
|
|
|
sk->sk_reuseport && uid_eq(tb->fastuid, uid))) &&
|
2009-01-20 08:46:02 +08:00
|
|
|
smallest_size == -1) {
|
2005-08-10 11:11:08 +08:00
|
|
|
goto success;
|
|
|
|
} else {
|
|
|
|
ret = 1;
|
2012-04-13 06:21:45 +08:00
|
|
|
if (inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb, true)) {
|
2013-01-22 17:50:24 +08:00
|
|
|
if (((sk->sk_reuse && sk->sk_state != TCP_LISTEN) ||
|
2013-01-26 15:50:54 +08:00
|
|
|
(tb->fastreuseport > 0 &&
|
|
|
|
sk->sk_reuseport && uid_eq(tb->fastuid, uid))) &&
|
2009-02-01 17:40:17 +08:00
|
|
|
smallest_size != -1 && --attempts >= 0) {
|
2009-01-20 08:46:02 +08:00
|
|
|
spin_unlock(&head->lock);
|
|
|
|
goto again;
|
|
|
|
}
|
2012-04-13 06:21:45 +08:00
|
|
|
|
2005-08-10 11:11:08 +08:00
|
|
|
goto fail_unlock;
|
2009-01-20 08:46:02 +08:00
|
|
|
}
|
2005-08-10 11:11:08 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
tb_not_found:
|
|
|
|
ret = 1;
|
2008-01-31 21:05:50 +08:00
|
|
|
if (!tb && (tb = inet_bind_bucket_create(hashinfo->bind_bucket_cachep,
|
|
|
|
net, head, snum)) == NULL)
|
2005-08-10 11:11:08 +08:00
|
|
|
goto fail_unlock;
|
|
|
|
if (hlist_empty(&tb->owners)) {
|
|
|
|
if (sk->sk_reuse && sk->sk_state != TCP_LISTEN)
|
|
|
|
tb->fastreuse = 1;
|
|
|
|
else
|
|
|
|
tb->fastreuse = 0;
|
2013-01-22 17:50:24 +08:00
|
|
|
if (sk->sk_reuseport) {
|
|
|
|
tb->fastreuseport = 1;
|
|
|
|
tb->fastuid = uid;
|
2013-01-26 15:50:54 +08:00
|
|
|
} else
|
2013-01-22 17:50:24 +08:00
|
|
|
tb->fastreuseport = 0;
|
|
|
|
} else {
|
|
|
|
if (tb->fastreuse &&
|
|
|
|
(!sk->sk_reuse || sk->sk_state == TCP_LISTEN))
|
|
|
|
tb->fastreuse = 0;
|
|
|
|
if (tb->fastreuseport &&
|
2013-01-26 15:50:54 +08:00
|
|
|
(!sk->sk_reuseport || !uid_eq(tb->fastuid, uid)))
|
2013-01-22 17:50:24 +08:00
|
|
|
tb->fastreuseport = 0;
|
|
|
|
}
|
2005-08-10 11:11:08 +08:00
|
|
|
success:
|
|
|
|
if (!inet_csk(sk)->icsk_bind_hash)
|
|
|
|
inet_bind_hash(sk, tb, snum);
|
2008-07-26 12:43:18 +08:00
|
|
|
WARN_ON(inet_csk(sk)->icsk_bind_hash != tb);
|
2007-02-09 22:24:47 +08:00
|
|
|
ret = 0;
|
2005-08-10 11:11:08 +08:00
|
|
|
|
|
|
|
fail_unlock:
|
|
|
|
spin_unlock(&head->lock);
|
|
|
|
fail:
|
|
|
|
local_bh_enable();
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(inet_csk_get_port);
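From userspace, the automatic selection path above is reached simply by binding to port 0; a minimal IPv4 sketch with error handling trimmed:

/* Binding to port 0 asks the kernel (sk->sk_prot->get_port, i.e.
 * inet_csk_get_port for TCP) to pick a local port from the
 * ip_local_port_range sysctl.
 */
#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
    int fd = socket(AF_INET, SOCK_STREAM, 0);
    struct sockaddr_in addr;
    socklen_t len = sizeof(addr);

    memset(&addr, 0, sizeof(addr));
    addr.sin_family = AF_INET;
    addr.sin_addr.s_addr = htonl(INADDR_ANY);
    addr.sin_port = 0;                  /* 0 => kernel chooses the port */

    if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) == 0 &&
        getsockname(fd, (struct sockaddr *)&addr, &len) == 0)
        printf("kernel picked port %d\n", ntohs(addr.sin_port));

    close(fd);
    return 0;
}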
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Wait for an incoming connection, avoid race conditions. This must be called
|
|
|
|
* with the socket locked.
|
|
|
|
*/
|
|
|
|
static int inet_csk_wait_for_connect(struct sock *sk, long timeo)
|
|
|
|
{
|
|
|
|
struct inet_connection_sock *icsk = inet_csk(sk);
|
|
|
|
DEFINE_WAIT(wait);
|
|
|
|
int err;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* True wake-one mechanism for incoming connections: only
|
|
|
|
* one process gets woken up, not the 'whole herd'.
|
|
|
|
* Since we do not 'race & poll' for established sockets
|
|
|
|
* anymore, the common case will execute the loop only once.
|
|
|
|
*
|
|
|
|
* Subtle issue: "add_wait_queue_exclusive()" will be added
|
|
|
|
* after any current non-exclusive waiters, and we know that
|
|
|
|
* it will always _stay_ after any new non-exclusive waiters
|
|
|
|
* because all non-exclusive waiters are added at the
|
|
|
|
* beginning of the wait-queue. As such, it's ok to "drop"
|
|
|
|
* our exclusiveness temporarily when we get woken up without
|
|
|
|
* having to remove and re-insert us on the wait queue.
|
|
|
|
*/
|
|
|
|
for (;;) {
|
2010-04-20 21:03:51 +08:00
|
|
|
prepare_to_wait_exclusive(sk_sleep(sk), &wait,
|
2005-08-10 11:11:08 +08:00
|
|
|
TASK_INTERRUPTIBLE);
|
|
|
|
release_sock(sk);
|
|
|
|
if (reqsk_queue_empty(&icsk->icsk_accept_queue))
|
|
|
|
timeo = schedule_timeout(timeo);
|
|
|
|
lock_sock(sk);
|
|
|
|
err = 0;
|
|
|
|
if (!reqsk_queue_empty(&icsk->icsk_accept_queue))
|
|
|
|
break;
|
|
|
|
err = -EINVAL;
|
|
|
|
if (sk->sk_state != TCP_LISTEN)
|
|
|
|
break;
|
|
|
|
err = sock_intr_errno(timeo);
|
|
|
|
if (signal_pending(current))
|
|
|
|
break;
|
|
|
|
err = -EAGAIN;
|
|
|
|
if (!timeo)
|
|
|
|
break;
|
|
|
|
}
|
2010-04-20 21:03:51 +08:00
|
|
|
finish_wait(sk_sleep(sk), &wait);
|
2005-08-10 11:11:08 +08:00
|
|
|
return err;
|
|
|
|
}
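The wake-one behaviour described in the comment above can be observed from userspace by letting several processes block in accept() on the same listening socket; a sketch, with the listener setup and error handling omitted:

/* Sketch: several children block in accept() on one listening socket.
 * Per the wake-one comment above, each incoming connection wakes only
 * one of them.
 */
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

static void worker(int listen_fd)
{
    for (;;) {
        /* sleeps in inet_csk_wait_for_connect() until a connection lands */
        int c = accept(listen_fd, NULL, NULL);

        if (c < 0)
            break;
        printf("pid %d accepted fd %d\n", (int)getpid(), c);
        close(c);
    }
}

void spawn_workers(int listen_fd, int n)
{
    for (int i = 0; i < n; i++)
        if (fork() == 0) {
            worker(listen_fd);
            _exit(0);
        }
}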
|
|
|
|
|
|
|
|
/*
|
|
|
|
* This will accept the next outstanding connection.
|
|
|
|
*/
|
|
|
|
struct sock *inet_csk_accept(struct sock *sk, int flags, int *err)
|
|
|
|
{
|
|
|
|
struct inet_connection_sock *icsk = inet_csk(sk);
|
2012-08-31 20:29:12 +08:00
|
|
|
struct request_sock_queue *queue = &icsk->icsk_accept_queue;
|
2005-08-10 11:11:08 +08:00
|
|
|
struct sock *newsk;
|
2012-08-31 20:29:12 +08:00
|
|
|
struct request_sock *req;
|
2005-08-10 11:11:08 +08:00
|
|
|
int error;
|
|
|
|
|
|
|
|
lock_sock(sk);
|
|
|
|
|
|
|
|
/* We need to make sure that this socket is listening,
|
|
|
|
* and that it has something pending.
|
|
|
|
*/
|
|
|
|
error = -EINVAL;
|
|
|
|
if (sk->sk_state != TCP_LISTEN)
|
|
|
|
goto out_err;
|
|
|
|
|
|
|
|
/* Find already established connection */
|
2012-08-31 20:29:12 +08:00
|
|
|
if (reqsk_queue_empty(queue)) {
|
2005-08-10 11:11:08 +08:00
|
|
|
long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
|
|
|
|
|
|
|
|
/* If this is a non-blocking socket, don't sleep */
|
|
|
|
error = -EAGAIN;
|
|
|
|
if (!timeo)
|
|
|
|
goto out_err;
|
|
|
|
|
|
|
|
error = inet_csk_wait_for_connect(sk, timeo);
|
|
|
|
if (error)
|
|
|
|
goto out_err;
|
|
|
|
}
|
2012-08-31 20:29:12 +08:00
|
|
|
req = reqsk_queue_remove(queue);
|
|
|
|
newsk = req->sk;
|
|
|
|
|
|
|
|
sk_acceptq_removed(sk);
|
2012-09-06 16:07:13 +08:00
|
|
|
if (sk->sk_protocol == IPPROTO_TCP && queue->fastopenq != NULL) {
|
2012-08-31 20:29:12 +08:00
|
|
|
spin_lock_bh(&queue->fastopenq->lock);
|
|
|
|
if (tcp_rsk(req)->listener) {
|
|
|
|
/* We are still waiting for the final ACK from 3WHS
|
|
|
|
* so can't free req now. Instead, we set req->sk to
|
|
|
|
* NULL to signify that the child socket is taken
|
|
|
|
* so reqsk_fastopen_remove() will free the req
|
|
|
|
* when 3WHS finishes (or is aborted).
|
|
|
|
*/
|
|
|
|
req->sk = NULL;
|
|
|
|
req = NULL;
|
|
|
|
}
|
|
|
|
spin_unlock_bh(&queue->fastopenq->lock);
|
|
|
|
}
|
2005-08-10 11:11:08 +08:00
|
|
|
out:
|
|
|
|
release_sock(sk);
|
2012-08-31 20:29:12 +08:00
|
|
|
if (req)
|
|
|
|
__reqsk_free(req);
|
2005-08-10 11:11:08 +08:00
|
|
|
return newsk;
|
|
|
|
out_err:
|
|
|
|
newsk = NULL;
|
2012-08-31 20:29:12 +08:00
|
|
|
req = NULL;
|
2005-08-10 11:11:08 +08:00
|
|
|
*err = error;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(inet_csk_accept);
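From userspace, the -EAGAIN branch above is what a non-blocking listener sees when the accept queue is empty; a small sketch:

/* With O_NONBLOCK set, sock_rcvtimeo() returns 0, so inet_csk_accept()
 * bails out with -EAGAIN instead of sleeping in the wait loop above.
 */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <sys/socket.h>

void try_accept(int listen_fd)
{
    fcntl(listen_fd, F_SETFL, O_NONBLOCK);

    int c = accept(listen_fd, NULL, NULL);
    if (c < 0 && (errno == EAGAIN || errno == EWOULDBLOCK))
        printf("no pending connection yet\n");
    else if (c >= 0)
        printf("accepted fd %d\n", c);
}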
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Using different timers for retransmit, delayed acks and probes
|
2007-02-09 22:24:47 +08:00
|
|
|
* We may wish to use just one timer maintaining a list of expire jiffies
|
2005-08-10 11:11:08 +08:00
|
|
|
* to optimize.
|
|
|
|
*/
|
|
|
|
void inet_csk_init_xmit_timers(struct sock *sk,
|
|
|
|
void (*retransmit_handler)(unsigned long),
|
|
|
|
void (*delack_handler)(unsigned long),
|
|
|
|
void (*keepalive_handler)(unsigned long))
|
|
|
|
{
|
|
|
|
struct inet_connection_sock *icsk = inet_csk(sk);
|
|
|
|
|
2008-01-24 13:20:07 +08:00
|
|
|
setup_timer(&icsk->icsk_retransmit_timer, retransmit_handler,
|
|
|
|
(unsigned long)sk);
|
|
|
|
setup_timer(&icsk->icsk_delack_timer, delack_handler,
|
|
|
|
(unsigned long)sk);
|
|
|
|
setup_timer(&sk->sk_timer, keepalive_handler, (unsigned long)sk);
|
2005-08-10 11:11:08 +08:00
|
|
|
icsk->icsk_pending = icsk->icsk_ack.pending = 0;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(inet_csk_init_xmit_timers);
|
|
|
|
|
|
|
|
void inet_csk_clear_xmit_timers(struct sock *sk)
|
|
|
|
{
|
|
|
|
struct inet_connection_sock *icsk = inet_csk(sk);
|
|
|
|
|
|
|
|
icsk->icsk_pending = icsk->icsk_ack.pending = icsk->icsk_ack.blocked = 0;
|
|
|
|
|
|
|
|
sk_stop_timer(sk, &icsk->icsk_retransmit_timer);
|
|
|
|
sk_stop_timer(sk, &icsk->icsk_delack_timer);
|
|
|
|
sk_stop_timer(sk, &sk->sk_timer);
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(inet_csk_clear_xmit_timers);
|
|
|
|
|
|
|
|
void inet_csk_delete_keepalive_timer(struct sock *sk)
|
|
|
|
{
|
|
|
|
sk_stop_timer(sk, &sk->sk_timer);
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(inet_csk_delete_keepalive_timer);
|
|
|
|
|
|
|
|
void inet_csk_reset_keepalive_timer(struct sock *sk, unsigned long len)
|
|
|
|
{
|
|
|
|
sk_reset_timer(sk, &sk->sk_timer, jiffies + len);
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(inet_csk_reset_keepalive_timer);
|
|
|
|
|
2008-11-03 16:23:42 +08:00
|
|
|
struct dst_entry *inet_csk_route_req(struct sock *sk,
|
2011-05-19 06:32:03 +08:00
|
|
|
struct flowi4 *fl4,
|
2012-07-18 05:02:46 +08:00
|
|
|
const struct request_sock *req)
|
2005-08-10 11:11:08 +08:00
|
|
|
{
|
|
|
|
struct rtable *rt;
|
|
|
|
const struct inet_request_sock *ireq = inet_rsk(req);
|
2011-04-21 17:45:37 +08:00
|
|
|
struct ip_options_rcu *opt = inet_rsk(req)->opt;
|
2008-07-17 11:19:08 +08:00
|
|
|
struct net *net = sock_net(sk);
|
2012-07-10 19:01:57 +08:00
|
|
|
int flags = inet_sk_flowi_flags(sk);
|
2005-08-10 11:11:08 +08:00
|
|
|
|
2011-05-19 06:32:03 +08:00
|
|
|
flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark,
|
2011-03-31 19:53:20 +08:00
|
|
|
RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
|
tcp: do not create inetpeer on SYNACK message
Another problem during a SYNFLOOD/DDOS attack is the inetpeer cache getting
larger and larger, using lots of memory and CPU time.
tcp_v4_send_synack()
->inet_csk_route_req()
->ip_route_output_flow()
->rt_set_nexthop()
->rt_init_metrics()
->inet_getpeer( create = true)
This is a side effect of commit a4daad6b09230 (net: Pre-COW metrics for
TCP) added in 2.6.39
Possible solution :
Instruct inet_csk_route_req() to remove FLOWI_FLAG_PRECOW_METRICS
Before patch :
# grep peer /proc/slabinfo
inet_peer_cache 4175430 4175430 192 42 2 : tunables 0 0 0 : slabdata 99415 99415 0
Samples: 41K of event 'cycles', Event count (approx.): 30716565122
+ 20,24% ksoftirqd/0 [kernel.kallsyms] [k] inet_getpeer
+ 8,19% ksoftirqd/0 [kernel.kallsyms] [k] peer_avl_rebalance.isra.1
+ 4,81% ksoftirqd/0 [kernel.kallsyms] [k] sha_transform
+ 3,64% ksoftirqd/0 [kernel.kallsyms] [k] fib_table_lookup
+ 2,36% ksoftirqd/0 [ixgbe] [k] ixgbe_poll
+ 2,16% ksoftirqd/0 [kernel.kallsyms] [k] __ip_route_output_key
+ 2,11% ksoftirqd/0 [kernel.kallsyms] [k] kernel_map_pages
+ 2,11% ksoftirqd/0 [kernel.kallsyms] [k] ip_route_input_common
+ 2,01% ksoftirqd/0 [kernel.kallsyms] [k] __inet_lookup_established
+ 1,83% ksoftirqd/0 [kernel.kallsyms] [k] md5_transform
+ 1,75% ksoftirqd/0 [kernel.kallsyms] [k] check_leaf.isra.9
+ 1,49% ksoftirqd/0 [kernel.kallsyms] [k] ipt_do_table
+ 1,46% ksoftirqd/0 [kernel.kallsyms] [k] hrtimer_interrupt
+ 1,45% ksoftirqd/0 [kernel.kallsyms] [k] kmem_cache_alloc
+ 1,29% ksoftirqd/0 [kernel.kallsyms] [k] inet_csk_search_req
+ 1,29% ksoftirqd/0 [kernel.kallsyms] [k] __netif_receive_skb
+ 1,16% ksoftirqd/0 [kernel.kallsyms] [k] copy_user_generic_string
+ 1,15% ksoftirqd/0 [kernel.kallsyms] [k] kmem_cache_free
+ 1,02% ksoftirqd/0 [kernel.kallsyms] [k] tcp_make_synack
+ 0,93% ksoftirqd/0 [kernel.kallsyms] [k] _raw_spin_lock_bh
+ 0,87% ksoftirqd/0 [kernel.kallsyms] [k] __call_rcu
+ 0,84% ksoftirqd/0 [kernel.kallsyms] [k] rt_garbage_collect
+ 0,84% ksoftirqd/0 [kernel.kallsyms] [k] fib_rules_lookup
Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Hans Schillstrom <hans.schillstrom@ericsson.com>
Cc: Jesper Dangaard Brouer <brouer@redhat.com>
Cc: Neal Cardwell <ncardwell@google.com>
Cc: Tom Herbert <therbert@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2012-06-01 05:00:26 +08:00
|
|
|
sk->sk_protocol,
|
2012-06-20 13:02:19 +08:00
|
|
|
flags,
|
2011-04-21 17:45:37 +08:00
|
|
|
(opt && opt->opt.srr) ? opt->opt.faddr : ireq->rmt_addr,
|
2011-03-31 19:53:20 +08:00
|
|
|
ireq->loc_addr, ireq->rmt_port, inet_sk(sk)->inet_sport);
|
2011-05-19 06:32:03 +08:00
|
|
|
security_req_classify_flow(req, flowi4_to_flowi(fl4));
|
|
|
|
rt = ip_route_output_flow(net, fl4, sk);
|
2011-03-03 06:31:35 +08:00
|
|
|
if (IS_ERR(rt))
|
2008-12-15 15:13:08 +08:00
|
|
|
goto no_route;
|
2012-10-08 19:41:18 +08:00
|
|
|
if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway)
|
2008-12-15 15:13:08 +08:00
|
|
|
goto route_err;
|
2010-06-11 14:31:35 +08:00
|
|
|
return &rt->dst;
|
2008-12-15 15:13:08 +08:00
|
|
|
|
|
|
|
route_err:
|
|
|
|
ip_rt_put(rt);
|
|
|
|
no_route:
|
|
|
|
IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES);
|
|
|
|
return NULL;
|
2005-08-10 11:11:08 +08:00
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(inet_csk_route_req);
|
|
|
|
|
2011-05-09 05:34:22 +08:00
|
|
|
struct dst_entry *inet_csk_route_child_sock(struct sock *sk,
|
|
|
|
struct sock *newsk,
|
|
|
|
const struct request_sock *req)
|
|
|
|
{
|
|
|
|
const struct inet_request_sock *ireq = inet_rsk(req);
|
|
|
|
struct inet_sock *newinet = inet_sk(newsk);
|
2012-08-20 10:52:09 +08:00
|
|
|
struct ip_options_rcu *opt;
|
2011-05-09 05:34:22 +08:00
|
|
|
struct net *net = sock_net(sk);
|
|
|
|
struct flowi4 *fl4;
|
|
|
|
struct rtable *rt;
|
|
|
|
|
|
|
|
fl4 = &newinet->cork.fl.u.ip4;
|
2012-08-20 10:52:09 +08:00
|
|
|
|
|
|
|
rcu_read_lock();
|
|
|
|
opt = rcu_dereference(newinet->inet_opt);
|
2011-05-09 05:34:22 +08:00
|
|
|
flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark,
|
|
|
|
RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
|
|
|
|
sk->sk_protocol, inet_sk_flowi_flags(sk),
|
|
|
|
(opt && opt->opt.srr) ? opt->opt.faddr : ireq->rmt_addr,
|
|
|
|
ireq->loc_addr, ireq->rmt_port, inet_sk(sk)->inet_sport);
|
|
|
|
security_req_classify_flow(req, flowi4_to_flowi(fl4));
|
|
|
|
rt = ip_route_output_flow(net, fl4, sk);
|
|
|
|
if (IS_ERR(rt))
|
|
|
|
goto no_route;
|
2012-10-08 19:41:18 +08:00
|
|
|
if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway)
|
2011-05-09 05:34:22 +08:00
|
|
|
goto route_err;
|
2012-08-20 10:52:09 +08:00
|
|
|
rcu_read_unlock();
|
2011-05-09 05:34:22 +08:00
|
|
|
return &rt->dst;
|
|
|
|
|
|
|
|
route_err:
|
|
|
|
ip_rt_put(rt);
|
|
|
|
no_route:
|
2012-08-20 10:52:09 +08:00
|
|
|
rcu_read_unlock();
|
2011-05-09 05:34:22 +08:00
|
|
|
IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES);
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(inet_csk_route_child_sock);
|
|
|
|
|
2006-09-28 09:36:59 +08:00
|
|
|
static inline u32 inet_synq_hash(const __be32 raddr, const __be16 rport,
|
2006-11-16 18:30:37 +08:00
|
|
|
const u32 rnd, const u32 synq_hsize)
|
2005-08-10 11:11:08 +08:00
|
|
|
{
|
2006-09-28 09:36:59 +08:00
|
|
|
return jhash_2words((__force u32)raddr, (__force u32)rport, rnd) & (synq_hsize - 1);
|
2005-08-10 11:11:08 +08:00
|
|
|
}
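A standalone sketch of the bucket computation above; toy_hash() is only a placeholder for jhash_2words(), the point being that the & (synq_hsize - 1) mask is a cheap modulo that relies on the syn-queue size being a power of two:

/* Stand-in for the syn-queue bucket selection: hash the (raddr, rport)
 * pair with a per-listener random seed and mask with (table_size - 1).
 * toy_hash() is a placeholder, not jhash_2words(); table_size must be a
 * power of two for the mask to be equivalent to a modulo.
 */
#include <stdint.h>

static uint32_t toy_hash(uint32_t a, uint32_t b, uint32_t rnd)
{
    uint32_t h = a ^ b ^ rnd;

    h ^= h >> 16;
    h *= 0x45d9f3b;            /* arbitrary mixing constant */
    h ^= h >> 16;
    return h;
}

static uint32_t synq_bucket(uint32_t raddr, uint16_t rport,
                            uint32_t rnd, uint32_t table_size)
{
    return toy_hash(raddr, rport, rnd) & (table_size - 1);
}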
|
|
|
|
|
2011-12-10 17:48:31 +08:00
|
|
|
#if IS_ENABLED(CONFIG_IPV6)
|
2005-08-10 11:11:08 +08:00
|
|
|
#define AF_INET_FAMILY(fam) ((fam) == AF_INET)
|
|
|
|
#else
|
|
|
|
#define AF_INET_FAMILY(fam) 1
|
|
|
|
#endif
|
|
|
|
|
|
|
|
struct request_sock *inet_csk_search_req(const struct sock *sk,
|
|
|
|
struct request_sock ***prevp,
|
2006-09-28 09:36:59 +08:00
|
|
|
const __be16 rport, const __be32 raddr,
|
2006-09-28 09:27:47 +08:00
|
|
|
const __be32 laddr)
|
2005-08-10 11:11:08 +08:00
|
|
|
{
|
|
|
|
const struct inet_connection_sock *icsk = inet_csk(sk);
|
|
|
|
struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
|
|
|
|
struct request_sock *req, **prev;
|
|
|
|
|
|
|
|
for (prev = &lopt->syn_table[inet_synq_hash(raddr, rport, lopt->hash_rnd,
|
|
|
|
lopt->nr_table_entries)];
|
|
|
|
(req = *prev) != NULL;
|
|
|
|
prev = &req->dl_next) {
|
|
|
|
const struct inet_request_sock *ireq = inet_rsk(req);
|
|
|
|
|
|
|
|
if (ireq->rmt_port == rport &&
|
|
|
|
ireq->rmt_addr == raddr &&
|
|
|
|
ireq->loc_addr == laddr &&
|
|
|
|
AF_INET_FAMILY(req->rsk_ops->family)) {
|
2008-07-26 12:43:18 +08:00
|
|
|
WARN_ON(req->sk);
|
2005-08-10 11:11:08 +08:00
|
|
|
*prevp = prev;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return req;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(inet_csk_search_req);
|
|
|
|
|
|
|
|
void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
|
2005-12-14 15:15:12 +08:00
|
|
|
unsigned long timeout)
|
2005-08-10 11:11:08 +08:00
|
|
|
{
|
|
|
|
struct inet_connection_sock *icsk = inet_csk(sk);
|
|
|
|
struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
|
|
|
|
const u32 h = inet_synq_hash(inet_rsk(req)->rmt_addr, inet_rsk(req)->rmt_port,
|
|
|
|
lopt->hash_rnd, lopt->nr_table_entries);
|
|
|
|
|
|
|
|
reqsk_queue_hash_req(&icsk->icsk_accept_queue, h, req, timeout);
|
|
|
|
inet_csk_reqsk_queue_added(sk, timeout);
|
|
|
|
}
|
2010-07-10 05:22:10 +08:00
|
|
|
EXPORT_SYMBOL_GPL(inet_csk_reqsk_queue_hash_add);
|
2005-08-10 11:11:08 +08:00
|
|
|
|
2005-08-10 11:15:09 +08:00
|
|
|
/* Only thing we need from tcp.h */
|
|
|
|
extern int sysctl_tcp_synack_retries;
|
|
|
|
|
2005-08-10 11:11:24 +08:00
|
|
|
|
2009-10-19 18:03:58 +08:00
|
|
|
/* Decide when to expire the request and when to resend SYN-ACK */
|
|
|
|
static inline void syn_ack_recalc(struct request_sock *req, const int thresh,
|
|
|
|
const int max_retries,
|
|
|
|
const u8 rskq_defer_accept,
|
|
|
|
int *expire, int *resend)
|
|
|
|
{
|
|
|
|
if (!rskq_defer_accept) {
|
2012-10-28 07:16:46 +08:00
|
|
|
*expire = req->num_timeout >= thresh;
|
2009-10-19 18:03:58 +08:00
|
|
|
*resend = 1;
|
|
|
|
return;
|
|
|
|
}
|
2012-10-28 07:16:46 +08:00
|
|
|
*expire = req->num_timeout >= thresh &&
|
|
|
|
(!inet_rsk(req)->acked || req->num_timeout >= max_retries);
|
2009-10-19 18:03:58 +08:00
|
|
|
/*
|
|
|
|
* Do not resend while waiting for data after ACK,
|
|
|
|
* start to resend on end of deferring period to give
|
|
|
|
* last chance for data or ACK to create established socket.
|
|
|
|
*/
|
|
|
|
*resend = !inet_rsk(req)->acked ||
|
2012-10-28 07:16:46 +08:00
|
|
|
req->num_timeout >= rskq_defer_accept - 1;
|
2009-10-19 18:03:58 +08:00
|
|
|
}
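For experimenting with the decision above outside the kernel, here is a standalone transcription of the same logic (plain C, not kernel code). For example, with thresh = 5 and rskq_defer_accept = 0, a request expires once num_timeout reaches 5 but is still marked for resend; with rskq_defer_accept = 3 and an acked request, resends are suppressed until num_timeout reaches 2.

/* Standalone transcription of syn_ack_recalc() above, for trying out
 * the expire/resend decision with sample values.
 */
#include <stdbool.h>

struct fake_req {
    int  num_timeout;
    bool acked;
};

static void recalc(const struct fake_req *req, int thresh, int max_retries,
                   int defer_accept, bool *expire, bool *resend)
{
    if (!defer_accept) {
        *expire = req->num_timeout >= thresh;
        *resend = true;
        return;
    }
    *expire = req->num_timeout >= thresh &&
              (!req->acked || req->num_timeout >= max_retries);
    /* While deferring, only resend near the end of the defer period. */
    *resend = !req->acked || req->num_timeout >= defer_accept - 1;
}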
|
|
|
|
|
2012-10-28 07:16:46 +08:00
|
|
|
int inet_rtx_syn_ack(struct sock *parent, struct request_sock *req)
|
|
|
|
{
|
2013-03-17 16:23:34 +08:00
|
|
|
int err = req->rsk_ops->rtx_syn_ack(parent, req);
|
2012-10-28 07:16:46 +08:00
|
|
|
|
|
|
|
if (!err)
|
|
|
|
req->num_retrans++;
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(inet_rtx_syn_ack);
|
|
|
|
|
2005-08-10 11:15:09 +08:00
|
|
|
void inet_csk_reqsk_queue_prune(struct sock *parent,
|
|
|
|
const unsigned long interval,
|
|
|
|
const unsigned long timeout,
|
|
|
|
const unsigned long max_rto)
|
|
|
|
{
|
|
|
|
struct inet_connection_sock *icsk = inet_csk(parent);
|
|
|
|
struct request_sock_queue *queue = &icsk->icsk_accept_queue;
|
|
|
|
struct listen_sock *lopt = queue->listen_opt;
|
tcp: Revert 'process defer accept as established' changes.
This reverts two changesets, ec3c0982a2dd1e671bad8e9d26c28dcba0039d87
("[TCP]: TCP_DEFER_ACCEPT updates - process as established") and
the follow-on bug fix 9ae27e0adbf471c7a6b80102e38e1d5a346b3b38
("tcp: Fix slab corruption with ipv6 and tcp6fuzz").
This change causes several problems, first reported by Ingo Molnar
as a distcc-over-loopback regression where connections were getting
stuck.
Ilpo Järvinen first spotted the locking problems. The new function
added by this code, tcp_defer_accept_check(), only has the
child socket locked, yet it is modifying state of the parent
listening socket.
Fixing that is non-trivial at best, because we can't simply just grab
the parent listening socket lock at this point, because it would
create an ABBA deadlock. The normal ordering is parent listening
socket --> child socket, but this code path would require the
reverse lock ordering.
Next is a problem noticed by Vitaliy Gusev, he noted:
----------------------------------------
>--- a/net/ipv4/tcp_timer.c
>+++ b/net/ipv4/tcp_timer.c
>@@ -481,6 +481,11 @@ static void tcp_keepalive_timer (unsigned long data)
> goto death;
> }
>
>+ if (tp->defer_tcp_accept.request && sk->sk_state == TCP_ESTABLISHED) {
>+ tcp_send_active_reset(sk, GFP_ATOMIC);
>+ goto death;
Here socket sk is not attached to listening socket's request queue. tcp_done()
will not call inet_csk_destroy_sock() (and tcp_v4_destroy_sock() which should
release this sk) as socket is not DEAD. Therefore socket sk will be lost for
freeing.
----------------------------------------
Finally, Alexey Kuznetsov argues that there might not even be any
real value or advantage to these new semantics even if we fix all
of the bugs:
----------------------------------------
Hiding from accept() sockets with only out-of-order data only
is the only thing which is impossible with old approach. Is this really
so valuable? My opinion: no, this is nothing but a new loophole
to consume memory without control.
----------------------------------------
So revert this thing for now.
Signed-off-by: David S. Miller <davem@davemloft.net>
2008-06-13 07:31:35 +08:00
|
|
|
int max_retries = icsk->icsk_syn_retries ? : sysctl_tcp_synack_retries;
|
|
|
|
int thresh = max_retries;
|
2005-08-10 11:15:09 +08:00
|
|
|
unsigned long now = jiffies;
|
|
|
|
struct request_sock **reqp, *req;
|
|
|
|
int i, budget;
|
|
|
|
|
|
|
|
if (lopt == NULL || lopt->qlen == 0)
|
|
|
|
return;
|
|
|
|
|
|
|
|
/* Normally all the openreqs are young and become mature
|
|
|
|
* (i.e. converted to established socket) for first timeout.
|
2012-04-13 03:48:40 +08:00
|
|
|
* If synack was not acknowledged for 1 second, it means
|
2005-08-10 11:15:09 +08:00
|
|
|
* one of the following things: synack was lost, ack was lost,
|
|
|
|
* rtt is high or nobody planned to ack (i.e. synflood).
|
|
|
|
* When server is a bit loaded, queue is populated with old
|
|
|
|
* open requests, reducing effective size of queue.
|
|
|
|
* When server is well loaded, queue size reduces to zero
|
|
|
|
* after several minutes of work. It is not synflood,
|
|
|
|
* it is normal operation. The solution is pruning
|
|
|
|
* too old entries overriding normal timeout, when
|
|
|
|
* situation becomes dangerous.
|
|
|
|
*
|
|
|
|
* Essentially, we reserve half of room for young
|
|
|
|
* embrions; and abort old ones without pity, if old
|
|
|
|
* ones are about to clog our table.
|
|
|
|
*/
|
|
|
|
if (lopt->qlen >> (lopt->max_qlen_log - 1)) {
|
|
|
|
int young = (lopt->qlen_young << 1);
|
|
|
|
|
|
|
|
while (thresh > 2) {
|
|
|
|
if (lopt->qlen < young)
|
|
|
|
break;
|
|
|
|
thresh--;
|
|
|
|
young <<= 1;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2008-06-13 07:31:35 +08:00
|
|
|
if (queue->rskq_defer_accept)
|
|
|
|
max_retries = queue->rskq_defer_accept;
|
|
|
|
|
2005-08-10 11:15:09 +08:00
|
|
|
budget = 2 * (lopt->nr_table_entries / (timeout / interval));
|
|
|
|
i = lopt->clock_hand;
|
|
|
|
|
|
|
|
do {
|
|
|
|
reqp = &lopt->syn_table[i];
|
|
|
|
while ((req = *reqp) != NULL) {
|
|
|
|
if (time_after_eq(now, req->expires)) {
|
2009-10-19 18:03:58 +08:00
|
|
|
int expire = 0, resend = 0;
|
|
|
|
|
|
|
|
syn_ack_recalc(req, thresh, max_retries,
|
|
|
|
queue->rskq_defer_accept,
|
|
|
|
&expire, &resend);
|
2012-04-13 06:16:05 +08:00
|
|
|
req->rsk_ops->syn_ack_timeout(parent, req);
|
2009-10-19 18:03:58 +08:00
|
|
|
if (!expire &&
|
|
|
|
(!resend ||
|
2012-10-28 07:16:46 +08:00
|
|
|
!inet_rtx_syn_ack(parent, req) ||
|
2009-10-19 18:03:58 +08:00
|
|
|
inet_rsk(req)->acked)) {
|
2005-08-10 11:15:09 +08:00
|
|
|
unsigned long timeo;
|
|
|
|
|
2012-10-28 07:16:46 +08:00
|
|
|
if (req->num_timeout++ == 0)
|
2005-08-10 11:15:09 +08:00
|
|
|
lopt->qlen_young--;
|
2012-10-28 07:16:46 +08:00
|
|
|
timeo = min(timeout << req->num_timeout,
|
|
|
|
max_rto);
|
2005-08-10 11:15:09 +08:00
|
|
|
req->expires = now + timeo;
|
|
|
|
reqp = &req->dl_next;
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Drop this request */
|
|
|
|
inet_csk_reqsk_queue_unlink(parent, req, reqp);
|
|
|
|
reqsk_queue_removed(queue, req);
|
|
|
|
reqsk_free(req);
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
reqp = &req->dl_next;
|
|
|
|
}
|
|
|
|
|
|
|
|
i = (i + 1) & (lopt->nr_table_entries - 1);
|
|
|
|
|
|
|
|
} while (--budget > 0);
|
|
|
|
|
|
|
|
lopt->clock_hand = i;
|
|
|
|
|
|
|
|
if (lopt->qlen)
|
|
|
|
inet_csk_reset_keepalive_timer(parent, interval);
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(inet_csk_reqsk_queue_prune);
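To get a feel for the budget computation above, a tiny worked example with purely hypothetical values (the real interval and timeout come from the caller):

/* Worked example of the prune budget, with hypothetical values:
 * nr_table_entries = 512, interval = HZ/5, timeout = HZ, HZ = 1000.
 */
#include <stdio.h>

int main(void)
{
    unsigned long HZ = 1000;                    /* hypothetical tick rate */
    unsigned long nr_table_entries = 512;
    unsigned long interval = HZ / 5;
    unsigned long timeout  = HZ;
    unsigned long budget = 2 * (nr_table_entries / (timeout / interval));

    /* 2 * (512 / 5) = 204 buckets per timer tick, so the whole table is
     * walked roughly twice per 'timeout' worth of ticks. */
    printf("budget = %lu buckets per tick\n", budget);
    return 0;
}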
|
|
|
|
|
2011-11-09 06:07:07 +08:00
|
|
|
/**
|
|
|
|
* inet_csk_clone_lock - clone an inet socket, and lock its clone
|
|
|
|
* @sk: the socket to clone
|
|
|
|
* @req: request_sock
|
|
|
|
* @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
|
|
|
|
*
|
|
|
|
* Caller must unlock socket even in error path (bh_unlock_sock(newsk))
|
|
|
|
*/
|
|
|
|
struct sock *inet_csk_clone_lock(const struct sock *sk,
|
|
|
|
const struct request_sock *req,
|
|
|
|
const gfp_t priority)
|
2005-08-10 11:11:24 +08:00
|
|
|
{
|
2011-11-09 06:07:07 +08:00
|
|
|
struct sock *newsk = sk_clone_lock(sk, priority);
|
2005-08-10 11:11:24 +08:00
|
|
|
|
|
|
|
if (newsk != NULL) {
|
|
|
|
struct inet_connection_sock *newicsk = inet_csk(newsk);
|
|
|
|
|
|
|
|
newsk->sk_state = TCP_SYN_RECV;
|
|
|
|
newicsk->icsk_bind_hash = NULL;
|
|
|
|
|
2009-10-15 14:30:45 +08:00
|
|
|
inet_sk(newsk)->inet_dport = inet_rsk(req)->rmt_port;
|
|
|
|
inet_sk(newsk)->inet_num = ntohs(inet_rsk(req)->loc_port);
|
|
|
|
inet_sk(newsk)->inet_sport = inet_rsk(req)->loc_port;
|
2005-08-10 11:11:24 +08:00
|
|
|
newsk->sk_write_space = sk_stream_write_space;
|
|
|
|
|
|
|
|
newicsk->icsk_retransmits = 0;
|
2005-08-10 15:03:31 +08:00
|
|
|
newicsk->icsk_backoff = 0;
|
|
|
|
newicsk->icsk_probes_out = 0;
|
2005-08-10 11:11:24 +08:00
|
|
|
|
|
|
|
/* Deinitialize accept_queue to trap illegal accesses. */
|
|
|
|
memset(&newicsk->icsk_accept_queue, 0, sizeof(newicsk->icsk_accept_queue));
|
2006-07-25 14:32:50 +08:00
|
|
|
|
|
|
|
security_inet_csk_clone(newsk, req);
|
2005-08-10 11:11:24 +08:00
|
|
|
}
|
|
|
|
return newsk;
|
|
|
|
}
|
2011-11-09 06:07:07 +08:00
|
|
|
EXPORT_SYMBOL_GPL(inet_csk_clone_lock);
|
2005-08-10 11:15:09 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* At this point, there should be no process reference to this
|
|
|
|
* socket, and thus no user references at all. Therefore we
|
|
|
|
* can assume the socket waitqueue is inactive and nobody will
|
|
|
|
* try to jump onto it.
|
|
|
|
*/
|
|
|
|
void inet_csk_destroy_sock(struct sock *sk)
|
|
|
|
{
|
2008-07-26 12:43:18 +08:00
|
|
|
WARN_ON(sk->sk_state != TCP_CLOSE);
|
|
|
|
WARN_ON(!sock_flag(sk, SOCK_DEAD));
|
2005-08-10 11:15:09 +08:00
|
|
|
|
|
|
|
/* It cannot be in hash table! */
|
2008-07-26 12:43:18 +08:00
|
|
|
WARN_ON(!sk_unhashed(sk));
|
2005-08-10 11:15:09 +08:00
|
|
|
|
2009-10-15 14:30:45 +08:00
|
|
|
/* If it has a non-zero inet_sk(sk)->inet_num, it must be bound */
|
|
|
|
WARN_ON(inet_sk(sk)->inet_num && !inet_csk(sk)->icsk_bind_hash);
|
2005-08-10 11:15:09 +08:00
|
|
|
|
|
|
|
sk->sk_prot->destroy(sk);
|
|
|
|
|
|
|
|
sk_stream_kill_queues(sk);
|
|
|
|
|
|
|
|
xfrm_sk_free_policy(sk);
|
|
|
|
|
|
|
|
sk_refcnt_debug_release(sk);
|
|
|
|
|
2008-11-26 13:17:14 +08:00
|
|
|
percpu_counter_dec(sk->sk_prot->orphan_count);
|
2005-08-10 11:15:09 +08:00
|
|
|
sock_put(sk);
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(inet_csk_destroy_sock);
|
|
|
|
|
inet: Fix kmemleak in tcp_v4/6_syn_recv_sock and dccp_v4/6_request_recv_sock
If in either of the above functions inet_csk_route_child_sock() or
__inet_inherit_port() fails, the newsk will not be freed:
unreferenced object 0xffff88022e8a92c0 (size 1592):
comm "softirq", pid 0, jiffies 4294946244 (age 726.160s)
hex dump (first 32 bytes):
0a 01 01 01 0a 01 01 02 00 00 00 00 a7 cc 16 00 ................
02 00 03 01 00 00 00 00 00 00 00 00 00 00 00 00 ................
backtrace:
[<ffffffff8153d190>] kmemleak_alloc+0x21/0x3e
[<ffffffff810ab3e7>] kmem_cache_alloc+0xb5/0xc5
[<ffffffff8149b65b>] sk_prot_alloc.isra.53+0x2b/0xcd
[<ffffffff8149b784>] sk_clone_lock+0x16/0x21e
[<ffffffff814d711a>] inet_csk_clone_lock+0x10/0x7b
[<ffffffff814ebbc3>] tcp_create_openreq_child+0x21/0x481
[<ffffffff814e8fa5>] tcp_v4_syn_recv_sock+0x3a/0x23b
[<ffffffff814ec5ba>] tcp_check_req+0x29f/0x416
[<ffffffff814e8e10>] tcp_v4_do_rcv+0x161/0x2bc
[<ffffffff814eb917>] tcp_v4_rcv+0x6c9/0x701
[<ffffffff814cea9f>] ip_local_deliver_finish+0x70/0xc4
[<ffffffff814cec20>] ip_local_deliver+0x4e/0x7f
[<ffffffff814ce9f8>] ip_rcv_finish+0x1fc/0x233
[<ffffffff814cee68>] ip_rcv+0x217/0x267
[<ffffffff814a7bbe>] __netif_receive_skb+0x49e/0x553
[<ffffffff814a7cc3>] netif_receive_skb+0x50/0x82
This happens, because sk_clone_lock initializes sk_refcnt to 2, and thus
a single sock_put() is not enough to free the memory. Additionally, things
like xfrm, memcg, cookie_values,... may have been initialized.
We have to free them properly.
This is fixed by forcing a call to tcp_done(), ending up in
inet_csk_destroy_sock, doing the final sock_put(). tcp_done() is necessary,
because it ends up doing all the cleanup on xfrm, memcg, cookie_values,
xfrm,...
Before calling tcp_done, we have to set the socket to SOCK_DEAD, to
force it entering inet_csk_destroy_sock. To avoid the warning in
inet_csk_destroy_sock, inet_num has to be set to 0.
As inet_csk_destroy_sock does a dec on orphan_count, we first have to
increase it.
Calling tcp_done() allows us to remove the calls to
tcp_clear_xmit_timer() and tcp_cleanup_congestion_control().
A similar approach is taken for dccp by calling dccp_done().
This is in the kernel since 093d282321 (tproxy: fix hash locking issue
when using port redirection in __inet_inherit_port()), thus since
version >= 2.6.37.
Signed-off-by: Christoph Paasch <christoph.paasch@uclouvain.be>
Signed-off-by: David S. Miller <davem@davemloft.net>
2012-12-14 12:07:58 +08:00
|
|
|
/* This function allows forcing the closure of a socket after the call to
|
|
|
|
* tcp/dccp_create_openreq_child().
|
|
|
|
*/
|
|
|
|
void inet_csk_prepare_forced_close(struct sock *sk)
|
2013-03-07 10:34:33 +08:00
|
|
|
__releases(&sk->sk_lock.slock)
|
2012-12-14 12:07:58 +08:00
|
|
|
{
|
|
|
|
/* sk_clone_lock locked the socket and set refcnt to 2 */
|
|
|
|
bh_unlock_sock(sk);
|
|
|
|
sock_put(sk);
|
|
|
|
|
|
|
|
/* The below has to be done to allow calling inet_csk_destroy_sock */
|
|
|
|
sock_set_flag(sk, SOCK_DEAD);
|
|
|
|
percpu_counter_inc(sk->sk_prot->orphan_count);
|
|
|
|
inet_sk(sk)->inet_num = 0;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(inet_csk_prepare_forced_close);
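The kmemleak fix quoted above relies on the following call pattern on the error path after tcp/dccp_create_openreq_child(); this is a sketch paraphrasing the commit message, not the literal tcp_ipv4.c error path:

/*
 * Sketch of the error-path pattern described above: after
 * tcp_create_openreq_child(), a failure must not just sock_put() the
 * child (its refcount is 2); it has to go through tcp_done() so that
 * inet_csk_destroy_sock() runs the full cleanup.
 */
static void abort_new_child(struct sock *newsk)
{
	inet_csk_prepare_forced_close(newsk);	/* unlock, drop one ref,
						   mark SOCK_DEAD, inet_num = 0 */
	tcp_done(newsk);			/* ends in inet_csk_destroy_sock() */
}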
|
|
|
|
|
2005-08-10 11:15:09 +08:00
|
|
|
int inet_csk_listen_start(struct sock *sk, const int nr_table_entries)
|
|
|
|
{
|
|
|
|
struct inet_sock *inet = inet_sk(sk);
|
|
|
|
struct inet_connection_sock *icsk = inet_csk(sk);
|
|
|
|
int rc = reqsk_queue_alloc(&icsk->icsk_accept_queue, nr_table_entries);
|
|
|
|
|
|
|
|
if (rc != 0)
|
|
|
|
return rc;
|
|
|
|
|
|
|
|
sk->sk_max_ack_backlog = 0;
|
|
|
|
sk->sk_ack_backlog = 0;
|
|
|
|
inet_csk_delack_init(sk);
|
|
|
|
|
|
|
|
/* There is a race window here: we announce ourselves listening,
|
|
|
|
* but this transition is still not validated by get_port().
|
|
|
|
* It is OK, because this socket enters the hash table only
|
|
|
|
* after validation is complete.
|
|
|
|
*/
|
|
|
|
sk->sk_state = TCP_LISTEN;
|
2009-10-15 14:30:45 +08:00
|
|
|
if (!sk->sk_prot->get_port(sk, inet->inet_num)) {
|
|
|
|
inet->inet_sport = htons(inet->inet_num);
|
2005-08-10 11:15:09 +08:00
|
|
|
|
|
|
|
sk_dst_reset(sk);
|
|
|
|
sk->sk_prot->hash(sk);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
sk->sk_state = TCP_CLOSE;
|
|
|
|
__reqsk_queue_destroy(&icsk->icsk_accept_queue);
|
|
|
|
return -EADDRINUSE;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(inet_csk_listen_start);
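The -EADDRINUSE return above is what surfaces at listen() time when get_port() cannot re-validate the binding; a userspace sketch that provokes it with two SO_REUSEADDR sockets bound to the same port (5555 is an arbitrary example):

/* Both bind() calls succeed thanks to SO_REUSEADDR, but the second
 * listen() fails with EADDRINUSE because get_port() re-validates the
 * binding inside inet_csk_listen_start() once a listener exists.
 */
#include <arpa/inet.h>
#include <errno.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>

static int bound_socket(unsigned short port)
{
    int one = 1;
    int fd = socket(AF_INET, SOCK_STREAM, 0);
    struct sockaddr_in a;

    setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one));
    memset(&a, 0, sizeof(a));
    a.sin_family = AF_INET;
    a.sin_addr.s_addr = htonl(INADDR_ANY);
    a.sin_port = htons(port);
    bind(fd, (struct sockaddr *)&a, sizeof(a));
    return fd;
}

int main(void)
{
    int s1 = bound_socket(5555);
    int s2 = bound_socket(5555);

    printf("listen(s1): %d\n", listen(s1, 16));                   /* 0 */
    printf("listen(s2): %d (errno %d)\n", listen(s2, 16), errno); /* -1, EADDRINUSE */
    return 0;
}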
|
|
|
|
|
|
|
|
/*
|
|
|
|
* This routine closes sockets which have been at least partially
|
|
|
|
* opened, but not yet accepted.
|
|
|
|
*/
|
|
|
|
void inet_csk_listen_stop(struct sock *sk)
|
|
|
|
{
|
|
|
|
struct inet_connection_sock *icsk = inet_csk(sk);
|
2012-08-31 20:29:12 +08:00
|
|
|
struct request_sock_queue *queue = &icsk->icsk_accept_queue;
|
2005-08-10 11:15:09 +08:00
|
|
|
struct request_sock *acc_req;
|
|
|
|
struct request_sock *req;
|
|
|
|
|
|
|
|
inet_csk_delete_keepalive_timer(sk);
|
|
|
|
|
|
|
|
/* make all the listen_opt local to us */
|
2012-08-31 20:29:12 +08:00
|
|
|
acc_req = reqsk_queue_yank_acceptq(queue);
|
2005-08-10 11:15:09 +08:00
|
|
|
|
|
|
|
/* Following specs, it would be better either to send FIN
|
|
|
|
* (and enter FIN-WAIT-1, it is normal close)
|
|
|
|
* or to send active reset (abort).
|
|
|
|
* Certainly, it is pretty dangerous while synflood, but it is
|
|
|
|
* bad justification for our negligence 8)
|
|
|
|
* To be honest, we are not able to make either
|
|
|
|
* of the variants now. --ANK
|
|
|
|
*/
|
2012-08-31 20:29:12 +08:00
|
|
|
reqsk_queue_destroy(queue);
|
2005-08-10 11:15:09 +08:00
|
|
|
|
|
|
|
while ((req = acc_req) != NULL) {
|
|
|
|
struct sock *child = req->sk;
|
|
|
|
|
|
|
|
acc_req = req->dl_next;
|
|
|
|
|
|
|
|
local_bh_disable();
|
|
|
|
bh_lock_sock(child);
|
2008-07-26 12:43:18 +08:00
|
|
|
WARN_ON(sock_owned_by_user(child));
|
2005-08-10 11:15:09 +08:00
|
|
|
sock_hold(child);
|
|
|
|
|
|
|
|
sk->sk_prot->disconnect(child, O_NONBLOCK);
|
|
|
|
|
|
|
|
sock_orphan(child);
|
|
|
|
|
2008-12-30 15:04:08 +08:00
|
|
|
percpu_counter_inc(sk->sk_prot->orphan_count);
|
|
|
|
|
2012-09-06 16:07:13 +08:00
|
|
|
if (sk->sk_protocol == IPPROTO_TCP && tcp_rsk(req)->listener) {
|
2012-08-31 20:29:12 +08:00
|
|
|
BUG_ON(tcp_sk(child)->fastopen_rsk != req);
|
|
|
|
BUG_ON(sk != tcp_rsk(req)->listener);
|
|
|
|
|
|
|
|
/* Paranoid, to prevent race condition if
|
|
|
|
* an inbound pkt destined for child is
|
|
|
|
* blocked by sock lock in tcp_v4_rcv().
|
|
|
|
* Also to satisfy an assertion in
|
|
|
|
* tcp_v4_destroy_sock().
|
|
|
|
*/
|
|
|
|
tcp_sk(child)->fastopen_rsk = NULL;
|
|
|
|
sock_put(sk);
|
|
|
|
}
|
2005-08-10 11:15:09 +08:00
|
|
|
inet_csk_destroy_sock(child);
|
|
|
|
|
|
|
|
bh_unlock_sock(child);
|
|
|
|
local_bh_enable();
|
|
|
|
sock_put(child);
|
|
|
|
|
|
|
|
sk_acceptq_removed(sk);
|
|
|
|
__reqsk_free(req);
|
|
|
|
}
|
2012-08-31 20:29:12 +08:00
|
|
|
if (queue->fastopenq != NULL) {
|
|
|
|
/* Free all the reqs queued in rskq_rst_head. */
|
|
|
|
spin_lock_bh(&queue->fastopenq->lock);
|
|
|
|
acc_req = queue->fastopenq->rskq_rst_head;
|
|
|
|
queue->fastopenq->rskq_rst_head = NULL;
|
|
|
|
spin_unlock_bh(&queue->fastopenq->lock);
|
|
|
|
while ((req = acc_req) != NULL) {
|
|
|
|
acc_req = req->dl_next;
|
|
|
|
__reqsk_free(req);
|
|
|
|
}
|
|
|
|
}
|
2008-07-26 12:43:18 +08:00
|
|
|
WARN_ON(sk->sk_ack_backlog);
|
2005-08-10 11:15:09 +08:00
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(inet_csk_listen_stop);
|
2005-12-14 15:16:04 +08:00
|
|
|
|
|
|
|
void inet_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr)
|
|
|
|
{
|
|
|
|
struct sockaddr_in *sin = (struct sockaddr_in *)uaddr;
|
|
|
|
const struct inet_sock *inet = inet_sk(sk);
|
|
|
|
|
|
|
|
sin->sin_family = AF_INET;
|
2009-10-15 14:30:45 +08:00
|
|
|
sin->sin_addr.s_addr = inet->inet_daddr;
|
|
|
|
sin->sin_port = inet->inet_dport;
|
2005-12-14 15:16:04 +08:00
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(inet_csk_addr2sockaddr);
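The two fields this helper copies (inet_daddr and inet_dport) correspond to what userspace sees as the remote address of a connected IPv4 TCP socket; a small sketch using getpeername():

/* Userspace view of the remote address and port that
 * inet_csk_addr2sockaddr() fills in on the kernel side.
 */
#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <sys/socket.h>

void print_peer(int connected_fd)
{
    struct sockaddr_in peer;
    socklen_t len = sizeof(peer);
    char buf[INET_ADDRSTRLEN];

    if (getpeername(connected_fd, (struct sockaddr *)&peer, &len) == 0)
        printf("peer %s:%d\n",
               inet_ntop(AF_INET, &peer.sin_addr, buf, sizeof(buf)),
               ntohs(peer.sin_port));
}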
|
2006-03-21 14:01:03 +08:00
|
|
|
|
2006-03-21 14:46:16 +08:00
|
|
|
#ifdef CONFIG_COMPAT
|
|
|
|
int inet_csk_compat_getsockopt(struct sock *sk, int level, int optname,
|
|
|
|
char __user *optval, int __user *optlen)
|
|
|
|
{
|
2006-03-21 14:52:32 +08:00
|
|
|
const struct inet_connection_sock *icsk = inet_csk(sk);
|
2006-03-21 14:46:16 +08:00
|
|
|
|
|
|
|
if (icsk->icsk_af_ops->compat_getsockopt != NULL)
|
|
|
|
return icsk->icsk_af_ops->compat_getsockopt(sk, level, optname,
|
|
|
|
optval, optlen);
|
|
|
|
return icsk->icsk_af_ops->getsockopt(sk, level, optname,
|
|
|
|
optval, optlen);
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(inet_csk_compat_getsockopt);
|
|
|
|
|
|
|
|
int inet_csk_compat_setsockopt(struct sock *sk, int level, int optname,
|
2009-10-01 07:12:20 +08:00
|
|
|
char __user *optval, unsigned int optlen)
|
2006-03-21 14:46:16 +08:00
|
|
|
{
|
2006-03-21 14:52:32 +08:00
|
|
|
const struct inet_connection_sock *icsk = inet_csk(sk);
|
2006-03-21 14:46:16 +08:00
|
|
|
|
|
|
|
if (icsk->icsk_af_ops->compat_setsockopt != NULL)
|
|
|
|
return icsk->icsk_af_ops->compat_setsockopt(sk, level, optname,
|
|
|
|
optval, optlen);
|
|
|
|
return icsk->icsk_af_ops->setsockopt(sk, level, optname,
|
|
|
|
optval, optlen);
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(inet_csk_compat_setsockopt);
|
|
|
|
#endif
|
2012-07-16 18:28:06 +08:00
|
|
|
|
|
|
|
static struct dst_entry *inet_csk_rebuild_route(struct sock *sk, struct flowi *fl)
|
|
|
|
{
|
2012-07-18 04:42:13 +08:00
|
|
|
const struct inet_sock *inet = inet_sk(sk);
|
|
|
|
const struct ip_options_rcu *inet_opt;
|
2012-07-16 18:28:06 +08:00
|
|
|
__be32 daddr = inet->inet_daddr;
|
|
|
|
struct flowi4 *fl4;
|
|
|
|
struct rtable *rt;
|
|
|
|
|
|
|
|
rcu_read_lock();
|
|
|
|
inet_opt = rcu_dereference(inet->inet_opt);
|
|
|
|
if (inet_opt && inet_opt->opt.srr)
|
|
|
|
daddr = inet_opt->opt.faddr;
|
|
|
|
fl4 = &fl->u.ip4;
|
|
|
|
rt = ip_route_output_ports(sock_net(sk), fl4, sk, daddr,
|
|
|
|
inet->inet_saddr, inet->inet_dport,
|
|
|
|
inet->inet_sport, sk->sk_protocol,
|
|
|
|
RT_CONN_FLAGS(sk), sk->sk_bound_dev_if);
|
|
|
|
if (IS_ERR(rt))
|
|
|
|
rt = NULL;
|
|
|
|
if (rt)
|
|
|
|
sk_setup_caps(sk, &rt->dst);
|
|
|
|
rcu_read_unlock();
|
|
|
|
|
|
|
|
return &rt->dst;
|
|
|
|
}
|
|
|
|
|
|
|
|
struct dst_entry *inet_csk_update_pmtu(struct sock *sk, u32 mtu)
|
|
|
|
{
|
|
|
|
struct dst_entry *dst = __sk_dst_check(sk, 0);
|
|
|
|
struct inet_sock *inet = inet_sk(sk);
|
|
|
|
|
|
|
|
if (!dst) {
|
|
|
|
dst = inet_csk_rebuild_route(sk, &inet->cork.fl);
|
|
|
|
if (!dst)
|
|
|
|
goto out;
|
|
|
|
}
|
2012-07-17 18:29:28 +08:00
|
|
|
dst->ops->update_pmtu(dst, sk, NULL, mtu);
|
2012-07-16 18:28:06 +08:00
|
|
|
|
|
|
|
dst = __sk_dst_check(sk, 0);
|
|
|
|
if (!dst)
|
|
|
|
dst = inet_csk_rebuild_route(sk, &inet->cork.fl);
|
|
|
|
out:
|
|
|
|
return dst;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(inet_csk_update_pmtu);
|