ipv4: tcp: remove per net tcp_sock

tcp_v4_send_reset() and tcp_v4_send_ack() use a single socket
per network namespace.

This leads to bad behavior on multiqueue NICs, because many cpus
contend for the socket lock and, once the socket lock is acquired,
extra false sharing on various socket fields slows down the operations.

To better resist attacks, we use a percpu socket. Each cpu can
run without contention, using appropriate (node-local) memory.
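
The mechanism is the kernel's standard per-cpu object pattern. Here is a
condensed sketch of what the net/ipv4/ip_output.c hunk below does; the
wrapper function name is hypothetical, and the actual send path
(ip_append_data()/ip_push_pending_frames()) is elided:

#include <linux/percpu.h>
#include <net/inet_sock.h>

static DEFINE_PER_CPU(struct inet_sock, unicast_sock);

static void send_reply_sketch(struct net *net)
{
        /* get_cpu_var() disables preemption and returns this cpu's
         * private instance: nothing to lock, and the memory is
         * node-local.
         */
        struct inet_sock *inet = &get_cpu_var(unicast_sock);
        struct sock *sk = &inet->sk;

        sock_net_set(sk, net);                  /* attach to the caller's netns */
        sk->sk_sndbuf = sysctl_wmem_default;    /* re-read the sysctl each time */

        /* ... parameterize sk from the incoming skb, then build and
         * send the reply ...
         */

        put_cpu_var(unicast_sock);              /* re-enable preemption */
}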

Additional features:

1) We also mirror the queue_mapping of the incoming skb, so that
answers use the same queue if possible.

2) Setting the SOCK_USE_WRITE_QUEUE socket flag speeds up sock_wfree()

3) We now limit the number of in-flight RST/ACK [1] packets
per cpu, instead of per namespace, and we honor the sysctl_wmem_default
limit dynamically. (Prior to this patch, the sysctl_wmem_default value was
copied at boot time, so any later change did not affect the tcp_sock
limit.) See the sketch after this list for how the cap falls out of the
ordinary socket write-space accounting.

[1] These packets are only generated when no socket matched the
incoming packet.
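
For reference, the per-cpu cap in (3) requires no new accounting code:
each pending reply is charged to the per-cpu socket's sk_wmem_alloc, and
the allocation path refuses to go past sk_sndbuf. A rough sketch of that
pre-existing write-space check (the real code lives in helpers such as
sock_alloc_send_pskb() in net/core/sock.c):

        if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
                skb = alloc_skb(size, priority);
                if (skb)
                        skb_set_owner_w(skb, sk); /* charge sk_wmem_alloc */
        } else {
                skb = NULL; /* this cpu already has sk_sndbuf bytes of
                             * replies in flight: the RST/ACK is simply
                             * not generated */
        }

The SOCK_USE_WRITE_QUEUE flag in (2) helps on the release side: with it
set, sock_wfree() skips the sk_write_space() callback and only has to
uncharge sk_wmem_alloc when the reply skb is freed.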

Reported-by: Bill Sommerfeld <wsommerfeld@google.com>
Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Tom Herbert <therbert@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Author: Eric Dumazet
Date:   2012-07-19 07:34:03 +0000 (committed by David S. Miller)
commit be9f4a44e7
parent aee06da672
4 changed files with 36 additions and 25 deletions

--- a/include/net/ip.h
+++ b/include/net/ip.h

@@ -158,7 +158,7 @@ static inline __u8 ip_reply_arg_flowi_flags(const struct ip_reply_arg *arg)
 	return (arg->flags & IP_REPLY_ARG_NOSRCCHECK) ? FLOWI_FLAG_ANYSRC : 0;
 }
 
-void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb, __be32 daddr,
+void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, __be32 daddr,
 			   __be32 saddr, const struct ip_reply_arg *arg,
 			   unsigned int len);

--- a/include/net/netns/ipv4.h
+++ b/include/net/netns/ipv4.h

@@ -38,7 +38,6 @@ struct netns_ipv4 {
 	struct sock		*fibnl;
 
 	struct sock		**icmp_sk;
-	struct sock		*tcp_sock;
 	struct inet_peer_base	*peers;
 	struct tcpm_hash_bucket	*tcp_metrics_hash;
 	unsigned int		tcp_metrics_hash_mask;

--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c

@@ -1463,20 +1463,33 @@ static int ip_reply_glue_bits(void *dptr, char *to, int offset,
 
 /*
  *	Generic function to send a packet as reply to another packet.
- *	Used to send TCP resets so far.
+ *	Used to send some TCP resets/acks so far.
  *
- *	Should run single threaded per socket because it uses the sock
- *	structure to pass arguments.
+ *	Use a fake percpu inet socket to avoid false sharing and contention.
  */
-void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb, __be32 daddr,
+static DEFINE_PER_CPU(struct inet_sock, unicast_sock) = {
+	.sk = {
+		.__sk_common = {
+			.skc_refcnt = ATOMIC_INIT(1),
+		},
+		.sk_wmem_alloc	= ATOMIC_INIT(1),
+		.sk_allocation	= GFP_ATOMIC,
+		.sk_flags	= (1UL << SOCK_USE_WRITE_QUEUE),
+	},
+	.pmtudisc	= IP_PMTUDISC_WANT,
+};
+
+void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, __be32 daddr,
 			   __be32 saddr, const struct ip_reply_arg *arg,
 			   unsigned int len)
 {
-	struct inet_sock *inet = inet_sk(sk);
 	struct ip_options_data replyopts;
 	struct ipcm_cookie ipc;
 	struct flowi4 fl4;
 	struct rtable *rt = skb_rtable(skb);
+	struct sk_buff *nskb;
+	struct sock *sk;
+	struct inet_sock *inet;
 
 	if (ip_options_echo(&replyopts.opt.opt, skb))
 		return;
@@ -1494,38 +1507,39 @@ void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb, __be32 daddr,
 
 	flowi4_init_output(&fl4, arg->bound_dev_if, 0,
 			   RT_TOS(arg->tos),
-			   RT_SCOPE_UNIVERSE, sk->sk_protocol,
+			   RT_SCOPE_UNIVERSE, ip_hdr(skb)->protocol,
 			   ip_reply_arg_flowi_flags(arg),
 			   daddr, saddr,
 			   tcp_hdr(skb)->source, tcp_hdr(skb)->dest);
 	security_skb_classify_flow(skb, flowi4_to_flowi(&fl4));
-	rt = ip_route_output_key(sock_net(sk), &fl4);
+	rt = ip_route_output_key(net, &fl4);
 	if (IS_ERR(rt))
 		return;
 
-	/* And let IP do all the hard work.
+	inet = &get_cpu_var(unicast_sock);
 
-	   This chunk is not reenterable, hence spinlock.
-	   Note that it uses the fact, that this function is called
-	   with locally disabled BH and that sk cannot be already spinlocked.
-	 */
-	bh_lock_sock(sk);
 	inet->tos = arg->tos;
+	sk = &inet->sk;
 	sk->sk_priority = skb->priority;
 	sk->sk_protocol = ip_hdr(skb)->protocol;
 	sk->sk_bound_dev_if = arg->bound_dev_if;
+	sock_net_set(sk, net);
+	__skb_queue_head_init(&sk->sk_write_queue);
+	sk->sk_sndbuf = sysctl_wmem_default;
 	ip_append_data(sk, &fl4, ip_reply_glue_bits, arg->iov->iov_base, len, 0,
 		       &ipc, &rt, MSG_DONTWAIT);
-	if ((skb = skb_peek(&sk->sk_write_queue)) != NULL) {
+	nskb = skb_peek(&sk->sk_write_queue);
+	if (nskb) {
 		if (arg->csumoffset >= 0)
-			*((__sum16 *)skb_transport_header(skb) +
-			  arg->csumoffset) = csum_fold(csum_add(skb->csum,
+			*((__sum16 *)skb_transport_header(nskb) +
+			  arg->csumoffset) = csum_fold(csum_add(nskb->csum,
 								arg->csum));
-		skb->ip_summed = CHECKSUM_NONE;
+		nskb->ip_summed = CHECKSUM_NONE;
+		skb_set_queue_mapping(nskb, skb_get_queue_mapping(skb));
 		ip_push_pending_frames(sk, &fl4);
 	}
 
-	bh_unlock_sock(sk);
+	put_cpu_var(unicast_sock);
 
 	ip_rt_put(rt);
 }

--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c

@@ -688,7 +688,7 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
 
 	net = dev_net(skb_dst(skb)->dev);
 	arg.tos = ip_hdr(skb)->tos;
-	ip_send_unicast_reply(net->ipv4.tcp_sock, skb, ip_hdr(skb)->saddr,
+	ip_send_unicast_reply(net, skb, ip_hdr(skb)->saddr,
 			      ip_hdr(skb)->daddr, &arg, arg.iov[0].iov_len);
 
 	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
@@ -771,7 +771,7 @@ static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
 	if (oif)
 		arg.bound_dev_if = oif;
 	arg.tos = tos;
-	ip_send_unicast_reply(net->ipv4.tcp_sock, skb, ip_hdr(skb)->saddr,
+	ip_send_unicast_reply(net, skb, ip_hdr(skb)->saddr,
 			      ip_hdr(skb)->daddr, &arg, arg.iov[0].iov_len);
 
 	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
@@ -2624,13 +2624,11 @@ EXPORT_SYMBOL(tcp_prot);
 
 static int __net_init tcp_sk_init(struct net *net)
 {
-	return inet_ctl_sock_create(&net->ipv4.tcp_sock,
-				    PF_INET, SOCK_RAW, IPPROTO_TCP, net);
+	return 0;
 }
 
 static void __net_exit tcp_sk_exit(struct net *net)
 {
-	inet_ctl_sock_destroy(net->ipv4.tcp_sock);
 }
 
 static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)