tcp: bug fix Fast Open client retransmission

If the SYN-ACK partially acks the SYN-data, the client retransmits the
remaining data via tcp_retransmit_skb(). This increments loss-recovery
state variables like tp->retrans_out while the connection is still in
the Open state. If loss recovery happens before the retransmission is
acked, it triggers the WARN_ON check in tcp_fastretrans_alert(). For
example: the client sends SYN-data, gets a SYN-ACK acking only the ISN,
retransmits the data, sends another 4 data packets, and gets 3 dupacks.
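
For illustration only, a toy userspace sketch (not kernel code and not
part of this commit) of the invariant that trips: loss-recovery
retransmissions bump tp->retrans_out, and the Open-state sanity check in
tcp_fastretrans_alert() expects that counter to be zero, which is exactly
what a SYN-data retransmit done through the loss-recovery path violates.
The struct and function names below are simplified stand-ins, not kernel
APIs.

/* Toy model (userspace, illustrative only): the invariant behind the
 * warning is "no retransmissions outstanding while in the Open state".
 */
#include <stdio.h>

enum toy_ca_state { TOY_CA_OPEN, TOY_CA_RECOVERY };

struct toy_sock {
	enum toy_ca_state ca_state;
	unsigned int retrans_out;	/* stand-in for tp->retrans_out */
};

/* Stand-in for tcp_retransmit_skb(): a loss-recovery retransmit
 * legitimately increments retrans_out. */
static void toy_retransmit(struct toy_sock *sk)
{
	sk->retrans_out++;
}

/* Stand-in for the Open-state sanity check in tcp_fastretrans_alert(). */
static void toy_fastretrans_alert(const struct toy_sock *sk)
{
	if (sk->ca_state == TOY_CA_OPEN && sk->retrans_out != 0)
		printf("WARN: retrans_out=%u while still in Open state\n",
		       sk->retrans_out);
}

int main(void)
{
	struct toy_sock sk = { .ca_state = TOY_CA_OPEN, .retrans_out = 0 };

	/* The SYN-ACK acked only the ISN, so the old code retransmitted
	 * the SYN payload via the loss-recovery helper; the counter is
	 * bumped even though nothing was dropped by the network. */
	toy_retransmit(&sk);

	/* Three dupacks later, loss recovery inspects the state and the
	 * sanity check fires. */
	toy_fastretrans_alert(&sk);
	return 0;
}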

Since the retransmission is not caused by a network drop, it should not
update the recovery state variables. Further, the server may return an
MSS smaller than the cached MSS used to build the SYN-data, so the
retransmission needs a loop. Otherwise some data will not be
retransmitted until a timeout or another loss-recovery event.
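
For illustration only (hypothetical byte counts, not taken from the
commit): if the SYN carried 1460 bytes of data sized to the cached MSS
and the SYN-ACK advertises an MSS of 536, the unacked payload no longer
fits in one segment. Because the retransmit helper fragments the skb to
the current MSS and sends only the head per call, a single call would
leave data sitting on the write queue, which is why the fix walks the
queue.

/* Illustrative arithmetic only (made-up byte counts): how many segments
 * the unacked SYN-data needs once the server advertises a smaller MSS
 * than the cached one used to build the SYN.
 */
#include <stdio.h>

int main(void)
{
	unsigned int syn_data_len = 1460;	/* payload sized to the cached MSS */
	unsigned int server_mss = 536;		/* smaller MSS from the SYN-ACK */

	/* Round up: each retransmit call moves at most one MSS-sized piece
	 * and leaves the remainder on the write queue, so the caller has
	 * to keep walking the queue. */
	unsigned int segs = (syn_data_len + server_mss - 1) / server_mss;

	printf("%u unacked bytes at mss %u -> %u segments to retransmit\n",
	       syn_data_len, server_mss, segs);
	return 0;
}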

Signed-off-by: Yuchung Cheng <ycheng@google.com>
Acked-by: Neal Cardwell <ncardwell@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Yuchung Cheng, 2012-12-06 08:45:32 +0000; committed by David S. Miller
commit 93b174ad71, parent 1afa471706
3 changed files with 16 additions and 6 deletions

include/net/tcp.h

@@ -525,6 +525,7 @@ static inline __u32 cookie_v6_init_sequence(struct sock *sk,
 extern void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
				       int nonagle);
 extern bool tcp_may_send_now(struct sock *sk);
+extern int __tcp_retransmit_skb(struct sock *, struct sk_buff *);
 extern int tcp_retransmit_skb(struct sock *, struct sk_buff *);
 extern void tcp_retransmit_timer(struct sock *sk);
 extern void tcp_xmit_retransmit_queue(struct sock *);

net/ipv4/tcp_input.c

@@ -5645,7 +5645,11 @@ static bool tcp_rcv_fastopen_synack(struct sock *sk, struct sk_buff *synack,
 	tcp_fastopen_cache_set(sk, mss, cookie, syn_drop);
 
 	if (data) { /* Retransmit unacked data in SYN */
-		tcp_retransmit_skb(sk, data);
+		tcp_for_write_queue_from(data, sk) {
+			if (data == tcp_send_head(sk) ||
+			    __tcp_retransmit_skb(sk, data))
+				break;
+		}
 		tcp_rearm_rto(sk);
 		return true;
 	}

net/ipv4/tcp_output.c

@@ -2309,12 +2309,11 @@ static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *to,
  * state updates are done by the caller. Returns non-zero if an
  * error occurred which prevented the send.
  */
-int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
+int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct inet_connection_sock *icsk = inet_csk(sk);
 	unsigned int cur_mss;
-	int err;
 
 	/* Inconslusive MTU probe */
 	if (icsk->icsk_mtup.probe_size) {
@@ -2387,11 +2386,17 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
 	if (unlikely(NET_IP_ALIGN && ((unsigned long)skb->data & 3))) {
 		struct sk_buff *nskb = __pskb_copy(skb, MAX_TCP_HEADER,
						   GFP_ATOMIC);
-		err = nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) :
+		return nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) :
			     -ENOBUFS;
 	} else {
-		err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
+		return tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
 	}
+}
+
+int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
+{
+	struct tcp_sock *tp = tcp_sk(sk);
+	int err = __tcp_retransmit_skb(sk, skb);
 
 	if (err == 0) {
 		/* Update global TCP statistics. */