tcp: lack of available data can also cause TSO defer

tcp_tso_should_defer() can return true in three different cases:

 1) We are cwnd-limited
 2) We are rwnd-limited
 3) We are application limited.

Neal pointed out that my recent fix went too far, since
it assumed that if we were not in case 1), we must be rwnd-limited.

Fix this by properly populating the is_cwnd_limited and
is_rwnd_limited booleans.

After this change, we can finally move the silly check for the FIN
flag so that it applies only to the application-limited case.
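
For illustration, here is a minimal user-space sketch of the decision
rule this patch installs in tcp_tso_should_defer(). The classify()
helper, the defer_result struct and the main() driver are invented for
the example and are not kernel code; only the window/skb-length
comparisons mirror the patch:

/* Illustrative user-space model of the three-way classification in
 * tcp_tso_should_defer(); not kernel code.
 */
#include <stdbool.h>
#include <stdio.h>

struct defer_result {
	bool defer;		/* true: keep waiting for more data */
	bool is_cwnd_limited;
	bool is_rwnd_limited;
};

/* cong_win/send_win: bytes usable under cwnd and the receiver window;
 * skb_len: bytes already queued in this skb; has_fin: FIN flag set.
 */
static struct defer_result classify(unsigned int cong_win,
				    unsigned int send_win,
				    unsigned int skb_len, bool has_fin)
{
	struct defer_result r = { .defer = true };

	if (cong_win < send_win) {	/* cwnd is the tighter bound */
		if (cong_win <= skb_len) {
			r.is_cwnd_limited = true;	/* case 1 */
			return r;
		}
	} else {			/* rwnd is the tighter bound */
		if (send_win <= skb_len) {
			r.is_rwnd_limited = true;	/* case 2 */
			return r;
		}
	}

	/* Case 3: application limited. A FIN means no more payload is
	 * coming, so deferring would only add latency: send now.
	 */
	if (has_fin)
		r.defer = false;

	return r;
}

int main(void)
{
	/* cwnd (30000 bytes) is the smaller window and cannot take this
	 * 40000-byte skb: expect defer=1, cwnd_limited=1.
	 */
	struct defer_result r = classify(30000, 60000, 40000, false);

	printf("defer=%d cwnd_limited=%d rwnd_limited=%d\n",
	       r.defer, r.is_cwnd_limited, r.is_rwnd_limited);
	return 0;
}

With an skb smaller than both windows, neither boolean is set and the
skb is application limited; setting has_fin on such an skb makes
classify() report "send now", matching the FIN check moved below.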

The same move for the EOR bit will be handled in net-next,
since commit 1c09f7d073 ("tcp: do not try to defer skbs
with eor mark (MSG_EOR)") is scheduled for linux-4.21.

Tested by running 200 concurrent netperf -t TCP_RR -- -r 60000,100
sessions and checking that none of them was rwnd_limited in the
chrono_stat output of the "ss -ti" command.

Fixes: 41727549de ("tcp: Do not underestimate rwnd_limited")
Signed-off-by: Eric Dumazet <edumazet@google.com>
Suggested-by: Neal Cardwell <ncardwell@google.com>
Reviewed-by: Neal Cardwell <ncardwell@google.com>
Acked-by: Soheil Hassas Yeganeh <soheil@google.com>
Reviewed-by: Yuchung Cheng <ycheng@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

net/ipv4/tcp_output.c
@@ -1904,7 +1904,9 @@ static int tso_fragment(struct sock *sk, enum tcp_queue tcp_queue,
  * This algorithm is from John Heffner.
  */
 static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
-				 bool *is_cwnd_limited, u32 max_segs)
+				 bool *is_cwnd_limited,
+				 bool *is_rwnd_limited,
+				 u32 max_segs)
 {
 	const struct inet_connection_sock *icsk = inet_csk(sk);
 	u32 age, send_win, cong_win, limit, in_flight;
@@ -1912,9 +1914,6 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
 	struct sk_buff *head;
 	int win_divisor;
 
-	if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
-		goto send_now;
-
 	if (icsk->icsk_ca_state >= TCP_CA_Recovery)
 		goto send_now;
 
@@ -1973,10 +1972,27 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
 	if (age < (tp->srtt_us >> 4))
 		goto send_now;
 
-	/* Ok, it looks like it is advisable to defer. */
+	/* Ok, it looks like it is advisable to defer.
+	 * Three cases are tracked :
+	 * 1) We are cwnd-limited
+	 * 2) We are rwnd-limited
+	 * 3) We are application limited.
+	 */
+	if (cong_win < send_win) {
+		if (cong_win <= skb->len) {
+			*is_cwnd_limited = true;
+			return true;
+		}
+	} else {
+		if (send_win <= skb->len) {
+			*is_rwnd_limited = true;
+			return true;
+		}
+	}
 
-	if (cong_win < send_win && cong_win <= skb->len)
-		*is_cwnd_limited = true;
+	/* If this packet won't get more data, do not wait. */
+	if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
+		goto send_now;
 
 	return true;
@@ -2356,12 +2372,9 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
 		} else {
 			if (!push_one &&
 			    tcp_tso_should_defer(sk, skb, &is_cwnd_limited,
-						 max_segs)) {
-				if (!is_cwnd_limited)
-					is_rwnd_limited = true;
+						 &is_rwnd_limited, max_segs))
 				break;
-			}
 		}
 
 		limit = mss_now;
 		if (tso_segs > 1 && !tcp_urg_mode(tp))