Merge branch 'tcp_cong_ctrl_refactoring'
Yuchung Cheng says:

====================
tcp: congestion control refactoring

This patch set refactors the sequence of congestion control, loss recovery,
and transmission logic in TCP ack processing. The design goal is to decouple
and sequence them in the following order:

  0. ACK accounting: free or tag sent packets [unchanged]
  1. loss recovery: identify lost/ecn packets and update congestion state
  2. congestion control: up/down cwnd and pacing rate based on (1)
  3. transmission: send new or retransmit old based on (1) and (2)

This refactoring makes the cwnd changes clearer because they are done in one
place. The packet accounting is also more robust, especially for connections
that do not support SACK. Patches 1-4 and 6 are refactoring and patch 5
improves TCP performance under reordering.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
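To make the ordering above concrete, here is a minimal, self-contained C sketch of the ACK-processing sequence the series establishes. It is not kernel code: the mini_sock structure, the helpers loss_recovery/cong_control/xmit_recovery and the toy cwnd arithmetic are hypothetical stand-ins; only the REXMIT_* values and the "count deliveries first, adjust cwnd from that count, transmit last" idea mirror the patches below.

/* Toy model of the refactored tcp_ack() ordering; all names here are
 * illustrative stand-ins, not the kernel's own types or helpers.
 */
#include <stdio.h>

enum rexmit_mode { REXMIT_NONE = 0, REXMIT_LOST = 1, REXMIT_NEW = 2 };

struct mini_sock {
	unsigned int delivered;   /* total packets delivered, incl. rexmits */
	unsigned int cwnd;        /* congestion window, in packets */
	int in_recovery;          /* toy stand-in for the CA state machine */
};

/* Step 1: loss recovery - decide what must be (re)sent, but do not send yet. */
static void loss_recovery(struct mini_sock *sk, int dupack, enum rexmit_mode *rexmit)
{
	if (dupack) {
		sk->in_recovery = 1;
		*rexmit = REXMIT_LOST;   /* transmission is deferred */
	}
}

/* Step 2: congestion control - adjust cwnd purely from the delivery count. */
static void cong_control(struct mini_sock *sk, unsigned int newly_delivered)
{
	if (sk->in_recovery)
		sk->cwnd -= (sk->cwnd > newly_delivered) ? newly_delivered : 0;
	else
		sk->cwnd += newly_delivered;
}

/* Step 3: transmission - only now send new data or retransmit old data. */
static void xmit_recovery(struct mini_sock *sk, enum rexmit_mode rexmit)
{
	if (rexmit == REXMIT_NONE)
		return;
	printf("%s within cwnd=%u\n",
	       rexmit == REXMIT_NEW ? "send new data" : "retransmit lost data",
	       sk->cwnd);
}

/* ACK processing glued together in the order the cover letter describes. */
static void ack(struct mini_sock *sk, unsigned int acked, int dupack)
{
	enum rexmit_mode rexmit = REXMIT_NONE;
	unsigned int prior_delivered = sk->delivered;

	sk->delivered += acked;                             /* 0. accounting   */
	loss_recovery(sk, dupack, &rexmit);                 /* 1. recovery     */
	cong_control(sk, sk->delivered - prior_delivered);  /* 2. cong control */
	xmit_recovery(sk, rexmit);                          /* 3. transmission */
}

int main(void)
{
	struct mini_sock sk = { .delivered = 0, .cwnd = 10, .in_recovery = 0 };

	ack(&sk, 2, 0);  /* ordinary ACK: cwnd grows, nothing to retransmit */
	ack(&sk, 0, 1);  /* duplicate ACK: retransmission happens only after the cwnd update */
	return 0;
}

Running this prints the retransmit step only after the cwnd has been updated, which is exactly the decoupling the diff below implements with tcp_cong_control() and tcp_xmit_recovery().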
commit ffeb6437f0
include/linux/tcp.h
@@ -256,6 +256,7 @@ struct tcp_sock {
 	u32	prr_delivered;	/* Number of newly delivered packets to
 				 * receiver in Recovery. */
 	u32	prr_out;	/* Total number of pkts sent during Recovery. */
+	u32	delivered;	/* Total data packets delivered incl. rexmits */
 
 	u32	rcv_wnd;	/* Current receiver window */
 	u32	write_seq;	/* Tail(+1) of data held in tcp send buffer */
net/ipv4/tcp_input.c
@@ -126,6 +126,10 @@ int sysctl_tcp_invalid_ratelimit __read_mostly = HZ/2;
 #define TCP_REMNANT (TCP_FLAG_FIN|TCP_FLAG_URG|TCP_FLAG_SYN|TCP_FLAG_PSH)
 #define TCP_HP_BITS (~(TCP_RESERVED_BITS|TCP_FLAG_PSH))
 
+#define REXMIT_NONE	0 /* no loss recovery to do */
+#define REXMIT_LOST	1 /* retransmit packets marked lost */
+#define REXMIT_NEW	2 /* FRTO-style transmit of unsent/new packets */
+
 /* Adapt the MSS value used to make delayed ack decision to the
  * real world.
  */
@@ -1210,6 +1214,7 @@ static u8 tcp_sacktag_one(struct sock *sk,
 		sacked |= TCPCB_SACKED_ACKED;
 		state->flag |= FLAG_DATA_SACKED;
 		tp->sacked_out += pcount;
+		tp->delivered += pcount;  /* Out-of-order packets delivered */
 
 		fack_count += pcount;
 
@@ -1821,8 +1826,12 @@ static void tcp_check_reno_reordering(struct sock *sk, const int addend)
 static void tcp_add_reno_sack(struct sock *sk)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
+	u32 prior_sacked = tp->sacked_out;
+
 	tp->sacked_out++;
 	tcp_check_reno_reordering(sk, 0);
+	if (tp->sacked_out > prior_sacked)
+		tp->delivered++; /* Some out-of-order packet is delivered */
 	tcp_verify_left_out(tp);
 }
 
@@ -1834,6 +1843,7 @@ static void tcp_remove_reno_sacks(struct sock *sk, int acked)
 
 	if (acked > 0) {
 		/* One ACK acked hole. The rest eat duplicate ACKs. */
+		tp->delivered += max_t(int, acked - tp->sacked_out, 1);
 		if (acked - 1 >= tp->sacked_out)
 			tp->sacked_out = 0;
 		else
@@ -2467,14 +2477,12 @@ static void tcp_init_cwnd_reduction(struct sock *sk)
 	tcp_ecn_queue_cwr(tp);
 }
 
-static void tcp_cwnd_reduction(struct sock *sk, const int prior_unsacked,
-			       int fast_rexmit, int flag)
+static void tcp_cwnd_reduction(struct sock *sk, int newly_acked_sacked,
+			       int flag)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	int sndcnt = 0;
 	int delta = tp->snd_ssthresh - tcp_packets_in_flight(tp);
-	int newly_acked_sacked = prior_unsacked -
-				 (tp->packets_out - tp->sacked_out);
 
 	if (newly_acked_sacked <= 0 || WARN_ON_ONCE(!tp->prior_cwnd))
 		return;
@@ -2492,7 +2500,8 @@ static void tcp_cwnd_reduction(struct sock *sk, const int prior_unsacked,
 	} else {
 		sndcnt = min(delta, newly_acked_sacked);
 	}
-	sndcnt = max(sndcnt, (fast_rexmit ? 1 : 0));
+	/* Force a fast retransmit upon entering fast recovery */
+	sndcnt = max(sndcnt, (tp->prr_out ? 0 : 1));
 	tp->snd_cwnd = tcp_packets_in_flight(tp) + sndcnt;
 }
 
@@ -2537,7 +2546,7 @@ static void tcp_try_keep_open(struct sock *sk)
 	}
 }
 
-static void tcp_try_to_open(struct sock *sk, int flag, const int prior_unsacked)
+static void tcp_try_to_open(struct sock *sk, int flag)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 
@@ -2551,8 +2560,6 @@ static void tcp_try_to_open(struct sock *sk, int flag, const int prior_unsacked)
 
 	if (inet_csk(sk)->icsk_ca_state != TCP_CA_CWR) {
 		tcp_try_keep_open(sk);
-	} else {
-		tcp_cwnd_reduction(sk, prior_unsacked, 0, flag);
 	}
 }
 
@@ -2662,7 +2669,8 @@ static void tcp_enter_recovery(struct sock *sk, bool ece_ack)
 /* Process an ACK in CA_Loss state. Move to CA_Open if lost data are
  * recovered or spurious. Otherwise retransmits more on partial ACKs.
  */
-static void tcp_process_loss(struct sock *sk, int flag, bool is_dupack)
+static void tcp_process_loss(struct sock *sk, int flag, bool is_dupack,
+			     int *rexmit)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	bool recovered = !before(tp->snd_una, tp->high_seq);
@@ -2684,10 +2692,15 @@ static void tcp_process_loss(struct sock *sk, int flag, bool is_dupack)
 		tp->frto = 0; /* Step 3.a. loss was real */
 	} else if (flag & FLAG_SND_UNA_ADVANCED && !recovered) {
 		tp->high_seq = tp->snd_nxt;
-		__tcp_push_pending_frames(sk, tcp_current_mss(sk),
-					  TCP_NAGLE_OFF);
-		if (after(tp->snd_nxt, tp->high_seq))
-			return; /* Step 2.b */
+		/* Step 2.b. Try send new data (but deferred until cwnd
+		 * is updated in tcp_ack()). Otherwise fall back to
+		 * the conventional recovery.
+		 */
+		if (tcp_send_head(sk) &&
+		    after(tcp_wnd_end(tp), tp->snd_nxt)) {
+			*rexmit = REXMIT_NEW;
+			return;
+		}
 		tp->frto = 0;
 	}
 }
@@ -2706,12 +2719,11 @@ static void tcp_process_loss(struct sock *sk, int flag, bool is_dupack)
 		else if (flag & FLAG_SND_UNA_ADVANCED)
 			tcp_reset_reno_sack(tp);
 	}
-	tcp_xmit_retransmit_queue(sk);
+	*rexmit = REXMIT_LOST;
 }
 
 /* Undo during fast recovery after partial ACK. */
-static bool tcp_try_undo_partial(struct sock *sk, const int acked,
-				 const int prior_unsacked, int flag)
+static bool tcp_try_undo_partial(struct sock *sk, const int acked)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 
@@ -2726,10 +2738,8 @@ static bool tcp_try_undo_partial(struct sock *sk, const int acked,
 	 * can undo. Otherwise we clock out new packets but do not
 	 * mark more packets lost or retransmit more.
 	 */
-	if (tp->retrans_out) {
-		tcp_cwnd_reduction(sk, prior_unsacked, 0, flag);
+	if (tp->retrans_out)
 		return true;
-	}
 
 	if (!tcp_any_retrans_done(sk))
 		tp->retrans_stamp = 0;
@@ -2748,21 +2758,21 @@
  * taking into account both packets sitting in receiver's buffer and
  * packets lost by network.
  *
- * Besides that it does CWND reduction, when packet loss is detected
- * and changes state of machine.
+ * Besides that it updates the congestion state when packet loss or ECN
+ * is detected. But it does not reduce the cwnd, it is done by the
+ * congestion control later.
 *
 * It does _not_ decide what to send, it is made in function
 * tcp_xmit_retransmit_queue().
 */
 static void tcp_fastretrans_alert(struct sock *sk, const int acked,
-				  const int prior_unsacked,
-				  bool is_dupack, int flag)
+				  bool is_dupack, int *ack_flag, int *rexmit)
 {
 	struct inet_connection_sock *icsk = inet_csk(sk);
 	struct tcp_sock *tp = tcp_sk(sk);
+	int fast_rexmit = 0, flag = *ack_flag;
 	bool do_lost = is_dupack || ((flag & FLAG_DATA_SACKED) &&
 				    (tcp_fackets_out(tp) > tp->reordering));
-	int fast_rexmit = 0;
 
 	if (WARN_ON(!tp->packets_out && tp->sacked_out))
 		tp->sacked_out = 0;
@@ -2809,8 +2819,10 @@ static void tcp_fastretrans_alert(struct sock *sk, const int acked,
 
 	/* Use RACK to detect loss */
 	if (sysctl_tcp_recovery & TCP_RACK_LOST_RETRANS &&
-	    tcp_rack_mark_lost(sk))
-		flag |= FLAG_LOST_RETRANS;
+	    tcp_rack_mark_lost(sk)) {
+		flag |= FLAG_LOST_RETRANS;
+		*ack_flag |= FLAG_LOST_RETRANS;
+	}
 
 	/* E. Process state. */
 	switch (icsk->icsk_ca_state) {
@@ -2819,7 +2831,7 @@ static void tcp_fastretrans_alert(struct sock *sk, const int acked,
 		if (tcp_is_reno(tp) && is_dupack)
 			tcp_add_reno_sack(sk);
 	} else {
-		if (tcp_try_undo_partial(sk, acked, prior_unsacked, flag))
+		if (tcp_try_undo_partial(sk, acked))
 			return;
 		/* Partial ACK arrived. Force fast retransmit. */
 		do_lost = tcp_is_reno(tp) ||
@@ -2831,7 +2843,7 @@ static void tcp_fastretrans_alert(struct sock *sk, const int acked,
 		}
 		break;
 	case TCP_CA_Loss:
-		tcp_process_loss(sk, flag, is_dupack);
+		tcp_process_loss(sk, flag, is_dupack, rexmit);
 		if (icsk->icsk_ca_state != TCP_CA_Open &&
 		    !(flag & FLAG_LOST_RETRANS))
 			return;
@@ -2848,7 +2860,7 @@ static void tcp_fastretrans_alert(struct sock *sk, const int acked,
 		tcp_try_undo_dsack(sk);
 
 	if (!tcp_time_to_recover(sk, flag)) {
-		tcp_try_to_open(sk, flag, prior_unsacked);
+		tcp_try_to_open(sk, flag);
 		return;
 	}
 
@@ -2870,8 +2882,7 @@ static void tcp_fastretrans_alert(struct sock *sk, const int acked,
 
 	if (do_lost)
 		tcp_update_scoreboard(sk, fast_rexmit);
-	tcp_cwnd_reduction(sk, prior_unsacked, fast_rexmit, flag);
-	tcp_xmit_retransmit_queue(sk);
+	*rexmit = REXMIT_LOST;
 }
 
 /* Kathleen Nichols' algorithm for tracking the minimum value of
@@ -3093,7 +3104,7 @@ static void tcp_ack_tstamp(struct sock *sk, struct sk_buff *skb,
 * arrived at the other end.
 */
 static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
-			       u32 prior_snd_una,
+			       u32 prior_snd_una, int *acked,
 			       struct tcp_sacktag_state *sack)
 {
 	const struct inet_connection_sock *icsk = inet_csk(sk);
@@ -3151,10 +3162,13 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
 			flag |= FLAG_ORIG_SACK_ACKED;
 		}
 
-		if (sacked & TCPCB_SACKED_ACKED)
+		if (sacked & TCPCB_SACKED_ACKED) {
 			tp->sacked_out -= acked_pcount;
-		else if (tcp_is_sack(tp) && !tcp_skb_spurious_retrans(tp, skb))
-			tcp_rack_advance(tp, &skb->skb_mstamp, sacked);
+		} else if (tcp_is_sack(tp)) {
+			tp->delivered += acked_pcount;
+			if (!tcp_skb_spurious_retrans(tp, skb))
+				tcp_rack_advance(tp, &skb->skb_mstamp, sacked);
+		}
 		if (sacked & TCPCB_LOST)
 			tp->lost_out -= acked_pcount;
 
@@ -3263,6 +3277,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
 		}
 	}
 #endif
+	*acked = pkts_acked;
 	return flag;
 }
 
@@ -3296,9 +3311,6 @@ static inline bool tcp_ack_is_dubious(const struct sock *sk, const int flag)
 /* Decide wheather to run the increase function of congestion control. */
 static inline bool tcp_may_raise_cwnd(const struct sock *sk, const int flag)
 {
-	if (tcp_in_cwnd_reduction(sk))
-		return false;
-
 	/* If reordering is high then always grow cwnd whenever data is
 	 * delivered regardless of its ordering. Otherwise stay conservative
 	 * and only grow cwnd on in-order delivery (RFC5681). A stretched ACK w/
@@ -3311,6 +3323,24 @@ static inline bool tcp_may_raise_cwnd(const struct sock *sk, const int flag)
 	return flag & FLAG_DATA_ACKED;
 }
 
+/* The "ultimate" congestion control function that aims to replace the rigid
+ * cwnd increase and decrease control (tcp_cong_avoid,tcp_*cwnd_reduction).
+ * It's called toward the end of processing an ACK with precise rate
+ * information. All transmission or retransmission are delayed afterwards.
+ */
+static void tcp_cong_control(struct sock *sk, u32 ack, u32 acked_sacked,
+			     int flag)
+{
+	if (tcp_in_cwnd_reduction(sk)) {
+		/* Reduce cwnd if state mandates */
+		tcp_cwnd_reduction(sk, acked_sacked, flag);
+	} else if (tcp_may_raise_cwnd(sk, flag)) {
+		/* Advance cwnd if state allows */
+		tcp_cong_avoid(sk, ack, acked_sacked);
+	}
+	tcp_update_pacing_rate(sk);
+}
+
 /* Check that window update is acceptable.
 * The function assumes that snd_una<=ack<=snd_next.
 */
@@ -3506,6 +3536,27 @@ static inline void tcp_in_ack_event(struct sock *sk, u32 flags)
 		icsk->icsk_ca_ops->in_ack_event(sk, flags);
 }
 
+/* Congestion control has updated the cwnd already. So if we're in
+ * loss recovery then now we do any new sends (for FRTO) or
+ * retransmits (for CA_Loss or CA_recovery) that make sense.
+ */
+static void tcp_xmit_recovery(struct sock *sk, int rexmit)
+{
+	struct tcp_sock *tp = tcp_sk(sk);
+
+	if (rexmit == REXMIT_NONE)
+		return;
+
+	if (unlikely(rexmit == 2)) {
+		__tcp_push_pending_frames(sk, tcp_current_mss(sk),
+					  TCP_NAGLE_OFF);
+		if (after(tp->snd_nxt, tp->high_seq))
+			return;
+		tp->frto = 0;
+	}
+	tcp_xmit_retransmit_queue(sk);
+}
+
 /* This routine deals with incoming acks, but not outgoing ones. */
 static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 {
@@ -3518,8 +3569,9 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 	bool is_dupack = false;
 	u32 prior_fackets;
 	int prior_packets = tp->packets_out;
-	const int prior_unsacked = tp->packets_out - tp->sacked_out;
+	u32 prior_delivered = tp->delivered;
 	int acked = 0; /* Number of packets newly acked */
+	int rexmit = REXMIT_NONE; /* Flag to (re)transmit to recover losses */
 
 	sack_state.first_sackt.v64 = 0;
 
@@ -3608,23 +3660,16 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 		goto no_queue;
 
 	/* See if we can take anything off of the retransmit queue. */
-	acked = tp->packets_out;
-	flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una,
+	flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una, &acked,
 				    &sack_state);
-	acked -= tp->packets_out;
 
 	if (tcp_ack_is_dubious(sk, flag)) {
 		is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP));
-		tcp_fastretrans_alert(sk, acked, prior_unsacked,
-				      is_dupack, flag);
+		tcp_fastretrans_alert(sk, acked, is_dupack, &flag, &rexmit);
 	}
 	if (tp->tlp_high_seq)
 		tcp_process_tlp_ack(sk, ack, flag);
 
-	/* Advance cwnd if state allows */
-	if (tcp_may_raise_cwnd(sk, flag))
-		tcp_cong_avoid(sk, ack, acked);
-
 	if ((flag & FLAG_FORWARD_PROGRESS) || !(flag & FLAG_NOT_DUP)) {
 		struct dst_entry *dst = __sk_dst_get(sk);
 		if (dst)
@@ -3633,14 +3678,14 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 
 	if (icsk->icsk_pending == ICSK_TIME_RETRANS)
 		tcp_schedule_loss_probe(sk);
-	tcp_update_pacing_rate(sk);
+	tcp_cong_control(sk, ack, tp->delivered - prior_delivered, flag);
+	tcp_xmit_recovery(sk, rexmit);
 	return 1;
 
 no_queue:
 	/* If data was DSACKed, see if we can undo a cwnd reduction. */
 	if (flag & FLAG_DSACKING_ACK)
-		tcp_fastretrans_alert(sk, acked, prior_unsacked,
-				      is_dupack, flag);
+		tcp_fastretrans_alert(sk, acked, is_dupack, &flag, &rexmit);
 	/* If this ack opens up a zero window, clear backoff. It was
 	 * being used to time the probes, and is probably far higher than
 	 * it needs to be for normal retransmission.
@@ -3663,8 +3708,8 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 	if (TCP_SKB_CB(skb)->sacked) {
 		flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una,
 						&sack_state);
-		tcp_fastretrans_alert(sk, acked, prior_unsacked,
-				      is_dupack, flag);
+		tcp_fastretrans_alert(sk, acked, is_dupack, &flag, &rexmit);
+		tcp_xmit_recovery(sk, rexmit);
 	}
 
 	SOCK_DEBUG(sk, "Ack %u before %u:%u\n", ack, tp->snd_una, tp->snd_nxt);