tcp: tcp_mark_head_lost is only valid for sack-tcp

So the tcp_is_sack()/tcp_is_reno() checks are removed from tcp_mark_head_lost().
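For context (not part of this patch): by this point tcp_mark_head_lost() has a single caller, tcp_update_scoreboard(), which already gates it on tcp_is_sack(), so the removed branches were unreachable for Reno flows. A sketch of that call site, paraphrased and possibly differing in detail from the exact tree this applies to:

/* net/ipv4/tcp_input.c -- the caller, unchanged by this patch (paraphrased) */
static void tcp_update_scoreboard(struct sock *sk, int fast_rexmit)
{
        struct tcp_sock *tp = tcp_sk(sk);

        /* RFC6675-style marking runs only for SACK connections;
         * non-SACK (Reno) loss marking goes through tcp_newreno_mark_lost(). */
        if (tcp_is_sack(tp)) {
                int sacked_upto = tp->sacked_out - tp->reordering;

                if (sacked_upto >= 0)
                        tcp_mark_head_lost(sk, sacked_upto, 0);
                else if (fast_rexmit)
                        tcp_mark_head_lost(sk, 1, 1);
        }
}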

Signed-off-by: zhang kai <zhangkaiheb@126.com>
Acked-by: Neal Cardwell <ncardwell@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit 636ef28d6e
parent c75a33c84b
Author: zhang kai <zhangkaiheb@126.com>
Date: 2020-05-07 11:08:30 +08:00
Committer: David S. Miller
1 file changed, 7 insertions(+), 25 deletions(-)

diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c

@@ -2183,8 +2183,7 @@ static bool tcp_time_to_recover(struct sock *sk, int flag)
 }
 
 /* Detect loss in event "A" above by marking head of queue up as lost.
- * For non-SACK(Reno) senders, the first "packets" number of segments
- * are considered lost. For RFC3517 SACK, a segment is considered lost if it
+ * For RFC3517 SACK, a segment is considered lost if it
  * has at least tp->reordering SACKed seqments above it; "packets" refers to
  * the maximum SACKed segments to pass before reaching this limit.
  */
@@ -2192,10 +2191,9 @@ static void tcp_mark_head_lost(struct sock *sk, int packets, int mark_head)
 {
         struct tcp_sock *tp = tcp_sk(sk);
         struct sk_buff *skb;
-        int cnt, oldcnt, lost;
-        unsigned int mss;
+        int cnt;
         /* Use SACK to deduce losses of new sequences sent during recovery */
-        const u32 loss_high = tcp_is_sack(tp) ? tp->snd_nxt : tp->high_seq;
+        const u32 loss_high = tp->snd_nxt;
 
         WARN_ON(packets > tp->packets_out);
         skb = tp->lost_skb_hint;
@@ -2218,27 +2216,12 @@ static void tcp_mark_head_lost(struct sock *sk, int packets, int mark_head)
                 if (after(TCP_SKB_CB(skb)->end_seq, loss_high))
                         break;
 
-                oldcnt = cnt;
-                if (tcp_is_reno(tp) ||
-                    (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED))
+                if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
                         cnt += tcp_skb_pcount(skb);
 
-                if (cnt > packets) {
-                        if (tcp_is_sack(tp) ||
-                            (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED) ||
-                            (oldcnt >= packets))
-                                break;
-
-                        mss = tcp_skb_mss(skb);
-                        /* If needed, chop off the prefix to mark as lost. */
-                        lost = (packets - oldcnt) * mss;
-                        if (lost < skb->len &&
-                            tcp_fragment(sk, TCP_FRAG_IN_RTX_QUEUE, skb,
-                                         lost, mss, GFP_ATOMIC) < 0)
-                                break;
-                        cnt = packets;
-                }
+                if (cnt > packets)
+                        break;
 
                 tcp_skb_mark_lost(tp, skb);
 
                 if (mark_head)
@@ -2849,8 +2832,7 @@ static void tcp_fastretrans_alert(struct sock *sk, const u32 prior_snd_una,
                 if (tcp_try_undo_partial(sk, prior_snd_una))
                         return;
                 /* Partial ACK arrived. Force fast retransmit. */
-                do_lost = tcp_is_reno(tp) ||
-                          tcp_force_fast_retransmit(sk);
+                do_lost = tcp_force_fast_retransmit(sk);
         }
         if (tcp_try_undo_dsack(sk)) {
                 tcp_try_keep_open(sk);
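To make the surviving logic concrete, here is a small stand-alone model of the simplified loop (hypothetical user-space C, not kernel code; all names and values are illustrative). It walks the head of a mock retransmit queue, counts SACKed pcounts, and stops once more than "packets" of them have been passed:

#include <stdbool.h>
#include <stdio.h>

struct seg {
        unsigned int end_seq;
        bool sacked;            /* stands in for TCPCB_SACKED_ACKED */
        bool lost;
        int pcount;             /* tcp_skb_pcount(): MSS-sized chunks in the skb */
};

/* Model of the post-patch tcp_mark_head_lost() walk (RFC3517-style):
 * mark head segments lost until more than "packets" SACKed pcounts
 * have been passed, or we reach sequences at or above loss_high. */
static void mark_head_lost(struct seg *q, int n, int packets,
                           unsigned int loss_high)
{
        int cnt = 0;

        for (int i = 0; i < n; i++) {
                if (q[i].end_seq > loss_high)   /* after(), ignoring seq wrap */
                        break;
                if (q[i].sacked)
                        cnt += q[i].pcount;
                if (cnt > packets)
                        break;
                /* tcp_skb_mark_lost() skips already SACKed/lost skbs */
                if (!q[i].sacked && !q[i].lost)
                        q[i].lost = true;
        }
}

int main(void)
{
        /* Head of a mock queue: seg 0 unacked, segs 1-3 SACKed, seg 4 unacked */
        struct seg q[] = {
                { 1000, false, false, 1 },
                { 2000, true,  false, 1 },
                { 3000, true,  false, 1 },
                { 4000, true,  false, 1 },
                { 5000, false, false, 1 },
        };

        /* The caller computes packets = sacked_out - reordering
         * (3 - 2 = 1 with reordering == 2 here) and loss_high = snd_nxt. */
        mark_head_lost(q, 5, 1, 6000);

        for (int i = 0; i < 5; i++)
                printf("seg %d: sacked=%d lost=%d\n", i, q[i].sacked, q[i].lost);
        return 0;
}

Only seg 0 gets marked lost: the walk stops as soon as more than "packets" SACKed pcounts lie above the head. The branches removed by this patch existed solely to emulate head-of-line marking (including the tcp_fragment() prefix chopping) for non-SACK senders, which this path can no longer reach.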