net/tcp_fastopen: Add snmp counter for blackhole detection
This counter records the number of times the firewall blackhole issue is
detected and active TFO is disabled.

Signed-off-by: Wei Wang <weiwan@google.com>
Acked-by: Yuchung Cheng <ycheng@google.com>
Acked-by: Neal Cardwell <ncardwell@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit 46c2fa3987
parent cf1ef3f071
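After this change the counter is exported as "TCPFastOpenBlackhole" in the TcpExt rows of /proc/net/netstat (it is registered in snmp4_net_list in one of the hunks below). The user-space sketch that follows is not part of the patch; it only illustrates one way to read the counter back, and the helper name read_tfo_blackhole() is an invention for this example.

/* Illustrative only, not part of the patch: read the TCPFastOpenBlackhole
 * counter from the TcpExt section of /proc/net/netstat.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static long read_tfo_blackhole(void)
{
        char names[4096], values[4096];
        FILE *f = fopen("/proc/net/netstat", "r");
        long result = -1;

        if (!f)
                return -1;

        /* Counters are exported as a "TcpExt:" line of names followed by
         * a "TcpExt:" line of values in the same order.
         */
        while (fgets(names, sizeof(names), f)) {
                char *n, *v, *nsave, *vsave;

                if (strncmp(names, "TcpExt:", 7) != 0)
                        continue;
                if (!fgets(values, sizeof(values), f))
                        break;

                n = strtok_r(names, " \n", &nsave);
                v = strtok_r(values, " \n", &vsave);
                while (n && v) {
                        if (strcmp(n, "TCPFastOpenBlackhole") == 0) {
                                result = strtol(v, NULL, 10);
                                goto out;
                        }
                        n = strtok_r(NULL, " \n", &nsave);
                        v = strtok_r(NULL, " \n", &vsave);
                }
        }
out:
        fclose(f);
        return result;
}

int main(void)
{
        long n = read_tfo_blackhole();

        if (n < 0) {
                fprintf(stderr, "TCPFastOpenBlackhole not found (pre-patch kernel?)\n");
                return 1;
        }
        printf("TCPFastOpenBlackhole: %ld\n", n);
        return 0;
}

With iproute2 installed, "nstat -az TcpExtTCPFastOpenBlackhole" should report the same value without any custom code.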
@@ -1507,7 +1507,7 @@ struct tcp_fastopen_context {
 };
 
 extern unsigned int sysctl_tcp_fastopen_blackhole_timeout;
-void tcp_fastopen_active_disable(void);
+void tcp_fastopen_active_disable(struct sock *sk);
 bool tcp_fastopen_active_should_disable(struct sock *sk);
 void tcp_fastopen_active_disable_ofo_check(struct sock *sk);
 void tcp_fastopen_active_timeout_reset(void);
@@ -259,6 +259,7 @@ enum
         LINUX_MIB_TCPFASTOPENPASSIVEFAIL, /* TCPFastOpenPassiveFail */
         LINUX_MIB_TCPFASTOPENLISTENOVERFLOW, /* TCPFastOpenListenOverflow */
         LINUX_MIB_TCPFASTOPENCOOKIEREQD, /* TCPFastOpenCookieReqd */
+        LINUX_MIB_TCPFASTOPENBLACKHOLE, /* TCPFastOpenBlackholeDetect */
         LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES, /* TCPSpuriousRtxHostQueues */
         LINUX_MIB_BUSYPOLLRXPACKETS, /* BusyPollRxPackets */
         LINUX_MIB_TCPAUTOCORKING, /* TCPAutoCorking */
@@ -281,6 +281,7 @@ static const struct snmp_mib snmp4_net_list[] = {
         SNMP_MIB_ITEM("TCPFastOpenPassiveFail", LINUX_MIB_TCPFASTOPENPASSIVEFAIL),
         SNMP_MIB_ITEM("TCPFastOpenListenOverflow", LINUX_MIB_TCPFASTOPENLISTENOVERFLOW),
         SNMP_MIB_ITEM("TCPFastOpenCookieReqd", LINUX_MIB_TCPFASTOPENCOOKIEREQD),
+        SNMP_MIB_ITEM("TCPFastOpenBlackhole", LINUX_MIB_TCPFASTOPENBLACKHOLE),
         SNMP_MIB_ITEM("TCPSpuriousRtxHostQueues", LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES),
         SNMP_MIB_ITEM("BusyPollRxPackets", LINUX_MIB_BUSYPOLLRXPACKETS),
         SNMP_MIB_ITEM("TCPAutoCorking", LINUX_MIB_TCPAUTOCORKING),
@@ -410,10 +410,11 @@ static unsigned long tfo_active_disable_stamp __read_mostly;
 /* Disable active TFO and record current jiffies and
  * tfo_active_disable_times
  */
-void tcp_fastopen_active_disable(void)
+void tcp_fastopen_active_disable(struct sock *sk)
 {
         atomic_inc(&tfo_active_disable_times);
         tfo_active_disable_stamp = jiffies;
+        NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENBLACKHOLE);
 }
 
 /* Reset tfo_active_disable_times to 0 */
@@ -469,7 +470,7 @@ void tcp_fastopen_active_disable_ofo_check(struct sock *sk)
         if (p && !rb_next(p)) {
                 skb = rb_entry(p, struct sk_buff, rbnode);
                 if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) {
-                        tcp_fastopen_active_disable();
+                        tcp_fastopen_active_disable(sk);
                         return;
                 }
         }
@@ -5307,7 +5307,7 @@ static bool tcp_validate_incoming(struct sock *sk, struct sk_buff *skb,
                  */
                 if (tp->syn_fastopen && !tp->data_segs_in &&
                     sk->sk_state == TCP_ESTABLISHED)
-                        tcp_fastopen_active_disable();
+                        tcp_fastopen_active_disable(sk);
                 tcp_send_challenge_ack(sk, skb);
         }
         goto discard;
@@ -6061,7 +6061,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
                     after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt)) {
                         /* Receive out of order FIN after close() */
                         if (tp->syn_fastopen && th->fin)
-                                tcp_fastopen_active_disable();
+                                tcp_fastopen_active_disable(sk);
                         tcp_done(sk);
                         NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
                         return 1;