net: annotate lockless accesses to sk->sk_max_ack_backlog
sk->sk_max_ack_backlog can be read without any lock being held,
at least in the TCP/DCCP cases.

We need to use READ_ONCE()/WRITE_ONCE() to avoid load/store tearing
and/or potential KCSAN warnings.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 288efe8606
commit 099ecf59f0
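For context, a minimal sketch of what these annotations do, assuming simple word-sized accesses (the real definitions in include/linux/compiler.h add type checking and handling for other access sizes). The volatile cast forces the compiler to emit exactly one full-width load or store, so it cannot tear the access into smaller pieces, cache it in a register, or refetch it:

/* Simplified model of the kernel's one-shot access helpers. */
#define READ_ONCE(x)		(*(const volatile typeof(x) *)&(x))
#define WRITE_ONCE(x, val)	(*(volatile typeof(x) *)&(x) = (val))

KCSAN (the Kernel Concurrency Sanitizer) treats such marked accesses as intentional, so the annotations both document the racy accesses and silence the corresponding data-race reports.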
@@ -869,7 +869,7 @@ static inline void sk_acceptq_added(struct sock *sk)
 
 static inline bool sk_acceptq_is_full(const struct sock *sk)
 {
-	return READ_ONCE(sk->sk_ack_backlog) > sk->sk_max_ack_backlog;
+	return READ_ONCE(sk->sk_ack_backlog) > READ_ONCE(sk->sk_max_ack_backlog);
 }
 
 /*
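The READ_ONCE() added above is the reader half of the pairing; the writer half is the WRITE_ONCE() stores added in the listen() paths below. A sketch of the race being annotated, assuming two CPUs operating on the same listener socket (drop() is an illustrative stand-in, not a real kernel function):

/* CPU 0: lockless fast path, e.g. deciding whether to accept a SYN. */
if (READ_ONCE(sk->sk_ack_backlog) > READ_ONCE(sk->sk_max_ack_backlog))
	drop();		/* accept queue is considered full */

/* CPU 1: listen(fd, backlog) adjusting the limit concurrently. */
WRITE_ONCE(sk->sk_max_ack_backlog, backlog);

Neither CPU holds a lock ordering these two accesses; the annotations only guarantee that each access happens in one piece.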
@@ -944,7 +944,7 @@ int inet_dccp_listen(struct socket *sock, int backlog)
 	if (!((1 << old_state) & (DCCPF_CLOSED | DCCPF_LISTEN)))
 		goto out;
 
-	sk->sk_max_ack_backlog = backlog;
+	WRITE_ONCE(sk->sk_max_ack_backlog, backlog);
 	/* Really, if the socket is already in listen state
 	 * we can only allow the backlog to be adjusted.
 	 */
@@ -208,7 +208,7 @@ int inet_listen(struct socket *sock, int backlog)
 	if (!((1 << old_state) & (TCPF_CLOSE | TCPF_LISTEN)))
 		goto out;
 
-	sk->sk_max_ack_backlog = backlog;
+	WRITE_ONCE(sk->sk_max_ack_backlog, backlog);
 	/* Really, if the socket is already in listen state
 	 * we can only allow the backlog to be adjusted.
 	 */
@@ -716,7 +716,7 @@ static void reqsk_timer_handler(struct timer_list *t)
 	 * ones are about to clog our table.
 	 */
 	qlen = reqsk_queue_len(queue);
-	if ((qlen << 1) > max(8U, sk_listener->sk_max_ack_backlog)) {
+	if ((qlen << 1) > max(8U, READ_ONCE(sk_listener->sk_max_ack_backlog))) {
 		int young = reqsk_queue_len_young(queue) << 1;
 
 		while (thresh > 2) {
@@ -3226,7 +3226,7 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
 		 * tcpi_sacked -> max backlog
 		 */
 		info->tcpi_unacked = READ_ONCE(sk->sk_ack_backlog);
-		info->tcpi_sacked = sk->sk_max_ack_backlog;
+		info->tcpi_sacked = READ_ONCE(sk->sk_max_ack_backlog);
 		return;
 	}
 
@@ -22,7 +22,7 @@ static void tcp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
 
 	if (inet_sk_state_load(sk) == TCP_LISTEN) {
 		r->idiag_rqueue = READ_ONCE(sk->sk_ack_backlog);
-		r->idiag_wqueue = sk->sk_max_ack_backlog;
+		r->idiag_wqueue = READ_ONCE(sk->sk_max_ack_backlog);
 	} else if (sk->sk_type == SOCK_STREAM) {
 		const struct tcp_sock *tp = tcp_sk(sk);
 
@@ -532,7 +532,7 @@ META_COLLECTOR(int_sk_max_ack_bl)
 		*err = -1;
 		return;
 	}
-	dst->value = sk->sk_max_ack_backlog;
+	dst->value = READ_ONCE(sk->sk_max_ack_backlog);
 }
 
 META_COLLECTOR(int_sk_prio)
@@ -426,7 +426,7 @@ static void sctp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
 		r->idiag_wqueue = infox->asoc->sndbuf_used;
 	} else {
 		r->idiag_rqueue = READ_ONCE(sk->sk_ack_backlog);
-		r->idiag_wqueue = sk->sk_max_ack_backlog;
+		r->idiag_wqueue = READ_ONCE(sk->sk_max_ack_backlog);
 	}
 	if (infox->sctpinfo)
 		sctp_get_sctp_info(sk, infox->asoc, infox->sctpinfo);
@@ -8376,7 +8376,7 @@ static int sctp_listen_start(struct sock *sk, int backlog)
 		}
 	}
 
-	sk->sk_max_ack_backlog = backlog;
+	WRITE_ONCE(sk->sk_max_ack_backlog, backlog);
 	return sctp_hash_endpoint(ep);
 }
 
@@ -8430,7 +8430,7 @@ int sctp_inet_listen(struct socket *sock, int backlog)
 
 	/* If we are already listening, just update the backlog */
 	if (sctp_sstate(sk, LISTENING))
-		sk->sk_max_ack_backlog = backlog;
+		WRITE_ONCE(sk->sk_max_ack_backlog, backlog);
 	else {
 		err = sctp_listen_start(sk, backlog);
 		if (err)