RxRPC fixes
-----BEGIN PGP SIGNATURE-----

iQIzBAABCAAdFiEEqG5UsNXhtOCrfGQP+7dXa6fLC2sFAl439W0ACgkQ+7dXa6fL
C2uT5RAAl+rytJS768Aa6c/S9TPBaNOKzFQmgNaWzAtgC5BAtfZf1Bl0okFOba2V
QtcFmTogRl7BEnf9S3w3z25qj7Nv5udjIaAbH7cO6j1vuIwRpvtEZ7WXlE3L4hxA
DscSysqI9L5ISsnzfloUvA/biA9azsH2ckgMxFo++YmLHvagXW0lmkE4yZw+aZ/T
DstFAMAnFQwEAyu+Et3bo32/382aJh+gBGgV/2wuHBPgzQMhiWdosKdNnEZtZ3V2
HvgVuAU2V4hOf8OuaPuBnZrJPondTq5e1W+5mQiYLYzTCTOs6rZ1gUGfEXth3d1H
ABJ0FoyPgN783msb0yL6OOLMWiTc9USiLB8a3U2vJOD+hQTkPSYSGsCObeIGVW/Y
dbBd5fwudF78LIP9TjYWbau3vHtV73N4Mc1UT219jew0+Hi1ik5VIj8q0wdBIkzm
vKcVznXnPReOUBVKmqA2scnXs8EA4w6bTCjyd3JYUBK2qLchz366s+0oYyIK8Nq0
dy8TVISaCSfohI4nAYAv90AUIDa+zdHKuttBsAD90yVM/wrnokqc/RonfWjHs32k
SerM0RclPidool3Hkc/w0uvPywDQg/S5sKAW6E6lpfTppOwIY0wq/yZc5N2d9FZV
UrPZVGhQszC7AM/CBKXDT0ZMrPnAfGO8PMPLyFrAcv2p9O7XuWA=
=kHIG
-----END PGP SIGNATURE-----

Merge tag 'rxrpc-fixes-20200203' of git://git.kernel.org/pub/scm/linux/kernel/git/dhowells/linux-fs

David Howells says:

====================
RxRPC fixes

Here are a number of fixes for AF_RXRPC:

 (1) Fix a potential use after free in rxrpc_put_local() where it was
     accessing the object just put to get tracing information.

 (2) Fix insufficient notifications being generated by the function that
     queues data packets on a call.  This occasionally causes recvmsg() to
     stall indefinitely.

 (3) Fix a number of packet-transmitting work functions to hold an active
     count on the local endpoint so that the UDP socket doesn't get
     destroyed whilst they're calling kernel_sendmsg() on it.

 (4) Fix a NULL pointer deref that stemmed from a call's connection
     pointer being cleared when the call was disconnected.

Changes:
 v2: Removed a couple of BUG() statements that got added.
====================

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
commit 3d80c653f9
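Before the diff, a minimal illustration of the active-use pinning that fix (3) relies on. This is a stand-alone user-space C11 sketch, not the kernel code: the struct, helper names and the main() harness below are invented for illustration, but the counting mirrors the __rxrpc_use_local()/__rxrpc_unuse_local() helpers added to ar-internal.h and the way rxrpc_process_connection() wraps its transmission work in them in the diff that follows.

/*
 * Stand-alone sketch (user-space C11) of the active-use pinning pattern.
 * The type and function names here are illustrative only; in the kernel
 * the count lives in struct rxrpc_local::active_users.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct local_endpoint {
        atomic_int active_users;        /* pins the underlying UDP socket */
};

/* Like __rxrpc_use_local(): take a use unless the count already hit zero. */
static bool use_endpoint(struct local_endpoint *local)
{
        int old = atomic_load(&local->active_users);

        do {
                if (old == 0)
                        return false;   /* endpoint is being torn down */
        } while (!atomic_compare_exchange_weak(&local->active_users,
                                               &old, old + 1));
        return true;
}

/* Like __rxrpc_unuse_local(): drop a use; true means we were the last user. */
static bool unuse_endpoint(struct local_endpoint *local)
{
        return atomic_fetch_sub(&local->active_users, 1) == 1;
}

/* Shape of a packet-transmitting work function after the fix. */
static void transmit_work(struct local_endpoint *local)
{
        if (!use_endpoint(local)) {
                puts("endpoint already dying: skip transmission");
                return;
        }
        puts("endpoint pinned: safe to use the socket here");
        if (unuse_endpoint(local))
                puts("last user gone: endpoint may now be destroyed");
}

int main(void)
{
        struct local_endpoint local;

        atomic_init(&local.active_users, 1);    /* one user: the open socket */
        transmit_work(&local);
        return 0;
}

The point of the pattern is that a work item either succeeds in pinning the endpoint before touching its UDP socket or skips transmission entirely, so the socket can never be torn down underneath kernel_sendmsg().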
net/rxrpc/af_rxrpc.c

@@ -194,6 +194,7 @@ static int rxrpc_bind(struct socket *sock, struct sockaddr *saddr, int len)
 service_in_use:
         write_unlock(&local->services_lock);
         rxrpc_unuse_local(local);
+        rxrpc_put_local(local);
         ret = -EADDRINUSE;
 error_unlock:
         release_sock(&rx->sk);
@@ -899,6 +900,7 @@ static int rxrpc_release_sock(struct sock *sk)
         rxrpc_purge_queue(&sk->sk_receive_queue);
 
         rxrpc_unuse_local(rx->local);
+        rxrpc_put_local(rx->local);
         rx->local = NULL;
         key_put(rx->key);
         rx->key = NULL;
net/rxrpc/ar-internal.h

@@ -490,6 +490,7 @@ enum rxrpc_call_flag {
         RXRPC_CALL_RX_HEARD,            /* The peer responded at least once to this call */
         RXRPC_CALL_RX_UNDERRUN,         /* Got data underrun */
         RXRPC_CALL_IS_INTR,             /* The call is interruptible */
+        RXRPC_CALL_DISCONNECTED,        /* The call has been disconnected */
 };
 
 /*
@@ -1021,6 +1022,16 @@ void rxrpc_unuse_local(struct rxrpc_local *);
 void rxrpc_queue_local(struct rxrpc_local *);
 void rxrpc_destroy_all_locals(struct rxrpc_net *);
 
+static inline bool __rxrpc_unuse_local(struct rxrpc_local *local)
+{
+        return atomic_dec_return(&local->active_users) == 0;
+}
+
+static inline bool __rxrpc_use_local(struct rxrpc_local *local)
+{
+        return atomic_fetch_add_unless(&local->active_users, 1, 0) != 0;
+}
+
 /*
  * misc.c
  */
net/rxrpc/call_object.c

@@ -493,7 +493,7 @@ void rxrpc_release_call(struct rxrpc_sock *rx, struct rxrpc_call *call)
 
         _debug("RELEASE CALL %p (%d CONN %p)", call, call->debug_id, conn);
 
-        if (conn)
+        if (conn && !test_bit(RXRPC_CALL_DISCONNECTED, &call->flags))
                 rxrpc_disconnect_call(call);
         if (call->security)
                 call->security->free_call_crypto(call);
@@ -569,6 +569,7 @@ static void rxrpc_rcu_destroy_call(struct rcu_head *rcu)
         struct rxrpc_call *call = container_of(rcu, struct rxrpc_call, rcu);
         struct rxrpc_net *rxnet = call->rxnet;
 
+        rxrpc_put_connection(call->conn);
         rxrpc_put_peer(call->peer);
         kfree(call->rxtx_buffer);
         kfree(call->rxtx_annotations);
@@ -590,7 +591,6 @@ void rxrpc_cleanup_call(struct rxrpc_call *call)
 
         ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE);
         ASSERT(test_bit(RXRPC_CALL_RELEASED, &call->flags));
-        ASSERTCMP(call->conn, ==, NULL);
 
         rxrpc_cleanup_ring(call);
         rxrpc_free_skb(call->tx_pending, rxrpc_skb_cleaned);
net/rxrpc/conn_client.c

@@ -785,6 +785,7 @@ void rxrpc_disconnect_client_call(struct rxrpc_call *call)
         u32 cid;
 
         spin_lock(&conn->channel_lock);
+        set_bit(RXRPC_CALL_DISCONNECTED, &call->flags);
 
         cid = call->cid;
         if (cid) {
@@ -792,7 +793,6 @@ void rxrpc_disconnect_client_call(struct rxrpc_call *call)
                 chan = &conn->channels[channel];
         }
         trace_rxrpc_client(conn, channel, rxrpc_client_chan_disconnect);
-        call->conn = NULL;
 
         /* Calls that have never actually been assigned a channel can simply be
          * discarded.  If the conn didn't get used either, it will follow
@@ -908,7 +908,6 @@ void rxrpc_disconnect_client_call(struct rxrpc_call *call)
         spin_unlock(&rxnet->client_conn_cache_lock);
 out_2:
         spin_unlock(&conn->channel_lock);
-        rxrpc_put_connection(conn);
         _leave("");
         return;
 
net/rxrpc/conn_event.c

@@ -438,16 +438,12 @@ static void rxrpc_process_delayed_final_acks(struct rxrpc_connection *conn)
 /*
  * connection-level event processor
  */
-void rxrpc_process_connection(struct work_struct *work)
+static void rxrpc_do_process_connection(struct rxrpc_connection *conn)
 {
-        struct rxrpc_connection *conn =
-                container_of(work, struct rxrpc_connection, processor);
         struct sk_buff *skb;
         u32 abort_code = RX_PROTOCOL_ERROR;
         int ret;
 
-        rxrpc_see_connection(conn);
-
         if (test_and_clear_bit(RXRPC_CONN_EV_CHALLENGE, &conn->events))
                 rxrpc_secure_connection(conn);
 
@@ -475,18 +471,32 @@ void rxrpc_process_connection(struct work_struct *work)
                 }
         }
 
-out:
-        rxrpc_put_connection(conn);
-        _leave("");
         return;
 
 requeue_and_leave:
         skb_queue_head(&conn->rx_queue, skb);
-        goto out;
+        return;
 
 protocol_error:
         if (rxrpc_abort_connection(conn, ret, abort_code) < 0)
                 goto requeue_and_leave;
         rxrpc_free_skb(skb, rxrpc_skb_freed);
-        goto out;
+        return;
+}
+
+void rxrpc_process_connection(struct work_struct *work)
+{
+        struct rxrpc_connection *conn =
+                container_of(work, struct rxrpc_connection, processor);
+
+        rxrpc_see_connection(conn);
+
+        if (__rxrpc_use_local(conn->params.local)) {
+                rxrpc_do_process_connection(conn);
+                rxrpc_unuse_local(conn->params.local);
+        }
+
+        rxrpc_put_connection(conn);
+        _leave("");
+        return;
 }
net/rxrpc/conn_object.c

@@ -171,6 +171,8 @@ void __rxrpc_disconnect_call(struct rxrpc_connection *conn,
 
         _enter("%d,%x", conn->debug_id, call->cid);
 
+        set_bit(RXRPC_CALL_DISCONNECTED, &call->flags);
+
         if (rcu_access_pointer(chan->call) == call) {
                 /* Save the result of the call so that we can repeat it if necessary
                  * through the channel, whilst disposing of the actual call record.
@@ -223,9 +225,7 @@ void rxrpc_disconnect_call(struct rxrpc_call *call)
         __rxrpc_disconnect_call(conn, call);
         spin_unlock(&conn->channel_lock);
 
-        call->conn = NULL;
         conn->idle_timestamp = jiffies;
-        rxrpc_put_connection(conn);
 }
 
 /*
net/rxrpc/input.c

@@ -599,10 +599,8 @@ static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb)
                                   false, true,
                                   rxrpc_propose_ack_input_data);
 
-        if (seq0 == READ_ONCE(call->rx_hard_ack) + 1) {
-                trace_rxrpc_notify_socket(call->debug_id, serial);
-                rxrpc_notify_socket(call);
-        }
+        trace_rxrpc_notify_socket(call->debug_id, serial);
+        rxrpc_notify_socket(call);
 
 unlock:
         spin_unlock(&call->input_lock);
net/rxrpc/local_object.c

@@ -364,11 +364,14 @@ void rxrpc_queue_local(struct rxrpc_local *local)
 void rxrpc_put_local(struct rxrpc_local *local)
 {
         const void *here = __builtin_return_address(0);
+        unsigned int debug_id;
         int n;
 
         if (local) {
+                debug_id = local->debug_id;
+
                 n = atomic_dec_return(&local->usage);
-                trace_rxrpc_local(local->debug_id, rxrpc_local_put, n, here);
+                trace_rxrpc_local(debug_id, rxrpc_local_put, n, here);
 
                 if (n == 0)
                         call_rcu(&local->rcu, rxrpc_local_rcu);
@@ -380,14 +383,11 @@ void rxrpc_put_local(struct rxrpc_local *local)
  */
 struct rxrpc_local *rxrpc_use_local(struct rxrpc_local *local)
 {
-        unsigned int au;
-
         local = rxrpc_get_local_maybe(local);
         if (!local)
                 return NULL;
 
-        au = atomic_fetch_add_unless(&local->active_users, 1, 0);
-        if (au == 0) {
+        if (!__rxrpc_use_local(local)) {
                 rxrpc_put_local(local);
                 return NULL;
         }
@@ -401,14 +401,11 @@ struct rxrpc_local *rxrpc_use_local(struct rxrpc_local *local)
  */
 void rxrpc_unuse_local(struct rxrpc_local *local)
 {
-        unsigned int au;
-
         if (local) {
-                au = atomic_dec_return(&local->active_users);
-                if (au == 0)
+                if (__rxrpc_unuse_local(local)) {
+                        rxrpc_get_local(local);
                         rxrpc_queue_local(local);
-                else
-                        rxrpc_put_local(local);
+                }
         }
 }
 
@@ -465,7 +462,7 @@ static void rxrpc_local_processor(struct work_struct *work)
 
         do {
                 again = false;
-                if (atomic_read(&local->active_users) == 0) {
+                if (!__rxrpc_use_local(local)) {
                         rxrpc_local_destroyer(local);
                         break;
                 }
@@ -479,6 +476,8 @@ static void rxrpc_local_processor(struct work_struct *work)
                         rxrpc_process_local_events(local);
                         again = true;
                 }
+
+                __rxrpc_unuse_local(local);
         } while (again);
 
         rxrpc_put_local(local);
net/rxrpc/output.c

@@ -129,7 +129,7 @@ static size_t rxrpc_fill_out_ack(struct rxrpc_connection *conn,
 int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping,
                           rxrpc_serial_t *_serial)
 {
-        struct rxrpc_connection *conn = NULL;
+        struct rxrpc_connection *conn;
         struct rxrpc_ack_buffer *pkt;
         struct msghdr msg;
         struct kvec iov[2];
@@ -139,18 +139,14 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping,
         int ret;
         u8 reason;
 
-        spin_lock_bh(&call->lock);
-        if (call->conn)
-                conn = rxrpc_get_connection_maybe(call->conn);
-        spin_unlock_bh(&call->lock);
-        if (!conn)
+        if (test_bit(RXRPC_CALL_DISCONNECTED, &call->flags))
                 return -ECONNRESET;
 
         pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
-        if (!pkt) {
-                rxrpc_put_connection(conn);
+        if (!pkt)
                 return -ENOMEM;
-        }
+
+        conn = call->conn;
 
         msg.msg_name = &call->peer->srx.transport;
         msg.msg_namelen = call->peer->srx.transport_len;
@@ -244,7 +240,6 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping,
         }
 
 out:
-        rxrpc_put_connection(conn);
         kfree(pkt);
         return ret;
 }
@@ -254,7 +249,7 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping,
  */
 int rxrpc_send_abort_packet(struct rxrpc_call *call)
 {
-        struct rxrpc_connection *conn = NULL;
+        struct rxrpc_connection *conn;
         struct rxrpc_abort_buffer pkt;
         struct msghdr msg;
         struct kvec iov[1];
@@ -271,13 +266,11 @@ int rxrpc_send_abort_packet(struct rxrpc_call *call)
             test_bit(RXRPC_CALL_TX_LAST, &call->flags))
                 return 0;
 
-        spin_lock_bh(&call->lock);
-        if (call->conn)
-                conn = rxrpc_get_connection_maybe(call->conn);
-        spin_unlock_bh(&call->lock);
-        if (!conn)
+        if (test_bit(RXRPC_CALL_DISCONNECTED, &call->flags))
                 return -ECONNRESET;
 
+        conn = call->conn;
+
         msg.msg_name = &call->peer->srx.transport;
         msg.msg_namelen = call->peer->srx.transport_len;
         msg.msg_control = NULL;
@@ -312,8 +305,6 @@ int rxrpc_send_abort_packet(struct rxrpc_call *call)
                 trace_rxrpc_tx_packet(call->debug_id, &pkt.whdr,
                                       rxrpc_tx_point_call_abort);
         rxrpc_tx_backoff(call, ret);
-
-        rxrpc_put_connection(conn);
         return ret;
 }
 
net/rxrpc/peer_event.c

@@ -364,27 +364,31 @@ static void rxrpc_peer_keepalive_dispatch(struct rxrpc_net *rxnet,
                 if (!rxrpc_get_peer_maybe(peer))
                         continue;
 
-                spin_unlock_bh(&rxnet->peer_hash_lock);
+                if (__rxrpc_use_local(peer->local)) {
+                        spin_unlock_bh(&rxnet->peer_hash_lock);
 
-                keepalive_at = peer->last_tx_at + RXRPC_KEEPALIVE_TIME;
-                slot = keepalive_at - base;
-                _debug("%02x peer %u t=%d {%pISp}",
-                       cursor, peer->debug_id, slot, &peer->srx.transport);
+                        keepalive_at = peer->last_tx_at + RXRPC_KEEPALIVE_TIME;
+                        slot = keepalive_at - base;
+                        _debug("%02x peer %u t=%d {%pISp}",
+                               cursor, peer->debug_id, slot, &peer->srx.transport);
 
-                if (keepalive_at <= base ||
-                    keepalive_at > base + RXRPC_KEEPALIVE_TIME) {
-                        rxrpc_send_keepalive(peer);
-                        slot = RXRPC_KEEPALIVE_TIME;
+                        if (keepalive_at <= base ||
+                            keepalive_at > base + RXRPC_KEEPALIVE_TIME) {
+                                rxrpc_send_keepalive(peer);
+                                slot = RXRPC_KEEPALIVE_TIME;
+                        }
+
+                        /* A transmission to this peer occurred since last we
+                         * examined it so put it into the appropriate future
+                         * bucket.
+                         */
+                        slot += cursor;
+                        slot &= mask;
+                        spin_lock_bh(&rxnet->peer_hash_lock);
+                        list_add_tail(&peer->keepalive_link,
+                                      &rxnet->peer_keepalive[slot & mask]);
+                        rxrpc_unuse_local(peer->local);
                 }
-
-                /* A transmission to this peer occurred since last we examined
-                 * it so put it into the appropriate future bucket.
-                 */
-                slot += cursor;
-                slot &= mask;
-                spin_lock_bh(&rxnet->peer_hash_lock);
-                list_add_tail(&peer->keepalive_link,
-                              &rxnet->peer_keepalive[slot & mask]);
                 rxrpc_put_peer_locked(peer);
         }
 