2007-04-27 06:48:28 +08:00
|
|
|
/* AF_RXRPC internal definitions
|
|
|
|
*
|
|
|
|
* Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
|
|
|
|
* Written by David Howells (dhowells@redhat.com)
|
|
|
|
*
|
|
|
|
* This program is free software; you can redistribute it and/or
|
|
|
|
* modify it under the terms of the GNU General Public License
|
|
|
|
* as published by the Free Software Foundation; either version
|
|
|
|
* 2 of the License, or (at your option) any later version.
|
|
|
|
*/
|
|
|
|
|
2016-04-04 21:00:32 +08:00
|
|
|
#include <linux/atomic.h>
|
2016-07-01 14:51:50 +08:00
|
|
|
#include <linux/seqlock.h>
|
2016-04-08 00:23:58 +08:00
|
|
|
#include <net/sock.h>
|
2016-04-04 21:00:32 +08:00
|
|
|
#include <net/af_rxrpc.h>
|
2007-04-27 06:48:28 +08:00
|
|
|
#include <rxrpc/packet.h>
|
|
|
|
|
|
|
|
#if 0
|
|
|
|
#define CHECK_SLAB_OKAY(X) \
|
|
|
|
BUG_ON(atomic_read((X)) >> (sizeof(atomic_t) - 2) == \
|
|
|
|
(POISON_FREE << 8 | POISON_FREE))
|
|
|
|
#else
|
2016-03-04 23:56:19 +08:00
|
|
|
#define CHECK_SLAB_OKAY(X) do {} while (0)
|
2007-04-27 06:48:28 +08:00
|
|
|
#endif
|
|
|
|
|
|
|
|
#define FCRYPT_BSIZE 8
|
|
|
|
struct rxrpc_crypt {
|
|
|
|
union {
|
|
|
|
u8 x[FCRYPT_BSIZE];
|
2008-03-29 11:08:38 +08:00
|
|
|
__be32 n[2];
|
2007-04-27 06:48:28 +08:00
|
|
|
};
|
|
|
|
} __attribute__((aligned(8)));
|
|
|
|
|
[AF_RXRPC]: Add an interface to the AF_RXRPC module for the AFS filesystem to use
Add an interface to the AF_RXRPC module so that the AFS filesystem module can
more easily make use of the services available. AFS still opens a socket but
then uses the action functions in lieu of sendmsg() and registers an intercept
functions to grab messages before they're queued on the socket Rx queue.
This permits AFS (or whatever) to:
(1) Avoid the overhead of using the recvmsg() call.
(2) Use different keys directly on individual client calls on one socket
rather than having to open a whole slew of sockets, one for each key it
might want to use.
(3) Avoid calling request_key() at the point of issue of a call or opening of
a socket. This is done instead by AFS at the point of open(), unlink() or
other VFS operation and the key handed through.
(4) Request the use of something other than GFP_KERNEL to allocate memory.
Furthermore:
(*) The socket buffer markings used by RxRPC are made available for AFS so
that it can interpret the cooked RxRPC messages itself.
(*) rxgen (un)marshalling abort codes are made available.
The following documentation for the kernel interface is added to
Documentation/networking/rxrpc.txt:
=========================
AF_RXRPC KERNEL INTERFACE
=========================
The AF_RXRPC module also provides an interface for use by in-kernel utilities
such as the AFS filesystem. This permits such a utility to:
(1) Use different keys directly on individual client calls on one socket
rather than having to open a whole slew of sockets, one for each key it
might want to use.
(2) Avoid having RxRPC call request_key() at the point of issue of a call or
opening of a socket. Instead the utility is responsible for requesting a
key at the appropriate point. AFS, for instance, would do this during VFS
operations such as open() or unlink(). The key is then handed through
when the call is initiated.
(3) Request the use of something other than GFP_KERNEL to allocate memory.
(4) Avoid the overhead of using the recvmsg() call. RxRPC messages can be
intercepted before they get put into the socket Rx queue and the socket
buffers manipulated directly.
To use the RxRPC facility, a kernel utility must still open an AF_RXRPC socket,
bind an addess as appropriate and listen if it's to be a server socket, but
then it passes this to the kernel interface functions.
The kernel interface functions are as follows:
(*) Begin a new client call.
struct rxrpc_call *
rxrpc_kernel_begin_call(struct socket *sock,
struct sockaddr_rxrpc *srx,
struct key *key,
unsigned long user_call_ID,
gfp_t gfp);
This allocates the infrastructure to make a new RxRPC call and assigns
call and connection numbers. The call will be made on the UDP port that
the socket is bound to. The call will go to the destination address of a
connected client socket unless an alternative is supplied (srx is
non-NULL).
If a key is supplied then this will be used to secure the call instead of
the key bound to the socket with the RXRPC_SECURITY_KEY sockopt. Calls
secured in this way will still share connections if at all possible.
The user_call_ID is equivalent to that supplied to sendmsg() in the
control data buffer. It is entirely feasible to use this to point to a
kernel data structure.
If this function is successful, an opaque reference to the RxRPC call is
returned. The caller now holds a reference on this and it must be
properly ended.
(*) End a client call.
void rxrpc_kernel_end_call(struct rxrpc_call *call);
This is used to end a previously begun call. The user_call_ID is expunged
from AF_RXRPC's knowledge and will not be seen again in association with
the specified call.
(*) Send data through a call.
int rxrpc_kernel_send_data(struct rxrpc_call *call, struct msghdr *msg,
size_t len);
This is used to supply either the request part of a client call or the
reply part of a server call. msg.msg_iovlen and msg.msg_iov specify the
data buffers to be used. msg_iov may not be NULL and must point
exclusively to in-kernel virtual addresses. msg.msg_flags may be given
MSG_MORE if there will be subsequent data sends for this call.
The msg must not specify a destination address, control data or any flags
other than MSG_MORE. len is the total amount of data to transmit.
(*) Abort a call.
void rxrpc_kernel_abort_call(struct rxrpc_call *call, u32 abort_code);
This is used to abort a call if it's still in an abortable state. The
abort code specified will be placed in the ABORT message sent.
(*) Intercept received RxRPC messages.
typedef void (*rxrpc_interceptor_t)(struct sock *sk,
unsigned long user_call_ID,
struct sk_buff *skb);
void
rxrpc_kernel_intercept_rx_messages(struct socket *sock,
rxrpc_interceptor_t interceptor);
This installs an interceptor function on the specified AF_RXRPC socket.
All messages that would otherwise wind up in the socket's Rx queue are
then diverted to this function. Note that care must be taken to process
the messages in the right order to maintain DATA message sequentiality.
The interceptor function itself is provided with the address of the socket
and handling the incoming message, the ID assigned by the kernel utility
to the call and the socket buffer containing the message.
The skb->mark field indicates the type of message:
MARK MEANING
=============================== =======================================
RXRPC_SKB_MARK_DATA Data message
RXRPC_SKB_MARK_FINAL_ACK Final ACK received for an incoming call
RXRPC_SKB_MARK_BUSY Client call rejected as server busy
RXRPC_SKB_MARK_REMOTE_ABORT Call aborted by peer
RXRPC_SKB_MARK_NET_ERROR Network error detected
RXRPC_SKB_MARK_LOCAL_ERROR Local error encountered
RXRPC_SKB_MARK_NEW_CALL New incoming call awaiting acceptance
The remote abort message can be probed with rxrpc_kernel_get_abort_code().
The two error messages can be probed with rxrpc_kernel_get_error_number().
A new call can be accepted with rxrpc_kernel_accept_call().
Data messages can have their contents extracted with the usual bunch of
socket buffer manipulation functions. A data message can be determined to
be the last one in a sequence with rxrpc_kernel_is_data_last(). When a
data message has been used up, rxrpc_kernel_data_delivered() should be
called on it..
Non-data messages should be handled to rxrpc_kernel_free_skb() to dispose
of. It is possible to get extra refs on all types of message for later
freeing, but this may pin the state of a call until the message is finally
freed.
(*) Accept an incoming call.
struct rxrpc_call *
rxrpc_kernel_accept_call(struct socket *sock,
unsigned long user_call_ID);
This is used to accept an incoming call and to assign it a call ID. This
function is similar to rxrpc_kernel_begin_call() and calls accepted must
be ended in the same way.
If this function is successful, an opaque reference to the RxRPC call is
returned. The caller now holds a reference on this and it must be
properly ended.
(*) Reject an incoming call.
int rxrpc_kernel_reject_call(struct socket *sock);
This is used to reject the first incoming call on the socket's queue with
a BUSY message. -ENODATA is returned if there were no incoming calls.
Other errors may be returned if the call had been aborted (-ECONNABORTED)
or had timed out (-ETIME).
(*) Record the delivery of a data message and free it.
void rxrpc_kernel_data_delivered(struct sk_buff *skb);
This is used to record a data message as having been delivered and to
update the ACK state for the call. The socket buffer will be freed.
(*) Free a message.
void rxrpc_kernel_free_skb(struct sk_buff *skb);
This is used to free a non-DATA socket buffer intercepted from an AF_RXRPC
socket.
(*) Determine if a data message is the last one on a call.
bool rxrpc_kernel_is_data_last(struct sk_buff *skb);
This is used to determine if a socket buffer holds the last data message
to be received for a call (true will be returned if it does, false
if not).
The data message will be part of the reply on a client call and the
request on an incoming call. In the latter case there will be more
messages, but in the former case there will not.
(*) Get the abort code from an abort message.
u32 rxrpc_kernel_get_abort_code(struct sk_buff *skb);
This is used to extract the abort code from a remote abort message.
(*) Get the error number from a local or network error message.
int rxrpc_kernel_get_error_number(struct sk_buff *skb);
This is used to extract the error number from a message indicating either
a local error occurred or a network error occurred.
Signed-off-by: David Howells <dhowells@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2007-04-27 06:50:17 +08:00
|
|
|
#define rxrpc_queue_work(WS) queue_work(rxrpc_workqueue, (WS))
|
|
|
|
#define rxrpc_queue_delayed_work(WS,D) \
|
|
|
|
queue_delayed_work(rxrpc_workqueue, (WS), (D))
|
|
|
|
|
2016-04-04 21:00:37 +08:00
|
|
|
struct rxrpc_connection;
|
|
|
|
|
rxrpc: Don't expose skbs to in-kernel users [ver #2]
Don't expose skbs to in-kernel users, such as the AFS filesystem, but
instead provide a notification hook the indicates that a call needs
attention and another that indicates that there's a new call to be
collected.
This makes the following possibilities more achievable:
(1) Call refcounting can be made simpler if skbs don't hold refs to calls.
(2) skbs referring to non-data events will be able to be freed much sooner
rather than being queued for AFS to pick up as rxrpc_kernel_recv_data
will be able to consult the call state.
(3) We can shortcut the receive phase when a call is remotely aborted
because we don't have to go through all the packets to get to the one
cancelling the operation.
(4) It makes it easier to do encryption/decryption directly between AFS's
buffers and sk_buffs.
(5) Encryption/decryption can more easily be done in the AFS's thread
contexts - usually that of the userspace process that issued a syscall
- rather than in one of rxrpc's background threads on a workqueue.
(6) AFS will be able to wait synchronously on a call inside AF_RXRPC.
To make this work, the following interface function has been added:
int rxrpc_kernel_recv_data(
struct socket *sock, struct rxrpc_call *call,
void *buffer, size_t bufsize, size_t *_offset,
bool want_more, u32 *_abort_code);
This is the recvmsg equivalent. It allows the caller to find out about the
state of a specific call and to transfer received data into a buffer
piecemeal.
afs_extract_data() and rxrpc_kernel_recv_data() now do all the extraction
logic between them. They don't wait synchronously yet because the socket
lock needs to be dealt with.
Five interface functions have been removed:
rxrpc_kernel_is_data_last()
rxrpc_kernel_get_abort_code()
rxrpc_kernel_get_error_number()
rxrpc_kernel_free_skb()
rxrpc_kernel_data_consumed()
As a temporary hack, sk_buffs going to an in-kernel call are queued on the
rxrpc_call struct (->knlrecv_queue) rather than being handed over to the
in-kernel user. To process the queue internally, a temporary function,
temp_deliver_data() has been added. This will be replaced with common code
between the rxrpc_recvmsg() path and the kernel_rxrpc_recv_data() path in a
future patch.
Signed-off-by: David Howells <dhowells@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2016-08-31 03:42:14 +08:00
|
|
|
/*
|
|
|
|
* Mark applied to socket buffers.
|
|
|
|
*/
|
|
|
|
enum rxrpc_skb_mark {
|
|
|
|
RXRPC_SKB_MARK_DATA, /* data message */
|
|
|
|
RXRPC_SKB_MARK_FINAL_ACK, /* final ACK received message */
|
|
|
|
RXRPC_SKB_MARK_BUSY, /* server busy message */
|
|
|
|
RXRPC_SKB_MARK_REMOTE_ABORT, /* remote abort message */
|
|
|
|
RXRPC_SKB_MARK_LOCAL_ABORT, /* local abort message */
|
|
|
|
RXRPC_SKB_MARK_NET_ERROR, /* network error message */
|
|
|
|
RXRPC_SKB_MARK_LOCAL_ERROR, /* local error message */
|
|
|
|
RXRPC_SKB_MARK_NEW_CALL, /* local error message */
|
|
|
|
};
|
|
|
|
|
2007-04-27 06:48:28 +08:00
|
|
|
/*
|
|
|
|
* sk_state for RxRPC sockets
|
|
|
|
*/
|
|
|
|
enum {
|
2016-06-10 06:02:51 +08:00
|
|
|
RXRPC_UNBOUND = 0,
|
|
|
|
RXRPC_CLIENT_UNBOUND, /* Unbound socket used as client */
|
2007-04-27 06:48:28 +08:00
|
|
|
RXRPC_CLIENT_BOUND, /* client local address bound */
|
|
|
|
RXRPC_SERVER_BOUND, /* server local address bound */
|
|
|
|
RXRPC_SERVER_LISTENING, /* server listening for connections */
|
|
|
|
RXRPC_CLOSE, /* socket is being closed */
|
|
|
|
};
|
|
|
|
|
2016-09-08 18:10:12 +08:00
|
|
|
/*
|
|
|
|
* Service backlog preallocation.
|
|
|
|
*
|
|
|
|
* This contains circular buffers of preallocated peers, connections and calls
|
|
|
|
* for incoming service calls and their head and tail pointers. This allows
|
|
|
|
* calls to be set up in the data_ready handler, thereby avoiding the need to
|
|
|
|
* shuffle packets around so much.
|
|
|
|
*/
|
|
|
|
struct rxrpc_backlog {
|
|
|
|
unsigned short peer_backlog_head;
|
|
|
|
unsigned short peer_backlog_tail;
|
|
|
|
unsigned short conn_backlog_head;
|
|
|
|
unsigned short conn_backlog_tail;
|
|
|
|
unsigned short call_backlog_head;
|
|
|
|
unsigned short call_backlog_tail;
|
|
|
|
#define RXRPC_BACKLOG_MAX 32
|
|
|
|
struct rxrpc_peer *peer_backlog[RXRPC_BACKLOG_MAX];
|
|
|
|
struct rxrpc_connection *conn_backlog[RXRPC_BACKLOG_MAX];
|
|
|
|
struct rxrpc_call *call_backlog[RXRPC_BACKLOG_MAX];
|
|
|
|
};
|
|
|
|
|
2007-04-27 06:48:28 +08:00
|
|
|
/*
|
|
|
|
* RxRPC socket definition
|
|
|
|
*/
|
|
|
|
struct rxrpc_sock {
|
|
|
|
/* WARNING: sk has to be the first member */
|
|
|
|
struct sock sk;
|
rxrpc: Don't expose skbs to in-kernel users [ver #2]
Don't expose skbs to in-kernel users, such as the AFS filesystem, but
instead provide a notification hook the indicates that a call needs
attention and another that indicates that there's a new call to be
collected.
This makes the following possibilities more achievable:
(1) Call refcounting can be made simpler if skbs don't hold refs to calls.
(2) skbs referring to non-data events will be able to be freed much sooner
rather than being queued for AFS to pick up as rxrpc_kernel_recv_data
will be able to consult the call state.
(3) We can shortcut the receive phase when a call is remotely aborted
because we don't have to go through all the packets to get to the one
cancelling the operation.
(4) It makes it easier to do encryption/decryption directly between AFS's
buffers and sk_buffs.
(5) Encryption/decryption can more easily be done in the AFS's thread
contexts - usually that of the userspace process that issued a syscall
- rather than in one of rxrpc's background threads on a workqueue.
(6) AFS will be able to wait synchronously on a call inside AF_RXRPC.
To make this work, the following interface function has been added:
int rxrpc_kernel_recv_data(
struct socket *sock, struct rxrpc_call *call,
void *buffer, size_t bufsize, size_t *_offset,
bool want_more, u32 *_abort_code);
This is the recvmsg equivalent. It allows the caller to find out about the
state of a specific call and to transfer received data into a buffer
piecemeal.
afs_extract_data() and rxrpc_kernel_recv_data() now do all the extraction
logic between them. They don't wait synchronously yet because the socket
lock needs to be dealt with.
Five interface functions have been removed:
rxrpc_kernel_is_data_last()
rxrpc_kernel_get_abort_code()
rxrpc_kernel_get_error_number()
rxrpc_kernel_free_skb()
rxrpc_kernel_data_consumed()
As a temporary hack, sk_buffs going to an in-kernel call are queued on the
rxrpc_call struct (->knlrecv_queue) rather than being handed over to the
in-kernel user. To process the queue internally, a temporary function,
temp_deliver_data() has been added. This will be replaced with common code
between the rxrpc_recvmsg() path and the kernel_rxrpc_recv_data() path in a
future patch.
Signed-off-by: David Howells <dhowells@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2016-08-31 03:42:14 +08:00
|
|
|
rxrpc_notify_new_call_t notify_new_call; /* Func to notify of new call */
|
2016-09-08 18:10:12 +08:00
|
|
|
rxrpc_discard_new_call_t discard_new_call; /* Func to discard a new call */
|
2007-04-27 06:48:28 +08:00
|
|
|
struct rxrpc_local *local; /* local endpoint */
|
2016-09-08 18:10:11 +08:00
|
|
|
struct hlist_node listen_link; /* link in the local endpoint's listen list */
|
2016-09-08 18:10:12 +08:00
|
|
|
struct rxrpc_backlog *backlog; /* Preallocation for services */
|
rxrpc: Rewrite the data and ack handling code
Rewrite the data and ack handling code such that:
(1) Parsing of received ACK and ABORT packets and the distribution and the
filing of DATA packets happens entirely within the data_ready context
called from the UDP socket. This allows us to process and discard ACK
and ABORT packets much more quickly (they're no longer stashed on a
queue for a background thread to process).
(2) We avoid calling skb_clone(), pskb_pull() and pskb_trim(). We instead
keep track of the offset and length of the content of each packet in
the sk_buff metadata. This means we don't do any allocation in the
receive path.
(3) Jumbo DATA packet parsing is now done in data_ready context. Rather
than cloning the packet once for each subpacket and pulling/trimming
it, we file the packet multiple times with an annotation for each
indicating which subpacket is there. From that we can directly
calculate the offset and length.
(4) A call's receive queue can be accessed without taking locks (memory
barriers do have to be used, though).
(5) Incoming calls are set up from preallocated resources and immediately
made live. They can than have packets queued upon them and ACKs
generated. If insufficient resources exist, DATA packet #1 is given a
BUSY reply and other DATA packets are discarded).
(6) sk_buffs no longer take a ref on their parent call.
To make this work, the following changes are made:
(1) Each call's receive buffer is now a circular buffer of sk_buff
pointers (rxtx_buffer) rather than a number of sk_buff_heads spread
between the call and the socket. This permits each sk_buff to be in
the buffer multiple times. The receive buffer is reused for the
transmit buffer.
(2) A circular buffer of annotations (rxtx_annotations) is kept parallel
to the data buffer. Transmission phase annotations indicate whether a
buffered packet has been ACK'd or not and whether it needs
retransmission.
Receive phase annotations indicate whether a slot holds a whole packet
or a jumbo subpacket and, if the latter, which subpacket. They also
note whether the packet has been decrypted in place.
(3) DATA packet window tracking is much simplified. Each phase has just
two numbers representing the window (rx_hard_ack/rx_top and
tx_hard_ack/tx_top).
The hard_ack number is the sequence number before base of the window,
representing the last packet the other side says it has consumed.
hard_ack starts from 0 and the first packet is sequence number 1.
The top number is the sequence number of the highest-numbered packet
residing in the buffer. Packets between hard_ack+1 and top are
soft-ACK'd to indicate they've been received, but not yet consumed.
Four macros, before(), before_eq(), after() and after_eq() are added
to compare sequence numbers within the window. This allows for the
top of the window to wrap when the hard-ack sequence number gets close
to the limit.
Two flags, RXRPC_CALL_RX_LAST and RXRPC_CALL_TX_LAST, are added also
to indicate when rx_top and tx_top point at the packets with the
LAST_PACKET bit set, indicating the end of the phase.
(4) Calls are queued on the socket 'receive queue' rather than packets.
This means that we don't need have to invent dummy packets to queue to
indicate abnormal/terminal states and we don't have to keep metadata
packets (such as ABORTs) around
(5) The offset and length of a (sub)packet's content are now passed to
the verify_packet security op. This is currently expected to decrypt
the packet in place and validate it.
However, there's now nowhere to store the revised offset and length of
the actual data within the decrypted blob (there may be a header and
padding to skip) because an sk_buff may represent multiple packets, so
a locate_data security op is added to retrieve these details from the
sk_buff content when needed.
(6) recvmsg() now has to handle jumbo subpackets, where each subpacket is
individually secured and needs to be individually decrypted. The code
to do this is broken out into rxrpc_recvmsg_data() and shared with the
kernel API. It now iterates over the call's receive buffer rather
than walking the socket receive queue.
Additional changes:
(1) The timers are condensed to a single timer that is set for the soonest
of three timeouts (delayed ACK generation, DATA retransmission and
call lifespan).
(2) Transmission of ACK and ABORT packets is effected immediately from
process-context socket ops/kernel API calls that cause them instead of
them being punted off to a background work item. The data_ready
handler still has to defer to the background, though.
(3) A shutdown op is added to the AF_RXRPC socket so that the AFS
filesystem can shut down the socket and flush its own work items
before closing the socket to deal with any in-progress service calls.
Future additional changes that will need to be considered:
(1) Make sure that a call doesn't hog the front of the queue by receiving
data from the network as fast as userspace is consuming it to the
exclusion of other calls.
(2) Transmit delayed ACKs from within recvmsg() when we've consumed
sufficiently more packets to avoid the background work item needing to
run.
Signed-off-by: David Howells <dhowells@redhat.com>
2016-09-08 18:10:12 +08:00
|
|
|
spinlock_t incoming_lock; /* Incoming call vs service shutdown lock */
|
|
|
|
struct list_head sock_calls; /* List of calls owned by this socket */
|
|
|
|
struct list_head to_be_accepted; /* calls awaiting acceptance */
|
|
|
|
struct list_head recvmsg_q; /* Calls awaiting recvmsg's attention */
|
|
|
|
rwlock_t recvmsg_lock; /* Lock for recvmsg_q */
|
2007-04-27 06:48:28 +08:00
|
|
|
struct key *key; /* security for this socket */
|
|
|
|
struct key *securities; /* list of server security descriptors */
|
2016-09-08 18:10:12 +08:00
|
|
|
struct rb_root calls; /* User ID -> call mapping */
|
2007-04-27 06:48:28 +08:00
|
|
|
unsigned long flags;
|
2016-06-10 06:02:51 +08:00
|
|
|
#define RXRPC_SOCK_CONNECTED 0 /* connect_srx is set */
|
2007-04-27 06:48:28 +08:00
|
|
|
rwlock_t call_lock; /* lock for calls */
|
|
|
|
u32 min_sec_level; /* minimum security level */
|
|
|
|
#define RXRPC_SECURITY_MAX RXRPC_SECURITY_ENCRYPT
|
2016-04-04 21:00:37 +08:00
|
|
|
bool exclusive; /* Exclusive connection for a client socket */
|
|
|
|
sa_family_t family; /* Protocol family created with */
|
2007-04-27 06:48:28 +08:00
|
|
|
struct sockaddr_rxrpc srx; /* local address */
|
2016-06-10 06:02:51 +08:00
|
|
|
struct sockaddr_rxrpc connect_srx; /* Default client address from connect() */
|
2007-04-27 06:48:28 +08:00
|
|
|
};
|
|
|
|
|
|
|
|
#define rxrpc_sk(__sk) container_of((__sk), struct rxrpc_sock, sk)
|
|
|
|
|
2016-03-04 23:53:46 +08:00
|
|
|
/*
|
|
|
|
* CPU-byteorder normalised Rx packet header.
|
|
|
|
*/
|
|
|
|
struct rxrpc_host_header {
|
|
|
|
u32 epoch; /* client boot timestamp */
|
|
|
|
u32 cid; /* connection and channel ID */
|
|
|
|
u32 callNumber; /* call ID (0 for connection-level packets) */
|
|
|
|
u32 seq; /* sequence number of pkt in call stream */
|
|
|
|
u32 serial; /* serial number of pkt sent to network */
|
|
|
|
u8 type; /* packet type */
|
|
|
|
u8 flags; /* packet flags */
|
|
|
|
u8 userStatus; /* app-layer defined status */
|
|
|
|
u8 securityIndex; /* security protocol ID */
|
|
|
|
union {
|
|
|
|
u16 _rsvd; /* reserved */
|
|
|
|
u16 cksum; /* kerberos security checksum */
|
|
|
|
};
|
|
|
|
u16 serviceId; /* service ID */
|
|
|
|
} __packed;
|
|
|
|
|
2007-04-27 06:48:28 +08:00
|
|
|
/*
|
|
|
|
* RxRPC socket buffer private variables
|
|
|
|
* - max 48 bytes (struct sk_buff::cb)
|
|
|
|
*/
|
|
|
|
struct rxrpc_skb_priv {
|
rxrpc: Rewrite the data and ack handling code
Rewrite the data and ack handling code such that:
(1) Parsing of received ACK and ABORT packets and the distribution and the
filing of DATA packets happens entirely within the data_ready context
called from the UDP socket. This allows us to process and discard ACK
and ABORT packets much more quickly (they're no longer stashed on a
queue for a background thread to process).
(2) We avoid calling skb_clone(), pskb_pull() and pskb_trim(). We instead
keep track of the offset and length of the content of each packet in
the sk_buff metadata. This means we don't do any allocation in the
receive path.
(3) Jumbo DATA packet parsing is now done in data_ready context. Rather
than cloning the packet once for each subpacket and pulling/trimming
it, we file the packet multiple times with an annotation for each
indicating which subpacket is there. From that we can directly
calculate the offset and length.
(4) A call's receive queue can be accessed without taking locks (memory
barriers do have to be used, though).
(5) Incoming calls are set up from preallocated resources and immediately
made live. They can than have packets queued upon them and ACKs
generated. If insufficient resources exist, DATA packet #1 is given a
BUSY reply and other DATA packets are discarded).
(6) sk_buffs no longer take a ref on their parent call.
To make this work, the following changes are made:
(1) Each call's receive buffer is now a circular buffer of sk_buff
pointers (rxtx_buffer) rather than a number of sk_buff_heads spread
between the call and the socket. This permits each sk_buff to be in
the buffer multiple times. The receive buffer is reused for the
transmit buffer.
(2) A circular buffer of annotations (rxtx_annotations) is kept parallel
to the data buffer. Transmission phase annotations indicate whether a
buffered packet has been ACK'd or not and whether it needs
retransmission.
Receive phase annotations indicate whether a slot holds a whole packet
or a jumbo subpacket and, if the latter, which subpacket. They also
note whether the packet has been decrypted in place.
(3) DATA packet window tracking is much simplified. Each phase has just
two numbers representing the window (rx_hard_ack/rx_top and
tx_hard_ack/tx_top).
The hard_ack number is the sequence number before base of the window,
representing the last packet the other side says it has consumed.
hard_ack starts from 0 and the first packet is sequence number 1.
The top number is the sequence number of the highest-numbered packet
residing in the buffer. Packets between hard_ack+1 and top are
soft-ACK'd to indicate they've been received, but not yet consumed.
Four macros, before(), before_eq(), after() and after_eq() are added
to compare sequence numbers within the window. This allows for the
top of the window to wrap when the hard-ack sequence number gets close
to the limit.
Two flags, RXRPC_CALL_RX_LAST and RXRPC_CALL_TX_LAST, are added also
to indicate when rx_top and tx_top point at the packets with the
LAST_PACKET bit set, indicating the end of the phase.
(4) Calls are queued on the socket 'receive queue' rather than packets.
This means that we don't need have to invent dummy packets to queue to
indicate abnormal/terminal states and we don't have to keep metadata
packets (such as ABORTs) around
(5) The offset and length of a (sub)packet's content are now passed to
the verify_packet security op. This is currently expected to decrypt
the packet in place and validate it.
However, there's now nowhere to store the revised offset and length of
the actual data within the decrypted blob (there may be a header and
padding to skip) because an sk_buff may represent multiple packets, so
a locate_data security op is added to retrieve these details from the
sk_buff content when needed.
(6) recvmsg() now has to handle jumbo subpackets, where each subpacket is
individually secured and needs to be individually decrypted. The code
to do this is broken out into rxrpc_recvmsg_data() and shared with the
kernel API. It now iterates over the call's receive buffer rather
than walking the socket receive queue.
Additional changes:
(1) The timers are condensed to a single timer that is set for the soonest
of three timeouts (delayed ACK generation, DATA retransmission and
call lifespan).
(2) Transmission of ACK and ABORT packets is effected immediately from
process-context socket ops/kernel API calls that cause them instead of
them being punted off to a background work item. The data_ready
handler still has to defer to the background, though.
(3) A shutdown op is added to the AF_RXRPC socket so that the AFS
filesystem can shut down the socket and flush its own work items
before closing the socket to deal with any in-progress service calls.
Future additional changes that will need to be considered:
(1) Make sure that a call doesn't hog the front of the queue by receiving
data from the network as fast as userspace is consuming it to the
exclusion of other calls.
(2) Transmit delayed ACKs from within recvmsg() when we've consumed
sufficiently more packets to avoid the background work item needing to
run.
Signed-off-by: David Howells <dhowells@redhat.com>
2016-09-08 18:10:12 +08:00
|
|
|
union {
|
2016-09-22 07:29:31 +08:00
|
|
|
u8 nr_jumbo; /* Number of jumbo subpackets */
|
rxrpc: Rewrite the data and ack handling code
Rewrite the data and ack handling code such that:
(1) Parsing of received ACK and ABORT packets and the distribution and the
filing of DATA packets happens entirely within the data_ready context
called from the UDP socket. This allows us to process and discard ACK
and ABORT packets much more quickly (they're no longer stashed on a
queue for a background thread to process).
(2) We avoid calling skb_clone(), pskb_pull() and pskb_trim(). We instead
keep track of the offset and length of the content of each packet in
the sk_buff metadata. This means we don't do any allocation in the
receive path.
(3) Jumbo DATA packet parsing is now done in data_ready context. Rather
than cloning the packet once for each subpacket and pulling/trimming
it, we file the packet multiple times with an annotation for each
indicating which subpacket is there. From that we can directly
calculate the offset and length.
(4) A call's receive queue can be accessed without taking locks (memory
barriers do have to be used, though).
(5) Incoming calls are set up from preallocated resources and immediately
made live. They can than have packets queued upon them and ACKs
generated. If insufficient resources exist, DATA packet #1 is given a
BUSY reply and other DATA packets are discarded).
(6) sk_buffs no longer take a ref on their parent call.
To make this work, the following changes are made:
(1) Each call's receive buffer is now a circular buffer of sk_buff
pointers (rxtx_buffer) rather than a number of sk_buff_heads spread
between the call and the socket. This permits each sk_buff to be in
the buffer multiple times. The receive buffer is reused for the
transmit buffer.
(2) A circular buffer of annotations (rxtx_annotations) is kept parallel
to the data buffer. Transmission phase annotations indicate whether a
buffered packet has been ACK'd or not and whether it needs
retransmission.
Receive phase annotations indicate whether a slot holds a whole packet
or a jumbo subpacket and, if the latter, which subpacket. They also
note whether the packet has been decrypted in place.
(3) DATA packet window tracking is much simplified. Each phase has just
two numbers representing the window (rx_hard_ack/rx_top and
tx_hard_ack/tx_top).
The hard_ack number is the sequence number before base of the window,
representing the last packet the other side says it has consumed.
hard_ack starts from 0 and the first packet is sequence number 1.
The top number is the sequence number of the highest-numbered packet
residing in the buffer. Packets between hard_ack+1 and top are
soft-ACK'd to indicate they've been received, but not yet consumed.
Four macros, before(), before_eq(), after() and after_eq() are added
to compare sequence numbers within the window. This allows for the
top of the window to wrap when the hard-ack sequence number gets close
to the limit.
Two flags, RXRPC_CALL_RX_LAST and RXRPC_CALL_TX_LAST, are added also
to indicate when rx_top and tx_top point at the packets with the
LAST_PACKET bit set, indicating the end of the phase.
(4) Calls are queued on the socket 'receive queue' rather than packets.
This means that we don't need have to invent dummy packets to queue to
indicate abnormal/terminal states and we don't have to keep metadata
packets (such as ABORTs) around
(5) The offset and length of a (sub)packet's content are now passed to
the verify_packet security op. This is currently expected to decrypt
the packet in place and validate it.
However, there's now nowhere to store the revised offset and length of
the actual data within the decrypted blob (there may be a header and
padding to skip) because an sk_buff may represent multiple packets, so
a locate_data security op is added to retrieve these details from the
sk_buff content when needed.
(6) recvmsg() now has to handle jumbo subpackets, where each subpacket is
individually secured and needs to be individually decrypted. The code
to do this is broken out into rxrpc_recvmsg_data() and shared with the
kernel API. It now iterates over the call's receive buffer rather
than walking the socket receive queue.
Additional changes:
(1) The timers are condensed to a single timer that is set for the soonest
of three timeouts (delayed ACK generation, DATA retransmission and
call lifespan).
(2) Transmission of ACK and ABORT packets is effected immediately from
process-context socket ops/kernel API calls that cause them instead of
them being punted off to a background work item. The data_ready
handler still has to defer to the background, though.
(3) A shutdown op is added to the AF_RXRPC socket so that the AFS
filesystem can shut down the socket and flush its own work items
before closing the socket to deal with any in-progress service calls.
Future additional changes that will need to be considered:
(1) Make sure that a call doesn't hog the front of the queue by receiving
data from the network as fast as userspace is consuming it to the
exclusion of other calls.
(2) Transmit delayed ACKs from within recvmsg() when we've consumed
sufficiently more packets to avoid the background work item needing to
run.
Signed-off-by: David Howells <dhowells@redhat.com>
2016-09-08 18:10:12 +08:00
|
|
|
};
|
2007-04-27 06:48:28 +08:00
|
|
|
union {
|
2012-04-15 13:58:06 +08:00
|
|
|
unsigned int offset; /* offset into buffer of next read */
|
2007-04-27 06:48:28 +08:00
|
|
|
int remain; /* amount of space remaining for next write */
|
|
|
|
u32 error; /* network error code */
|
|
|
|
};
|
|
|
|
|
2016-03-04 23:53:46 +08:00
|
|
|
struct rxrpc_host_header hdr; /* RxRPC packet header from this packet */
|
2007-04-27 06:48:28 +08:00
|
|
|
};
|
|
|
|
|
|
|
|
#define rxrpc_skb(__skb) ((struct rxrpc_skb_priv *) &(__skb)->cb)
|
|
|
|
|
|
|
|
/*
|
|
|
|
* RxRPC security module interface
|
|
|
|
*/
|
|
|
|
struct rxrpc_security {
|
|
|
|
const char *name; /* name of this service */
|
|
|
|
u8 security_index; /* security type provided */
|
|
|
|
|
2016-04-08 00:23:51 +08:00
|
|
|
/* Initialise a security service */
|
|
|
|
int (*init)(void);
|
|
|
|
|
|
|
|
/* Clean up a security service */
|
|
|
|
void (*exit)(void);
|
|
|
|
|
2007-04-27 06:48:28 +08:00
|
|
|
/* initialise a connection's security */
|
|
|
|
int (*init_connection_security)(struct rxrpc_connection *);
|
|
|
|
|
|
|
|
/* prime a connection's packet security */
|
2016-06-27 05:55:24 +08:00
|
|
|
int (*prime_packet_security)(struct rxrpc_connection *);
|
2007-04-27 06:48:28 +08:00
|
|
|
|
|
|
|
/* impose security on a packet */
|
2016-06-27 05:55:24 +08:00
|
|
|
int (*secure_packet)(struct rxrpc_call *,
|
2007-04-27 06:48:28 +08:00
|
|
|
struct sk_buff *,
|
|
|
|
size_t,
|
|
|
|
void *);
|
|
|
|
|
|
|
|
/* verify the security on a received packet */
|
2016-09-07 05:19:51 +08:00
|
|
|
int (*verify_packet)(struct rxrpc_call *, struct sk_buff *,
|
rxrpc: Rewrite the data and ack handling code
Rewrite the data and ack handling code such that:
(1) Parsing of received ACK and ABORT packets and the distribution and the
filing of DATA packets happens entirely within the data_ready context
called from the UDP socket. This allows us to process and discard ACK
and ABORT packets much more quickly (they're no longer stashed on a
queue for a background thread to process).
(2) We avoid calling skb_clone(), pskb_pull() and pskb_trim(). We instead
keep track of the offset and length of the content of each packet in
the sk_buff metadata. This means we don't do any allocation in the
receive path.
(3) Jumbo DATA packet parsing is now done in data_ready context. Rather
than cloning the packet once for each subpacket and pulling/trimming
it, we file the packet multiple times with an annotation for each
indicating which subpacket is there. From that we can directly
calculate the offset and length.
(4) A call's receive queue can be accessed without taking locks (memory
barriers do have to be used, though).
(5) Incoming calls are set up from preallocated resources and immediately
made live. They can than have packets queued upon them and ACKs
generated. If insufficient resources exist, DATA packet #1 is given a
BUSY reply and other DATA packets are discarded).
(6) sk_buffs no longer take a ref on their parent call.
To make this work, the following changes are made:
(1) Each call's receive buffer is now a circular buffer of sk_buff
pointers (rxtx_buffer) rather than a number of sk_buff_heads spread
between the call and the socket. This permits each sk_buff to be in
the buffer multiple times. The receive buffer is reused for the
transmit buffer.
(2) A circular buffer of annotations (rxtx_annotations) is kept parallel
to the data buffer. Transmission phase annotations indicate whether a
buffered packet has been ACK'd or not and whether it needs
retransmission.
Receive phase annotations indicate whether a slot holds a whole packet
or a jumbo subpacket and, if the latter, which subpacket. They also
note whether the packet has been decrypted in place.
(3) DATA packet window tracking is much simplified. Each phase has just
two numbers representing the window (rx_hard_ack/rx_top and
tx_hard_ack/tx_top).
The hard_ack number is the sequence number before base of the window,
representing the last packet the other side says it has consumed.
hard_ack starts from 0 and the first packet is sequence number 1.
The top number is the sequence number of the highest-numbered packet
residing in the buffer. Packets between hard_ack+1 and top are
soft-ACK'd to indicate they've been received, but not yet consumed.
Four macros, before(), before_eq(), after() and after_eq() are added
to compare sequence numbers within the window. This allows for the
top of the window to wrap when the hard-ack sequence number gets close
to the limit.
Two flags, RXRPC_CALL_RX_LAST and RXRPC_CALL_TX_LAST, are added also
to indicate when rx_top and tx_top point at the packets with the
LAST_PACKET bit set, indicating the end of the phase.
(4) Calls are queued on the socket 'receive queue' rather than packets.
This means that we don't need have to invent dummy packets to queue to
indicate abnormal/terminal states and we don't have to keep metadata
packets (such as ABORTs) around
(5) The offset and length of a (sub)packet's content are now passed to
the verify_packet security op. This is currently expected to decrypt
the packet in place and validate it.
However, there's now nowhere to store the revised offset and length of
the actual data within the decrypted blob (there may be a header and
padding to skip) because an sk_buff may represent multiple packets, so
a locate_data security op is added to retrieve these details from the
sk_buff content when needed.
(6) recvmsg() now has to handle jumbo subpackets, where each subpacket is
individually secured and needs to be individually decrypted. The code
to do this is broken out into rxrpc_recvmsg_data() and shared with the
kernel API. It now iterates over the call's receive buffer rather
than walking the socket receive queue.
Additional changes:
(1) The timers are condensed to a single timer that is set for the soonest
of three timeouts (delayed ACK generation, DATA retransmission and
call lifespan).
(2) Transmission of ACK and ABORT packets is effected immediately from
process-context socket ops/kernel API calls that cause them instead of
them being punted off to a background work item. The data_ready
handler still has to defer to the background, though.
(3) A shutdown op is added to the AF_RXRPC socket so that the AFS
filesystem can shut down the socket and flush its own work items
before closing the socket to deal with any in-progress service calls.
Future additional changes that will need to be considered:
(1) Make sure that a call doesn't hog the front of the queue by receiving
data from the network as fast as userspace is consuming it to the
exclusion of other calls.
(2) Transmit delayed ACKs from within recvmsg() when we've consumed
sufficiently more packets to avoid the background work item needing to
run.
Signed-off-by: David Howells <dhowells@redhat.com>
2016-09-08 18:10:12 +08:00
|
|
|
unsigned int, unsigned int, rxrpc_seq_t, u16);
|
|
|
|
|
|
|
|
/* Locate the data in a received packet that has been verified. */
|
|
|
|
void (*locate_data)(struct rxrpc_call *, struct sk_buff *,
|
|
|
|
unsigned int *, unsigned int *);
|
2007-04-27 06:48:28 +08:00
|
|
|
|
|
|
|
/* issue a challenge */
|
|
|
|
int (*issue_challenge)(struct rxrpc_connection *);
|
|
|
|
|
|
|
|
/* respond to a challenge */
|
|
|
|
int (*respond_to_challenge)(struct rxrpc_connection *,
|
|
|
|
struct sk_buff *,
|
|
|
|
u32 *);
|
|
|
|
|
|
|
|
/* verify a response */
|
|
|
|
int (*verify_response)(struct rxrpc_connection *,
|
|
|
|
struct sk_buff *,
|
|
|
|
u32 *);
|
|
|
|
|
|
|
|
/* clear connection security */
|
|
|
|
void (*clear)(struct rxrpc_connection *);
|
|
|
|
};
|
|
|
|
|
|
|
|
/*
|
2016-04-04 21:00:35 +08:00
|
|
|
* RxRPC local transport endpoint description
|
|
|
|
* - owned by a single AF_RXRPC socket
|
|
|
|
* - pointed to by transport socket struct sk_user_data
|
2007-04-27 06:48:28 +08:00
|
|
|
*/
|
|
|
|
struct rxrpc_local {
|
2016-04-04 21:00:35 +08:00
|
|
|
struct rcu_head rcu;
|
|
|
|
atomic_t usage;
|
|
|
|
struct list_head link;
|
2007-04-27 06:48:28 +08:00
|
|
|
struct socket *socket; /* my UDP socket */
|
2016-04-04 21:00:35 +08:00
|
|
|
struct work_struct processor;
|
2016-09-08 18:10:11 +08:00
|
|
|
struct hlist_head services; /* services listening on this endpoint */
|
2007-04-27 06:48:28 +08:00
|
|
|
struct rw_semaphore defrag_sem; /* control re-enablement of IP DF bit */
|
|
|
|
struct sk_buff_head reject_queue; /* packets awaiting rejection */
|
2015-04-01 23:31:26 +08:00
|
|
|
struct sk_buff_head event_queue; /* endpoint event packets awaiting processing */
|
2016-06-17 22:42:35 +08:00
|
|
|
struct rb_root client_conns; /* Client connections by socket params */
|
|
|
|
spinlock_t client_conns_lock; /* Lock for client_conns */
|
2007-04-27 06:48:28 +08:00
|
|
|
spinlock_t lock; /* access lock */
|
|
|
|
rwlock_t services_lock; /* lock for services list */
|
|
|
|
int debug_id; /* debug ID for printks */
|
2016-04-04 21:00:35 +08:00
|
|
|
bool dead;
|
2007-04-27 06:48:28 +08:00
|
|
|
struct sockaddr_rxrpc srx; /* local address */
|
|
|
|
};
|
|
|
|
|
|
|
|
/*
|
|
|
|
* RxRPC remote transport endpoint definition
|
2016-04-04 21:00:32 +08:00
|
|
|
* - matched by local endpoint, remote port, address and protocol type
|
2007-04-27 06:48:28 +08:00
|
|
|
*/
|
|
|
|
struct rxrpc_peer {
|
2016-04-04 21:00:32 +08:00
|
|
|
struct rcu_head rcu; /* This must be first */
|
|
|
|
atomic_t usage;
|
|
|
|
unsigned long hash_key;
|
|
|
|
struct hlist_node hash_link;
|
|
|
|
struct rxrpc_local *local;
|
2016-04-04 21:00:34 +08:00
|
|
|
struct hlist_head error_targets; /* targets for net error distribution */
|
|
|
|
struct work_struct error_distributor;
|
2016-06-17 17:06:56 +08:00
|
|
|
struct rb_root service_conns; /* Service connections */
|
2016-07-01 14:51:50 +08:00
|
|
|
seqlock_t service_conn_lock;
|
2007-04-27 06:48:28 +08:00
|
|
|
spinlock_t lock; /* access lock */
|
2012-04-15 13:58:06 +08:00
|
|
|
unsigned int if_mtu; /* interface MTU for this peer */
|
|
|
|
unsigned int mtu; /* network MTU for this peer */
|
|
|
|
unsigned int maxdata; /* data size (MTU - hdrsize) */
|
2007-04-27 06:48:28 +08:00
|
|
|
unsigned short hdrsize; /* header size (IP + UDP + RxRPC) */
|
|
|
|
int debug_id; /* debug ID for printks */
|
2016-04-04 21:00:34 +08:00
|
|
|
int error_report; /* Net (+0) or local (+1000000) to distribute */
|
|
|
|
#define RXRPC_LOCAL_ERROR_OFFSET 1000000
|
2007-04-27 06:48:28 +08:00
|
|
|
struct sockaddr_rxrpc srx; /* remote address */
|
|
|
|
|
|
|
|
/* calculated RTT cache */
|
|
|
|
#define RXRPC_RTT_CACHE_SIZE 32
|
2016-09-22 07:29:31 +08:00
|
|
|
ktime_t rtt_last_req; /* Time of last RTT request */
|
2016-09-22 07:41:53 +08:00
|
|
|
u64 rtt; /* Current RTT estimate (in nS) */
|
|
|
|
u64 rtt_sum; /* Sum of cache contents */
|
|
|
|
u64 rtt_cache[RXRPC_RTT_CACHE_SIZE]; /* Determined RTT cache */
|
|
|
|
u8 rtt_cursor; /* next entry at which to insert */
|
|
|
|
u8 rtt_usage; /* amount of cache actually used */
|
2007-04-27 06:48:28 +08:00
|
|
|
};
|
|
|
|
|
2016-04-04 21:00:36 +08:00
|
|
|
/*
|
|
|
|
* Keys for matching a connection.
|
|
|
|
*/
|
|
|
|
struct rxrpc_conn_proto {
|
2016-06-30 19:16:21 +08:00
|
|
|
union {
|
|
|
|
struct {
|
|
|
|
u32 epoch; /* epoch of this connection */
|
|
|
|
u32 cid; /* connection ID */
|
|
|
|
};
|
|
|
|
u64 index_key;
|
2016-04-04 21:00:36 +08:00
|
|
|
};
|
|
|
|
};
|
|
|
|
|
|
|
|
struct rxrpc_conn_parameters {
|
|
|
|
struct rxrpc_local *local; /* Representation of local endpoint */
|
|
|
|
struct rxrpc_peer *peer; /* Remote endpoint */
|
|
|
|
struct key *key; /* Security details */
|
|
|
|
bool exclusive; /* T if conn is exclusive */
|
|
|
|
u16 service_id; /* Service ID for this connection */
|
|
|
|
u32 security_level; /* Security level selected */
|
|
|
|
};
|
|
|
|
|
2016-06-27 17:32:02 +08:00
|
|
|
/*
|
|
|
|
* Bits in the connection flags.
|
|
|
|
*/
|
|
|
|
enum rxrpc_conn_flag {
|
|
|
|
RXRPC_CONN_HAS_IDR, /* Has a client conn ID assigned */
|
2016-06-30 17:45:22 +08:00
|
|
|
RXRPC_CONN_IN_SERVICE_CONNS, /* Conn is in peer->service_conns */
|
|
|
|
RXRPC_CONN_IN_CLIENT_CONNS, /* Conn is in local->client_conns */
|
rxrpc: Improve management and caching of client connection objects
Improve the management and caching of client rxrpc connection objects.
From this point, client connections will be managed separately from service
connections because AF_RXRPC controls the creation and re-use of client
connections but doesn't have that luxury with service connections.
Further, there will be limits on the numbers of client connections that may
be live on a machine. No direct restriction will be placed on the number
of client calls, excepting that each client connection can support a
maximum of four concurrent calls.
Note that, for a number of reasons, we don't want to simply discard a
client connection as soon as the last call is apparently finished:
(1) Security is negotiated per-connection and the context is then shared
between all calls on that connection. The context can be negotiated
again if the connection lapses, but that involves holding up calls
whilst at least two packets are exchanged and various crypto bits are
performed - so we'd ideally like to cache it for a little while at
least.
(2) If a packet goes astray, we will need to retransmit a final ACK or
ABORT packet. To make this work, we need to keep around the
connection details for a little while.
(3) The locally held structures represent some amount of setup time, to be
weighed against their occupation of memory when idle.
To this end, the client connection cache is managed by a state machine on
each connection. There are five states:
(1) INACTIVE - The connection is not held in any list and may not have
been exposed to the world. If it has been previously exposed, it was
discarded from the idle list after expiring.
(2) WAITING - The connection is waiting for the number of client conns to
drop below the maximum capacity. Calls may be in progress upon it
from when it was active and got culled.
The connection is on the rxrpc_waiting_client_conns list which is kept
in to-be-granted order. Culled conns with waiters go to the back of
the queue just like new conns.
(3) ACTIVE - The connection has at least one call in progress upon it, it
may freely grant available channels to new calls and calls may be
waiting on it for channels to become available.
The connection is on the rxrpc_active_client_conns list which is kept
in activation order for culling purposes.
(4) CULLED - The connection got summarily culled to try and free up
capacity. Calls currently in progress on the connection are allowed
to continue, but new calls will have to wait. There can be no waiters
in this state - the conn would have to go to the WAITING state
instead.
(5) IDLE - The connection has no calls in progress upon it and must have
been exposed to the world (ie. the EXPOSED flag must be set). When it
expires, the EXPOSED flag is cleared and the connection transitions to
the INACTIVE state.
The connection is on the rxrpc_idle_client_conns list which is kept in
order of how soon they'll expire.
A connection in the ACTIVE or CULLED state must have at least one active
call upon it; if in the WAITING state it may have active calls upon it;
other states may not have active calls.
As long as a connection remains active and doesn't get culled, it may
continue to process calls - even if there are connections on the wait
queue. This simplifies things a bit and reduces the amount of checking we
need do.
There are a couple flags of relevance to the cache:
(1) EXPOSED - The connection ID got exposed to the world. If this flag is
set, an extra ref is added to the connection preventing it from being
reaped when it has no calls outstanding. This flag is cleared and the
ref dropped when a conn is discarded from the idle list.
(2) DONT_REUSE - The connection should be discarded as soon as possible and
should not be reused.
This commit also provides a number of new settings:
(*) /proc/net/rxrpc/max_client_conns
The maximum number of live client connections. Above this number, new
connections get added to the wait list and must wait for an active
conn to be culled. Culled connections can be reused, but they will go
to the back of the wait list and have to wait.
(*) /proc/net/rxrpc/reap_client_conns
If the number of desired connections exceeds the maximum above, the
active connection list will be culled until there are only this many
left in it.
(*) /proc/net/rxrpc/idle_conn_expiry
The normal expiry time for a client connection, provided there are
fewer than reap_client_conns of them around.
(*) /proc/net/rxrpc/idle_conn_fast_expiry
The expedited expiry time, used when there are more than
reap_client_conns of them around.
Note that I combined the Tx wait queue with the channel grant wait queue to
save space as only one of these should be in use at once.
Note also that, for the moment, the service connection cache still uses the
old connection management code.
Signed-off-by: David Howells <dhowells@redhat.com>
2016-08-24 14:30:52 +08:00
|
|
|
RXRPC_CONN_EXPOSED, /* Conn has extra ref for exposure */
|
|
|
|
RXRPC_CONN_DONT_REUSE, /* Don't reuse this connection */
|
|
|
|
RXRPC_CONN_COUNTED, /* Counted by rxrpc_nr_client_conns */
|
2016-06-27 17:32:02 +08:00
|
|
|
};
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Events that can be raised upon a connection.
|
|
|
|
*/
|
|
|
|
enum rxrpc_conn_event {
|
|
|
|
RXRPC_CONN_EV_CHALLENGE, /* Send challenge packet */
|
|
|
|
};
|
|
|
|
|
rxrpc: Improve management and caching of client connection objects
Improve the management and caching of client rxrpc connection objects.
From this point, client connections will be managed separately from service
connections because AF_RXRPC controls the creation and re-use of client
connections but doesn't have that luxury with service connections.
Further, there will be limits on the numbers of client connections that may
be live on a machine. No direct restriction will be placed on the number
of client calls, excepting that each client connection can support a
maximum of four concurrent calls.
Note that, for a number of reasons, we don't want to simply discard a
client connection as soon as the last call is apparently finished:
(1) Security is negotiated per-connection and the context is then shared
between all calls on that connection. The context can be negotiated
again if the connection lapses, but that involves holding up calls
whilst at least two packets are exchanged and various crypto bits are
performed - so we'd ideally like to cache it for a little while at
least.
(2) If a packet goes astray, we will need to retransmit a final ACK or
ABORT packet. To make this work, we need to keep around the
connection details for a little while.
(3) The locally held structures represent some amount of setup time, to be
weighed against their occupation of memory when idle.
To this end, the client connection cache is managed by a state machine on
each connection. There are five states:
(1) INACTIVE - The connection is not held in any list and may not have
been exposed to the world. If it has been previously exposed, it was
discarded from the idle list after expiring.
(2) WAITING - The connection is waiting for the number of client conns to
drop below the maximum capacity. Calls may be in progress upon it
from when it was active and got culled.
The connection is on the rxrpc_waiting_client_conns list which is kept
in to-be-granted order. Culled conns with waiters go to the back of
the queue just like new conns.
(3) ACTIVE - The connection has at least one call in progress upon it, it
may freely grant available channels to new calls and calls may be
waiting on it for channels to become available.
The connection is on the rxrpc_active_client_conns list which is kept
in activation order for culling purposes.
(4) CULLED - The connection got summarily culled to try and free up
capacity. Calls currently in progress on the connection are allowed
to continue, but new calls will have to wait. There can be no waiters
in this state - the conn would have to go to the WAITING state
instead.
(5) IDLE - The connection has no calls in progress upon it and must have
been exposed to the world (ie. the EXPOSED flag must be set). When it
expires, the EXPOSED flag is cleared and the connection transitions to
the INACTIVE state.
The connection is on the rxrpc_idle_client_conns list which is kept in
order of how soon they'll expire.
A connection in the ACTIVE or CULLED state must have at least one active
call upon it; if in the WAITING state it may have active calls upon it;
other states may not have active calls.
As long as a connection remains active and doesn't get culled, it may
continue to process calls - even if there are connections on the wait
queue. This simplifies things a bit and reduces the amount of checking we
need do.
There are a couple flags of relevance to the cache:
(1) EXPOSED - The connection ID got exposed to the world. If this flag is
set, an extra ref is added to the connection preventing it from being
reaped when it has no calls outstanding. This flag is cleared and the
ref dropped when a conn is discarded from the idle list.
(2) DONT_REUSE - The connection should be discarded as soon as possible and
should not be reused.
This commit also provides a number of new settings:
(*) /proc/net/rxrpc/max_client_conns
The maximum number of live client connections. Above this number, new
connections get added to the wait list and must wait for an active
conn to be culled. Culled connections can be reused, but they will go
to the back of the wait list and have to wait.
(*) /proc/net/rxrpc/reap_client_conns
If the number of desired connections exceeds the maximum above, the
active connection list will be culled until there are only this many
left in it.
(*) /proc/net/rxrpc/idle_conn_expiry
The normal expiry time for a client connection, provided there are
fewer than reap_client_conns of them around.
(*) /proc/net/rxrpc/idle_conn_fast_expiry
The expedited expiry time, used when there are more than
reap_client_conns of them around.
Note that I combined the Tx wait queue with the channel grant wait queue to
save space as only one of these should be in use at once.
Note also that, for the moment, the service connection cache still uses the
old connection management code.
Signed-off-by: David Howells <dhowells@redhat.com>
2016-08-24 14:30:52 +08:00
|
|
|
/*
|
|
|
|
* The connection cache state.
|
|
|
|
*/
|
|
|
|
enum rxrpc_conn_cache_state {
|
|
|
|
RXRPC_CONN_CLIENT_INACTIVE, /* Conn is not yet listed */
|
|
|
|
RXRPC_CONN_CLIENT_WAITING, /* Conn is on wait list, waiting for capacity */
|
|
|
|
RXRPC_CONN_CLIENT_ACTIVE, /* Conn is on active list, doing calls */
|
|
|
|
RXRPC_CONN_CLIENT_CULLED, /* Conn is culled and delisted, doing calls */
|
|
|
|
RXRPC_CONN_CLIENT_IDLE, /* Conn is on idle list, doing mostly nothing */
|
2016-09-17 17:49:14 +08:00
|
|
|
RXRPC_CONN__NR_CACHE_STATES
|
rxrpc: Improve management and caching of client connection objects
Improve the management and caching of client rxrpc connection objects.
From this point, client connections will be managed separately from service
connections because AF_RXRPC controls the creation and re-use of client
connections but doesn't have that luxury with service connections.
Further, there will be limits on the numbers of client connections that may
be live on a machine. No direct restriction will be placed on the number
of client calls, excepting that each client connection can support a
maximum of four concurrent calls.
Note that, for a number of reasons, we don't want to simply discard a
client connection as soon as the last call is apparently finished:
(1) Security is negotiated per-connection and the context is then shared
between all calls on that connection. The context can be negotiated
again if the connection lapses, but that involves holding up calls
whilst at least two packets are exchanged and various crypto bits are
performed - so we'd ideally like to cache it for a little while at
least.
(2) If a packet goes astray, we will need to retransmit a final ACK or
ABORT packet. To make this work, we need to keep around the
connection details for a little while.
(3) The locally held structures represent some amount of setup time, to be
weighed against their occupation of memory when idle.
To this end, the client connection cache is managed by a state machine on
each connection. There are five states:
(1) INACTIVE - The connection is not held in any list and may not have
been exposed to the world. If it has been previously exposed, it was
discarded from the idle list after expiring.
(2) WAITING - The connection is waiting for the number of client conns to
drop below the maximum capacity. Calls may be in progress upon it
from when it was active and got culled.
The connection is on the rxrpc_waiting_client_conns list which is kept
in to-be-granted order. Culled conns with waiters go to the back of
the queue just like new conns.
(3) ACTIVE - The connection has at least one call in progress upon it, it
may freely grant available channels to new calls and calls may be
waiting on it for channels to become available.
The connection is on the rxrpc_active_client_conns list which is kept
in activation order for culling purposes.
(4) CULLED - The connection got summarily culled to try and free up
capacity. Calls currently in progress on the connection are allowed
to continue, but new calls will have to wait. There can be no waiters
in this state - the conn would have to go to the WAITING state
instead.
(5) IDLE - The connection has no calls in progress upon it and must have
been exposed to the world (ie. the EXPOSED flag must be set). When it
expires, the EXPOSED flag is cleared and the connection transitions to
the INACTIVE state.
The connection is on the rxrpc_idle_client_conns list which is kept in
order of how soon they'll expire.
A connection in the ACTIVE or CULLED state must have at least one active
call upon it; if in the WAITING state it may have active calls upon it;
other states may not have active calls.
As long as a connection remains active and doesn't get culled, it may
continue to process calls - even if there are connections on the wait
queue. This simplifies things a bit and reduces the amount of checking we
need do.
There are a couple flags of relevance to the cache:
(1) EXPOSED - The connection ID got exposed to the world. If this flag is
set, an extra ref is added to the connection preventing it from being
reaped when it has no calls outstanding. This flag is cleared and the
ref dropped when a conn is discarded from the idle list.
(2) DONT_REUSE - The connection should be discarded as soon as possible and
should not be reused.
This commit also provides a number of new settings:
(*) /proc/net/rxrpc/max_client_conns
The maximum number of live client connections. Above this number, new
connections get added to the wait list and must wait for an active
conn to be culled. Culled connections can be reused, but they will go
to the back of the wait list and have to wait.
(*) /proc/net/rxrpc/reap_client_conns
If the number of desired connections exceeds the maximum above, the
active connection list will be culled until there are only this many
left in it.
(*) /proc/net/rxrpc/idle_conn_expiry
The normal expiry time for a client connection, provided there are
fewer than reap_client_conns of them around.
(*) /proc/net/rxrpc/idle_conn_fast_expiry
The expedited expiry time, used when there are more than
reap_client_conns of them around.
Note that I combined the Tx wait queue with the channel grant wait queue to
save space as only one of these should be in use at once.
Note also that, for the moment, the service connection cache still uses the
old connection management code.
Signed-off-by: David Howells <dhowells@redhat.com>
2016-08-24 14:30:52 +08:00
|
|
|
};
|
|
|
|
|
2016-06-27 17:32:02 +08:00
|
|
|
/*
|
|
|
|
* The connection protocol state.
|
|
|
|
*/
|
|
|
|
enum rxrpc_conn_proto_state {
|
|
|
|
RXRPC_CONN_UNUSED, /* Connection not yet attempted */
|
|
|
|
RXRPC_CONN_CLIENT, /* Client connection */
|
2016-09-08 18:10:12 +08:00
|
|
|
RXRPC_CONN_SERVICE_PREALLOC, /* Service connection preallocation */
|
2016-06-27 17:32:02 +08:00
|
|
|
RXRPC_CONN_SERVICE_UNSECURED, /* Service unsecured connection */
|
|
|
|
RXRPC_CONN_SERVICE_CHALLENGING, /* Service challenging for security */
|
|
|
|
RXRPC_CONN_SERVICE, /* Service secured connection */
|
|
|
|
RXRPC_CONN_REMOTELY_ABORTED, /* Conn aborted by peer */
|
|
|
|
RXRPC_CONN_LOCALLY_ABORTED, /* Conn aborted locally */
|
|
|
|
RXRPC_CONN__NR_STATES
|
|
|
|
};
|
|
|
|
|
2007-04-27 06:48:28 +08:00
|
|
|
/*
|
|
|
|
* RxRPC connection definition
|
2016-06-17 17:06:56 +08:00
|
|
|
* - matched by { local, peer, epoch, conn_id, direction }
|
2007-04-27 06:48:28 +08:00
|
|
|
* - each connection can only handle four simultaneous calls
|
|
|
|
*/
|
|
|
|
struct rxrpc_connection {
|
2016-04-04 21:00:36 +08:00
|
|
|
struct rxrpc_conn_proto proto;
|
|
|
|
struct rxrpc_conn_parameters params;
|
|
|
|
|
rxrpc: Improve management and caching of client connection objects
Improve the management and caching of client rxrpc connection objects.
From this point, client connections will be managed separately from service
connections because AF_RXRPC controls the creation and re-use of client
connections but doesn't have that luxury with service connections.
Further, there will be limits on the numbers of client connections that may
be live on a machine. No direct restriction will be placed on the number
of client calls, excepting that each client connection can support a
maximum of four concurrent calls.
Note that, for a number of reasons, we don't want to simply discard a
client connection as soon as the last call is apparently finished:
(1) Security is negotiated per-connection and the context is then shared
between all calls on that connection. The context can be negotiated
again if the connection lapses, but that involves holding up calls
whilst at least two packets are exchanged and various crypto bits are
performed - so we'd ideally like to cache it for a little while at
least.
(2) If a packet goes astray, we will need to retransmit a final ACK or
ABORT packet. To make this work, we need to keep around the
connection details for a little while.
(3) The locally held structures represent some amount of setup time, to be
weighed against their occupation of memory when idle.
To this end, the client connection cache is managed by a state machine on
each connection. There are five states:
(1) INACTIVE - The connection is not held in any list and may not have
been exposed to the world. If it has been previously exposed, it was
discarded from the idle list after expiring.
(2) WAITING - The connection is waiting for the number of client conns to
drop below the maximum capacity. Calls may be in progress upon it
from when it was active and got culled.
The connection is on the rxrpc_waiting_client_conns list which is kept
in to-be-granted order. Culled conns with waiters go to the back of
the queue just like new conns.
(3) ACTIVE - The connection has at least one call in progress upon it, it
may freely grant available channels to new calls and calls may be
waiting on it for channels to become available.
The connection is on the rxrpc_active_client_conns list which is kept
in activation order for culling purposes.
(4) CULLED - The connection got summarily culled to try and free up
capacity. Calls currently in progress on the connection are allowed
to continue, but new calls will have to wait. There can be no waiters
in this state - the conn would have to go to the WAITING state
instead.
(5) IDLE - The connection has no calls in progress upon it and must have
been exposed to the world (ie. the EXPOSED flag must be set). When it
expires, the EXPOSED flag is cleared and the connection transitions to
the INACTIVE state.
The connection is on the rxrpc_idle_client_conns list which is kept in
order of how soon they'll expire.
A connection in the ACTIVE or CULLED state must have at least one active
call upon it; if in the WAITING state it may have active calls upon it;
other states may not have active calls.
As long as a connection remains active and doesn't get culled, it may
continue to process calls - even if there are connections on the wait
queue. This simplifies things a bit and reduces the amount of checking we
need do.
There are a couple flags of relevance to the cache:
(1) EXPOSED - The connection ID got exposed to the world. If this flag is
set, an extra ref is added to the connection preventing it from being
reaped when it has no calls outstanding. This flag is cleared and the
ref dropped when a conn is discarded from the idle list.
(2) DONT_REUSE - The connection should be discarded as soon as possible and
should not be reused.
This commit also provides a number of new settings:
(*) /proc/net/rxrpc/max_client_conns
The maximum number of live client connections. Above this number, new
connections get added to the wait list and must wait for an active
conn to be culled. Culled connections can be reused, but they will go
to the back of the wait list and have to wait.
(*) /proc/net/rxrpc/reap_client_conns
If the number of desired connections exceeds the maximum above, the
active connection list will be culled until there are only this many
left in it.
(*) /proc/net/rxrpc/idle_conn_expiry
The normal expiry time for a client connection, provided there are
fewer than reap_client_conns of them around.
(*) /proc/net/rxrpc/idle_conn_fast_expiry
The expedited expiry time, used when there are more than
reap_client_conns of them around.
Note that I combined the Tx wait queue with the channel grant wait queue to
save space as only one of these should be in use at once.
Note also that, for the moment, the service connection cache still uses the
old connection management code.
Signed-off-by: David Howells <dhowells@redhat.com>
2016-08-24 14:30:52 +08:00
|
|
|
atomic_t usage;
|
|
|
|
struct rcu_head rcu;
|
|
|
|
struct list_head cache_link;
|
rxrpc: Call channels should have separate call number spaces
Each channel on a connection has a separate, independent number space from
which to allocate callNumber values. It is entirely possible, for example,
to have a connection with four active calls, each with call number 1.
Note that the callNumber values for any particular channel don't have to
start at 1, but they are supposed to increment monotonically for that
channel from a client's perspective and may not be reused once the call
number is transmitted (until the epoch cycles all the way back round).
Currently, however, call numbers are allocated on a per-connection basis
and, further, are held in an rb-tree. The rb-tree is redundant as the four
channel pointers in the rxrpc_connection struct are entirely capable of
pointing to all the calls currently in progress on a connection.
To this end, make the following changes:
(1) Handle call number allocation independently per channel.
(2) Get rid of the conn->calls rb-tree. This is overkill as a connection
may have a maximum of four calls in progress at any one time. Use the
pointers in the channels[] array instead, indexed by the channel
number from the packet.
(3) For each channel, save the result of the last call that was in
progress on that channel in conn->channels[] so that the final ACK or
ABORT packet can be replayed if necessary. Any call earlier than that
is just ignored. If we've seen the next call number in a packet, the
last one is most definitely defunct.
(4) When generating a RESPONSE packet for a connection, the call number
counter for each channel must be included in it.
(5) When parsing a RESPONSE packet for a connection, the call number
counters contained therein should be used to set the minimum expected
call numbers on each channel.
To do in future commits:
(1) Replay terminal packets based on the last call stored in
conn->channels[].
(2) Connections should be retired before the callNumber space on any
channel runs out.
(3) A server is expected to disregard or reject any new incoming call that
has a call number less than the current call number counter. The call
number counter for that channel must be advanced to the new call
number.
Note that the server cannot just require that the next call that it
sees on a channel be exactly the call number counter + 1 because then
there's a scenario that could cause a problem: The client transmits a
packet to initiate a connection, the network goes out, the server
sends an ACK (which gets lost), the client sends an ABORT (which also
gets lost); the network then reconnects, the client then reuses the
call number for the next call (it doesn't know the server already saw
the call number), but the server thinks it already has the first
packet of this call (it doesn't know that the client doesn't know that
it saw the call number the first time).
Signed-off-by: David Howells <dhowells@redhat.com>
2016-06-27 21:39:44 +08:00
|
|
|
|
rxrpc: Improve management and caching of client connection objects
Improve the management and caching of client rxrpc connection objects.
From this point, client connections will be managed separately from service
connections because AF_RXRPC controls the creation and re-use of client
connections but doesn't have that luxury with service connections.
Further, there will be limits on the numbers of client connections that may
be live on a machine. No direct restriction will be placed on the number
of client calls, excepting that each client connection can support a
maximum of four concurrent calls.
Note that, for a number of reasons, we don't want to simply discard a
client connection as soon as the last call is apparently finished:
(1) Security is negotiated per-connection and the context is then shared
between all calls on that connection. The context can be negotiated
again if the connection lapses, but that involves holding up calls
whilst at least two packets are exchanged and various crypto bits are
performed - so we'd ideally like to cache it for a little while at
least.
(2) If a packet goes astray, we will need to retransmit a final ACK or
ABORT packet. To make this work, we need to keep around the
connection details for a little while.
(3) The locally held structures represent some amount of setup time, to be
weighed against their occupation of memory when idle.
To this end, the client connection cache is managed by a state machine on
each connection. There are five states:
(1) INACTIVE - The connection is not held in any list and may not have
been exposed to the world. If it has been previously exposed, it was
discarded from the idle list after expiring.
(2) WAITING - The connection is waiting for the number of client conns to
drop below the maximum capacity. Calls may be in progress upon it
from when it was active and got culled.
The connection is on the rxrpc_waiting_client_conns list which is kept
in to-be-granted order. Culled conns with waiters go to the back of
the queue just like new conns.
(3) ACTIVE - The connection has at least one call in progress upon it, it
may freely grant available channels to new calls and calls may be
waiting on it for channels to become available.
The connection is on the rxrpc_active_client_conns list which is kept
in activation order for culling purposes.
(4) CULLED - The connection got summarily culled to try and free up
capacity. Calls currently in progress on the connection are allowed
to continue, but new calls will have to wait. There can be no waiters
in this state - the conn would have to go to the WAITING state
instead.
(5) IDLE - The connection has no calls in progress upon it and must have
been exposed to the world (ie. the EXPOSED flag must be set). When it
expires, the EXPOSED flag is cleared and the connection transitions to
the INACTIVE state.
The connection is on the rxrpc_idle_client_conns list which is kept in
order of how soon they'll expire.
A connection in the ACTIVE or CULLED state must have at least one active
call upon it; if in the WAITING state it may have active calls upon it;
other states may not have active calls.
As long as a connection remains active and doesn't get culled, it may
continue to process calls - even if there are connections on the wait
queue. This simplifies things a bit and reduces the amount of checking we
need do.
There are a couple flags of relevance to the cache:
(1) EXPOSED - The connection ID got exposed to the world. If this flag is
set, an extra ref is added to the connection preventing it from being
reaped when it has no calls outstanding. This flag is cleared and the
ref dropped when a conn is discarded from the idle list.
(2) DONT_REUSE - The connection should be discarded as soon as possible and
should not be reused.
This commit also provides a number of new settings:
(*) /proc/net/rxrpc/max_client_conns
The maximum number of live client connections. Above this number, new
connections get added to the wait list and must wait for an active
conn to be culled. Culled connections can be reused, but they will go
to the back of the wait list and have to wait.
(*) /proc/net/rxrpc/reap_client_conns
If the number of desired connections exceeds the maximum above, the
active connection list will be culled until there are only this many
left in it.
(*) /proc/net/rxrpc/idle_conn_expiry
The normal expiry time for a client connection, provided there are
fewer than reap_client_conns of them around.
(*) /proc/net/rxrpc/idle_conn_fast_expiry
The expedited expiry time, used when there are more than
reap_client_conns of them around.
Note that I combined the Tx wait queue with the channel grant wait queue to
save space as only one of these should be in use at once.
Note also that, for the moment, the service connection cache still uses the
old connection management code.
Signed-off-by: David Howells <dhowells@redhat.com>
2016-08-24 14:30:52 +08:00
|
|
|
spinlock_t channel_lock;
|
|
|
|
unsigned char active_chans; /* Mask of active channels */
|
|
|
|
#define RXRPC_ACTIVE_CHANS_MASK ((1 << RXRPC_MAXCALLS) - 1)
|
|
|
|
struct list_head waiting_calls; /* Calls waiting for channels */
|
rxrpc: Call channels should have separate call number spaces
Each channel on a connection has a separate, independent number space from
which to allocate callNumber values. It is entirely possible, for example,
to have a connection with four active calls, each with call number 1.
Note that the callNumber values for any particular channel don't have to
start at 1, but they are supposed to increment monotonically for that
channel from a client's perspective and may not be reused once the call
number is transmitted (until the epoch cycles all the way back round).
Currently, however, call numbers are allocated on a per-connection basis
and, further, are held in an rb-tree. The rb-tree is redundant as the four
channel pointers in the rxrpc_connection struct are entirely capable of
pointing to all the calls currently in progress on a connection.
To this end, make the following changes:
(1) Handle call number allocation independently per channel.
(2) Get rid of the conn->calls rb-tree. This is overkill as a connection
may have a maximum of four calls in progress at any one time. Use the
pointers in the channels[] array instead, indexed by the channel
number from the packet.
(3) For each channel, save the result of the last call that was in
progress on that channel in conn->channels[] so that the final ACK or
ABORT packet can be replayed if necessary. Any call earlier than that
is just ignored. If we've seen the next call number in a packet, the
last one is most definitely defunct.
(4) When generating a RESPONSE packet for a connection, the call number
counter for each channel must be included in it.
(5) When parsing a RESPONSE packet for a connection, the call number
counters contained therein should be used to set the minimum expected
call numbers on each channel.
To do in future commits:
(1) Replay terminal packets based on the last call stored in
conn->channels[].
(2) Connections should be retired before the callNumber space on any
channel runs out.
(3) A server is expected to disregard or reject any new incoming call that
has a call number less than the current call number counter. The call
number counter for that channel must be advanced to the new call
number.
Note that the server cannot just require that the next call that it
sees on a channel be exactly the call number counter + 1 because then
there's a scenario that could cause a problem: The client transmits a
packet to initiate a connection, the network goes out, the server
sends an ACK (which gets lost), the client sends an ABORT (which also
gets lost); the network then reconnects, the client then reuses the
call number for the next call (it doesn't know the server already saw
the call number), but the server thinks it already has the first
packet of this call (it doesn't know that the client doesn't know that
it saw the call number the first time).
Signed-off-by: David Howells <dhowells@redhat.com>
2016-06-27 21:39:44 +08:00
|
|
|
struct rxrpc_channel {
|
|
|
|
struct rxrpc_call __rcu *call; /* Active call */
|
|
|
|
u32 call_id; /* ID of current call */
|
|
|
|
u32 call_counter; /* Call ID counter */
|
|
|
|
u32 last_call; /* ID of last call */
|
2016-08-23 22:27:25 +08:00
|
|
|
u8 last_type; /* Type of last packet */
|
|
|
|
u16 last_service_id;
|
|
|
|
union {
|
|
|
|
u32 last_seq;
|
|
|
|
u32 last_abort;
|
|
|
|
};
|
rxrpc: Call channels should have separate call number spaces
Each channel on a connection has a separate, independent number space from
which to allocate callNumber values. It is entirely possible, for example,
to have a connection with four active calls, each with call number 1.
Note that the callNumber values for any particular channel don't have to
start at 1, but they are supposed to increment monotonically for that
channel from a client's perspective and may not be reused once the call
number is transmitted (until the epoch cycles all the way back round).
Currently, however, call numbers are allocated on a per-connection basis
and, further, are held in an rb-tree. The rb-tree is redundant as the four
channel pointers in the rxrpc_connection struct are entirely capable of
pointing to all the calls currently in progress on a connection.
To this end, make the following changes:
(1) Handle call number allocation independently per channel.
(2) Get rid of the conn->calls rb-tree. This is overkill as a connection
may have a maximum of four calls in progress at any one time. Use the
pointers in the channels[] array instead, indexed by the channel
number from the packet.
(3) For each channel, save the result of the last call that was in
progress on that channel in conn->channels[] so that the final ACK or
ABORT packet can be replayed if necessary. Any call earlier than that
is just ignored. If we've seen the next call number in a packet, the
last one is most definitely defunct.
(4) When generating a RESPONSE packet for a connection, the call number
counter for each channel must be included in it.
(5) When parsing a RESPONSE packet for a connection, the call number
counters contained therein should be used to set the minimum expected
call numbers on each channel.
To do in future commits:
(1) Replay terminal packets based on the last call stored in
conn->channels[].
(2) Connections should be retired before the callNumber space on any
channel runs out.
(3) A server is expected to disregard or reject any new incoming call that
has a call number less than the current call number counter. The call
number counter for that channel must be advanced to the new call
number.
Note that the server cannot just require that the next call that it
sees on a channel be exactly the call number counter + 1 because then
there's a scenario that could cause a problem: The client transmits a
packet to initiate a connection, the network goes out, the server
sends an ACK (which gets lost), the client sends an ABORT (which also
gets lost); the network then reconnects, the client then reuses the
call number for the next call (it doesn't know the server already saw
the call number), but the server thinks it already has the first
packet of this call (it doesn't know that the client doesn't know that
it saw the call number the first time).
Signed-off-by: David Howells <dhowells@redhat.com>
2016-06-27 21:39:44 +08:00
|
|
|
} channels[RXRPC_MAXCALLS];
|
2016-06-17 22:42:35 +08:00
|
|
|
|
2007-04-27 06:48:28 +08:00
|
|
|
struct work_struct processor; /* connection event processor */
|
2016-06-17 22:42:35 +08:00
|
|
|
union {
|
|
|
|
struct rb_node client_node; /* Node in local->client_conns */
|
2016-06-17 17:06:56 +08:00
|
|
|
struct rb_node service_node; /* Node in peer->service_conns */
|
2016-06-17 22:42:35 +08:00
|
|
|
};
|
2016-08-24 14:30:52 +08:00
|
|
|
struct list_head proc_link; /* link in procfs list */
|
2007-04-27 06:48:28 +08:00
|
|
|
struct list_head link; /* link in master connection list */
|
|
|
|
struct sk_buff_head rx_queue; /* received conn-level packets */
|
2016-04-08 00:23:51 +08:00
|
|
|
const struct rxrpc_security *security; /* applied security module */
|
2007-04-27 06:48:28 +08:00
|
|
|
struct key *server_key; /* security for this service */
|
2016-01-24 21:19:01 +08:00
|
|
|
struct crypto_skcipher *cipher; /* encryption handle */
|
2007-04-27 06:48:28 +08:00
|
|
|
struct rxrpc_crypt csum_iv; /* packet checksum base */
|
2016-04-04 21:00:37 +08:00
|
|
|
unsigned long flags;
|
2007-04-27 06:48:28 +08:00
|
|
|
unsigned long events;
|
2016-08-23 22:27:24 +08:00
|
|
|
unsigned long idle_timestamp; /* Time at which last became idle */
|
2007-04-27 06:48:28 +08:00
|
|
|
spinlock_t state_lock; /* state-change lock */
|
2016-09-08 18:10:11 +08:00
|
|
|
enum rxrpc_conn_cache_state cache_state;
|
|
|
|
enum rxrpc_conn_proto_state state; /* current state of connection */
|
2016-04-08 00:23:30 +08:00
|
|
|
u32 local_abort; /* local abort code */
|
|
|
|
u32 remote_abort; /* remote abort code */
|
2007-04-27 06:48:28 +08:00
|
|
|
int debug_id; /* debug ID for printks */
|
|
|
|
atomic_t serial; /* packet serial number counter */
|
2016-08-23 22:27:25 +08:00
|
|
|
unsigned int hi_serial; /* highest serial number received */
|
2016-09-22 07:29:31 +08:00
|
|
|
u32 security_nonce; /* response re-use preventer */
|
2007-04-27 06:48:28 +08:00
|
|
|
u8 size_align; /* data size alignment (for security) */
|
|
|
|
u8 security_size; /* security header size */
|
|
|
|
u8 security_ix; /* security type */
|
|
|
|
u8 out_clientflag; /* RXRPC_CLIENT_INITIATED if we are client */
|
|
|
|
};
|
|
|
|
|
2016-03-04 23:53:46 +08:00
|
|
|
/*
|
|
|
|
* Flags in call->flags.
|
|
|
|
*/
|
|
|
|
enum rxrpc_call_flag {
|
|
|
|
RXRPC_CALL_RELEASED, /* call has been released - no more message to userspace */
|
|
|
|
RXRPC_CALL_HAS_USERID, /* has a user ID attached */
|
2016-08-23 22:27:24 +08:00
|
|
|
RXRPC_CALL_IS_SERVICE, /* Call is service call */
|
rxrpc: Improve management and caching of client connection objects
Improve the management and caching of client rxrpc connection objects.
From this point, client connections will be managed separately from service
connections because AF_RXRPC controls the creation and re-use of client
connections but doesn't have that luxury with service connections.
Further, there will be limits on the numbers of client connections that may
be live on a machine. No direct restriction will be placed on the number
of client calls, excepting that each client connection can support a
maximum of four concurrent calls.
Note that, for a number of reasons, we don't want to simply discard a
client connection as soon as the last call is apparently finished:
(1) Security is negotiated per-connection and the context is then shared
between all calls on that connection. The context can be negotiated
again if the connection lapses, but that involves holding up calls
whilst at least two packets are exchanged and various crypto bits are
performed - so we'd ideally like to cache it for a little while at
least.
(2) If a packet goes astray, we will need to retransmit a final ACK or
ABORT packet. To make this work, we need to keep around the
connection details for a little while.
(3) The locally held structures represent some amount of setup time, to be
weighed against their occupation of memory when idle.
To this end, the client connection cache is managed by a state machine on
each connection. There are five states:
(1) INACTIVE - The connection is not held in any list and may not have
been exposed to the world. If it has been previously exposed, it was
discarded from the idle list after expiring.
(2) WAITING - The connection is waiting for the number of client conns to
drop below the maximum capacity. Calls may be in progress upon it
from when it was active and got culled.
The connection is on the rxrpc_waiting_client_conns list which is kept
in to-be-granted order. Culled conns with waiters go to the back of
the queue just like new conns.
(3) ACTIVE - The connection has at least one call in progress upon it, it
may freely grant available channels to new calls and calls may be
waiting on it for channels to become available.
The connection is on the rxrpc_active_client_conns list which is kept
in activation order for culling purposes.
(4) CULLED - The connection got summarily culled to try and free up
capacity. Calls currently in progress on the connection are allowed
to continue, but new calls will have to wait. There can be no waiters
in this state - the conn would have to go to the WAITING state
instead.
(5) IDLE - The connection has no calls in progress upon it and must have
been exposed to the world (ie. the EXPOSED flag must be set). When it
expires, the EXPOSED flag is cleared and the connection transitions to
the INACTIVE state.
The connection is on the rxrpc_idle_client_conns list which is kept in
order of how soon they'll expire.
A connection in the ACTIVE or CULLED state must have at least one active
call upon it; if in the WAITING state it may have active calls upon it;
other states may not have active calls.
As long as a connection remains active and doesn't get culled, it may
continue to process calls - even if there are connections on the wait
queue. This simplifies things a bit and reduces the amount of checking we
need do.
There are a couple flags of relevance to the cache:
(1) EXPOSED - The connection ID got exposed to the world. If this flag is
set, an extra ref is added to the connection preventing it from being
reaped when it has no calls outstanding. This flag is cleared and the
ref dropped when a conn is discarded from the idle list.
(2) DONT_REUSE - The connection should be discarded as soon as possible and
should not be reused.
This commit also provides a number of new settings:
(*) /proc/net/rxrpc/max_client_conns
The maximum number of live client connections. Above this number, new
connections get added to the wait list and must wait for an active
conn to be culled. Culled connections can be reused, but they will go
to the back of the wait list and have to wait.
(*) /proc/net/rxrpc/reap_client_conns
If the number of desired connections exceeds the maximum above, the
active connection list will be culled until there are only this many
left in it.
(*) /proc/net/rxrpc/idle_conn_expiry
The normal expiry time for a client connection, provided there are
fewer than reap_client_conns of them around.
(*) /proc/net/rxrpc/idle_conn_fast_expiry
The expedited expiry time, used when there are more than
reap_client_conns of them around.
Note that I combined the Tx wait queue with the channel grant wait queue to
save space as only one of these should be in use at once.
Note also that, for the moment, the service connection cache still uses the
old connection management code.
Signed-off-by: David Howells <dhowells@redhat.com>
2016-08-24 14:30:52 +08:00
|
|
|
RXRPC_CALL_EXPOSED, /* The call was exposed to the world */
|
rxrpc: Rewrite the data and ack handling code
Rewrite the data and ack handling code such that:
(1) Parsing of received ACK and ABORT packets and the distribution and the
filing of DATA packets happens entirely within the data_ready context
called from the UDP socket. This allows us to process and discard ACK
and ABORT packets much more quickly (they're no longer stashed on a
queue for a background thread to process).
(2) We avoid calling skb_clone(), pskb_pull() and pskb_trim(). We instead
keep track of the offset and length of the content of each packet in
the sk_buff metadata. This means we don't do any allocation in the
receive path.
(3) Jumbo DATA packet parsing is now done in data_ready context. Rather
than cloning the packet once for each subpacket and pulling/trimming
it, we file the packet multiple times with an annotation for each
indicating which subpacket is there. From that we can directly
calculate the offset and length.
(4) A call's receive queue can be accessed without taking locks (memory
barriers do have to be used, though).
(5) Incoming calls are set up from preallocated resources and immediately
made live. They can than have packets queued upon them and ACKs
generated. If insufficient resources exist, DATA packet #1 is given a
BUSY reply and other DATA packets are discarded).
(6) sk_buffs no longer take a ref on their parent call.
To make this work, the following changes are made:
(1) Each call's receive buffer is now a circular buffer of sk_buff
pointers (rxtx_buffer) rather than a number of sk_buff_heads spread
between the call and the socket. This permits each sk_buff to be in
the buffer multiple times. The receive buffer is reused for the
transmit buffer.
(2) A circular buffer of annotations (rxtx_annotations) is kept parallel
to the data buffer. Transmission phase annotations indicate whether a
buffered packet has been ACK'd or not and whether it needs
retransmission.
Receive phase annotations indicate whether a slot holds a whole packet
or a jumbo subpacket and, if the latter, which subpacket. They also
note whether the packet has been decrypted in place.
(3) DATA packet window tracking is much simplified. Each phase has just
two numbers representing the window (rx_hard_ack/rx_top and
tx_hard_ack/tx_top).
The hard_ack number is the sequence number before base of the window,
representing the last packet the other side says it has consumed.
hard_ack starts from 0 and the first packet is sequence number 1.
The top number is the sequence number of the highest-numbered packet
residing in the buffer. Packets between hard_ack+1 and top are
soft-ACK'd to indicate they've been received, but not yet consumed.
Four macros, before(), before_eq(), after() and after_eq() are added
to compare sequence numbers within the window. This allows for the
top of the window to wrap when the hard-ack sequence number gets close
to the limit.
Two flags, RXRPC_CALL_RX_LAST and RXRPC_CALL_TX_LAST, are added also
to indicate when rx_top and tx_top point at the packets with the
LAST_PACKET bit set, indicating the end of the phase.
(4) Calls are queued on the socket 'receive queue' rather than packets.
This means that we don't need have to invent dummy packets to queue to
indicate abnormal/terminal states and we don't have to keep metadata
packets (such as ABORTs) around
(5) The offset and length of a (sub)packet's content are now passed to
the verify_packet security op. This is currently expected to decrypt
the packet in place and validate it.
However, there's now nowhere to store the revised offset and length of
the actual data within the decrypted blob (there may be a header and
padding to skip) because an sk_buff may represent multiple packets, so
a locate_data security op is added to retrieve these details from the
sk_buff content when needed.
(6) recvmsg() now has to handle jumbo subpackets, where each subpacket is
individually secured and needs to be individually decrypted. The code
to do this is broken out into rxrpc_recvmsg_data() and shared with the
kernel API. It now iterates over the call's receive buffer rather
than walking the socket receive queue.
Additional changes:
(1) The timers are condensed to a single timer that is set for the soonest
of three timeouts (delayed ACK generation, DATA retransmission and
call lifespan).
(2) Transmission of ACK and ABORT packets is effected immediately from
process-context socket ops/kernel API calls that cause them instead of
them being punted off to a background work item. The data_ready
handler still has to defer to the background, though.
(3) A shutdown op is added to the AF_RXRPC socket so that the AFS
filesystem can shut down the socket and flush its own work items
before closing the socket to deal with any in-progress service calls.
Future additional changes that will need to be considered:
(1) Make sure that a call doesn't hog the front of the queue by receiving
data from the network as fast as userspace is consuming it to the
exclusion of other calls.
(2) Transmit delayed ACKs from within recvmsg() when we've consumed
sufficiently more packets to avoid the background work item needing to
run.
Signed-off-by: David Howells <dhowells@redhat.com>
2016-09-08 18:10:12 +08:00
|
|
|
RXRPC_CALL_RX_LAST, /* Received the last packet (at rxtx_top) */
|
|
|
|
RXRPC_CALL_TX_LAST, /* Last packet in Tx buffer (at rxtx_top) */
|
2016-09-22 07:29:31 +08:00
|
|
|
RXRPC_CALL_PINGING, /* Ping in process */
|
2016-03-04 23:53:46 +08:00
|
|
|
};
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Events that can be raised on a call.
|
|
|
|
*/
|
|
|
|
enum rxrpc_call_event {
|
2016-03-04 23:53:46 +08:00
|
|
|
RXRPC_CALL_EV_ACK, /* need to generate ACK */
|
|
|
|
RXRPC_CALL_EV_ABORT, /* need to generate abort */
|
rxrpc: Rewrite the data and ack handling code
Rewrite the data and ack handling code such that:
(1) Parsing of received ACK and ABORT packets and the distribution and the
filing of DATA packets happens entirely within the data_ready context
called from the UDP socket. This allows us to process and discard ACK
and ABORT packets much more quickly (they're no longer stashed on a
queue for a background thread to process).
(2) We avoid calling skb_clone(), pskb_pull() and pskb_trim(). We instead
keep track of the offset and length of the content of each packet in
the sk_buff metadata. This means we don't do any allocation in the
receive path.
(3) Jumbo DATA packet parsing is now done in data_ready context. Rather
than cloning the packet once for each subpacket and pulling/trimming
it, we file the packet multiple times with an annotation for each
indicating which subpacket is there. From that we can directly
calculate the offset and length.
(4) A call's receive queue can be accessed without taking locks (memory
barriers do have to be used, though).
(5) Incoming calls are set up from preallocated resources and immediately
made live. They can than have packets queued upon them and ACKs
generated. If insufficient resources exist, DATA packet #1 is given a
BUSY reply and other DATA packets are discarded).
(6) sk_buffs no longer take a ref on their parent call.
To make this work, the following changes are made:
(1) Each call's receive buffer is now a circular buffer of sk_buff
pointers (rxtx_buffer) rather than a number of sk_buff_heads spread
between the call and the socket. This permits each sk_buff to be in
the buffer multiple times. The receive buffer is reused for the
transmit buffer.
(2) A circular buffer of annotations (rxtx_annotations) is kept parallel
to the data buffer. Transmission phase annotations indicate whether a
buffered packet has been ACK'd or not and whether it needs
retransmission.
Receive phase annotations indicate whether a slot holds a whole packet
or a jumbo subpacket and, if the latter, which subpacket. They also
note whether the packet has been decrypted in place.
(3) DATA packet window tracking is much simplified. Each phase has just
two numbers representing the window (rx_hard_ack/rx_top and
tx_hard_ack/tx_top).
The hard_ack number is the sequence number before base of the window,
representing the last packet the other side says it has consumed.
hard_ack starts from 0 and the first packet is sequence number 1.
The top number is the sequence number of the highest-numbered packet
residing in the buffer. Packets between hard_ack+1 and top are
soft-ACK'd to indicate they've been received, but not yet consumed.
Four macros, before(), before_eq(), after() and after_eq() are added
to compare sequence numbers within the window. This allows for the
top of the window to wrap when the hard-ack sequence number gets close
to the limit.
Two flags, RXRPC_CALL_RX_LAST and RXRPC_CALL_TX_LAST, are added also
to indicate when rx_top and tx_top point at the packets with the
LAST_PACKET bit set, indicating the end of the phase.
(4) Calls are queued on the socket 'receive queue' rather than packets.
This means that we don't need have to invent dummy packets to queue to
indicate abnormal/terminal states and we don't have to keep metadata
packets (such as ABORTs) around
(5) The offset and length of a (sub)packet's content are now passed to
the verify_packet security op. This is currently expected to decrypt
the packet in place and validate it.
However, there's now nowhere to store the revised offset and length of
the actual data within the decrypted blob (there may be a header and
padding to skip) because an sk_buff may represent multiple packets, so
a locate_data security op is added to retrieve these details from the
sk_buff content when needed.
(6) recvmsg() now has to handle jumbo subpackets, where each subpacket is
individually secured and needs to be individually decrypted. The code
to do this is broken out into rxrpc_recvmsg_data() and shared with the
kernel API. It now iterates over the call's receive buffer rather
than walking the socket receive queue.
Additional changes:
(1) The timers are condensed to a single timer that is set for the soonest
of three timeouts (delayed ACK generation, DATA retransmission and
call lifespan).
(2) Transmission of ACK and ABORT packets is effected immediately from
process-context socket ops/kernel API calls that cause them instead of
them being punted off to a background work item. The data_ready
handler still has to defer to the background, though.
(3) A shutdown op is added to the AF_RXRPC socket so that the AFS
filesystem can shut down the socket and flush its own work items
before closing the socket to deal with any in-progress service calls.
Future additional changes that will need to be considered:
(1) Make sure that a call doesn't hog the front of the queue by receiving
data from the network as fast as userspace is consuming it to the
exclusion of other calls.
(2) Transmit delayed ACKs from within recvmsg() when we've consumed
sufficiently more packets to avoid the background work item needing to
run.
Signed-off-by: David Howells <dhowells@redhat.com>
2016-09-08 18:10:12 +08:00
|
|
|
RXRPC_CALL_EV_TIMER, /* Timer expired */
|
2016-03-04 23:53:46 +08:00
|
|
|
RXRPC_CALL_EV_RESEND, /* Tx resend required */
|
2016-03-04 23:53:46 +08:00
|
|
|
};
|
|
|
|
|
|
|
|
/*
|
|
|
|
* The states that a call can be in.
|
|
|
|
*/
|
|
|
|
enum rxrpc_call_state {
|
2016-06-17 22:42:35 +08:00
|
|
|
RXRPC_CALL_UNINITIALISED,
|
|
|
|
RXRPC_CALL_CLIENT_AWAIT_CONN, /* - client waiting for connection to become available */
|
2016-03-04 23:53:46 +08:00
|
|
|
RXRPC_CALL_CLIENT_SEND_REQUEST, /* - client sending request phase */
|
|
|
|
RXRPC_CALL_CLIENT_AWAIT_REPLY, /* - client awaiting reply */
|
|
|
|
RXRPC_CALL_CLIENT_RECV_REPLY, /* - client receiving reply phase */
|
2016-09-08 18:10:12 +08:00
|
|
|
RXRPC_CALL_SERVER_PREALLOC, /* - service preallocation */
|
2016-03-04 23:53:46 +08:00
|
|
|
RXRPC_CALL_SERVER_SECURING, /* - server securing request connection */
|
|
|
|
RXRPC_CALL_SERVER_ACCEPTING, /* - server accepting request */
|
|
|
|
RXRPC_CALL_SERVER_RECV_REQUEST, /* - server receiving request */
|
|
|
|
RXRPC_CALL_SERVER_ACK_REQUEST, /* - server pending ACK of request */
|
|
|
|
RXRPC_CALL_SERVER_SEND_REPLY, /* - server sending reply */
|
|
|
|
RXRPC_CALL_SERVER_AWAIT_ACK, /* - server awaiting final ACK */
|
2016-08-30 16:49:28 +08:00
|
|
|
RXRPC_CALL_COMPLETE, /* - call complete */
|
|
|
|
NR__RXRPC_CALL_STATES
|
|
|
|
};
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Call completion condition (state == RXRPC_CALL_COMPLETE).
|
|
|
|
*/
|
|
|
|
enum rxrpc_call_completion {
|
|
|
|
RXRPC_CALL_SUCCEEDED, /* - Normal termination */
|
2016-03-04 23:53:46 +08:00
|
|
|
RXRPC_CALL_REMOTELY_ABORTED, /* - call aborted by peer */
|
|
|
|
RXRPC_CALL_LOCALLY_ABORTED, /* - call aborted locally on error or close */
|
2016-08-30 16:49:28 +08:00
|
|
|
RXRPC_CALL_LOCAL_ERROR, /* - call failed due to local error */
|
2016-03-04 23:53:46 +08:00
|
|
|
RXRPC_CALL_NETWORK_ERROR, /* - call terminated by network error */
|
2016-08-30 16:49:28 +08:00
|
|
|
NR__RXRPC_CALL_COMPLETIONS
|
2016-03-04 23:53:46 +08:00
|
|
|
};
|
|
|
|
|
2007-04-27 06:48:28 +08:00
|
|
|
/*
|
|
|
|
* RxRPC call definition
|
|
|
|
* - matched by { connection, call_id }
|
|
|
|
*/
|
|
|
|
struct rxrpc_call {
|
2016-06-28 00:11:19 +08:00
|
|
|
struct rcu_head rcu;
|
2007-04-27 06:48:28 +08:00
|
|
|
struct rxrpc_connection *conn; /* connection carrying call */
|
2016-08-24 21:31:43 +08:00
|
|
|
struct rxrpc_peer *peer; /* Peer record for remote address */
|
2016-09-07 16:19:31 +08:00
|
|
|
struct rxrpc_sock __rcu *socket; /* socket responsible */
|
rxrpc: Rewrite the data and ack handling code
Rewrite the data and ack handling code such that:
(1) Parsing of received ACK and ABORT packets and the distribution and the
filing of DATA packets happens entirely within the data_ready context
called from the UDP socket. This allows us to process and discard ACK
and ABORT packets much more quickly (they're no longer stashed on a
queue for a background thread to process).
(2) We avoid calling skb_clone(), pskb_pull() and pskb_trim(). We instead
keep track of the offset and length of the content of each packet in
the sk_buff metadata. This means we don't do any allocation in the
receive path.
(3) Jumbo DATA packet parsing is now done in data_ready context. Rather
than cloning the packet once for each subpacket and pulling/trimming
it, we file the packet multiple times with an annotation for each
indicating which subpacket is there. From that we can directly
calculate the offset and length.
(4) A call's receive queue can be accessed without taking locks (memory
barriers do have to be used, though).
(5) Incoming calls are set up from preallocated resources and immediately
made live. They can than have packets queued upon them and ACKs
generated. If insufficient resources exist, DATA packet #1 is given a
BUSY reply and other DATA packets are discarded).
(6) sk_buffs no longer take a ref on their parent call.
To make this work, the following changes are made:
(1) Each call's receive buffer is now a circular buffer of sk_buff
pointers (rxtx_buffer) rather than a number of sk_buff_heads spread
between the call and the socket. This permits each sk_buff to be in
the buffer multiple times. The receive buffer is reused for the
transmit buffer.
(2) A circular buffer of annotations (rxtx_annotations) is kept parallel
to the data buffer. Transmission phase annotations indicate whether a
buffered packet has been ACK'd or not and whether it needs
retransmission.
Receive phase annotations indicate whether a slot holds a whole packet
or a jumbo subpacket and, if the latter, which subpacket. They also
note whether the packet has been decrypted in place.
(3) DATA packet window tracking is much simplified. Each phase has just
two numbers representing the window (rx_hard_ack/rx_top and
tx_hard_ack/tx_top).
The hard_ack number is the sequence number before base of the window,
representing the last packet the other side says it has consumed.
hard_ack starts from 0 and the first packet is sequence number 1.
The top number is the sequence number of the highest-numbered packet
residing in the buffer. Packets between hard_ack+1 and top are
soft-ACK'd to indicate they've been received, but not yet consumed.
Four macros, before(), before_eq(), after() and after_eq() are added
to compare sequence numbers within the window. This allows for the
top of the window to wrap when the hard-ack sequence number gets close
to the limit.
Two flags, RXRPC_CALL_RX_LAST and RXRPC_CALL_TX_LAST, are added also
to indicate when rx_top and tx_top point at the packets with the
LAST_PACKET bit set, indicating the end of the phase.
(4) Calls are queued on the socket 'receive queue' rather than packets.
This means that we don't need have to invent dummy packets to queue to
indicate abnormal/terminal states and we don't have to keep metadata
packets (such as ABORTs) around
(5) The offset and length of a (sub)packet's content are now passed to
the verify_packet security op. This is currently expected to decrypt
the packet in place and validate it.
However, there's now nowhere to store the revised offset and length of
the actual data within the decrypted blob (there may be a header and
padding to skip) because an sk_buff may represent multiple packets, so
a locate_data security op is added to retrieve these details from the
sk_buff content when needed.
(6) recvmsg() now has to handle jumbo subpackets, where each subpacket is
individually secured and needs to be individually decrypted. The code
to do this is broken out into rxrpc_recvmsg_data() and shared with the
kernel API. It now iterates over the call's receive buffer rather
than walking the socket receive queue.
Additional changes:
(1) The timers are condensed to a single timer that is set for the soonest
of three timeouts (delayed ACK generation, DATA retransmission and
call lifespan).
(2) Transmission of ACK and ABORT packets is effected immediately from
process-context socket ops/kernel API calls that cause them instead of
them being punted off to a background work item. The data_ready
handler still has to defer to the background, though.
(3) A shutdown op is added to the AF_RXRPC socket so that the AFS
filesystem can shut down the socket and flush its own work items
before closing the socket to deal with any in-progress service calls.
Future additional changes that will need to be considered:
(1) Make sure that a call doesn't hog the front of the queue by receiving
data from the network as fast as userspace is consuming it to the
exclusion of other calls.
(2) Transmit delayed ACKs from within recvmsg() when we've consumed
sufficiently more packets to avoid the background work item needing to
run.
Signed-off-by: David Howells <dhowells@redhat.com>
2016-09-08 18:10:12 +08:00
|
|
|
unsigned long ack_at; /* When deferred ACK needs to happen */
|
|
|
|
unsigned long resend_at; /* When next resend needs to happen */
|
|
|
|
unsigned long expire_at; /* When the call times out */
|
|
|
|
struct timer_list timer; /* Combined event timer */
|
|
|
|
struct work_struct processor; /* Event processor */
|
rxrpc: Don't expose skbs to in-kernel users [ver #2]
Don't expose skbs to in-kernel users, such as the AFS filesystem, but
instead provide a notification hook the indicates that a call needs
attention and another that indicates that there's a new call to be
collected.
This makes the following possibilities more achievable:
(1) Call refcounting can be made simpler if skbs don't hold refs to calls.
(2) skbs referring to non-data events will be able to be freed much sooner
rather than being queued for AFS to pick up as rxrpc_kernel_recv_data
will be able to consult the call state.
(3) We can shortcut the receive phase when a call is remotely aborted
because we don't have to go through all the packets to get to the one
cancelling the operation.
(4) It makes it easier to do encryption/decryption directly between AFS's
buffers and sk_buffs.
(5) Encryption/decryption can more easily be done in the AFS's thread
contexts - usually that of the userspace process that issued a syscall
- rather than in one of rxrpc's background threads on a workqueue.
(6) AFS will be able to wait synchronously on a call inside AF_RXRPC.
To make this work, the following interface function has been added:
int rxrpc_kernel_recv_data(
struct socket *sock, struct rxrpc_call *call,
void *buffer, size_t bufsize, size_t *_offset,
bool want_more, u32 *_abort_code);
This is the recvmsg equivalent. It allows the caller to find out about the
state of a specific call and to transfer received data into a buffer
piecemeal.
afs_extract_data() and rxrpc_kernel_recv_data() now do all the extraction
logic between them. They don't wait synchronously yet because the socket
lock needs to be dealt with.
Five interface functions have been removed:
rxrpc_kernel_is_data_last()
rxrpc_kernel_get_abort_code()
rxrpc_kernel_get_error_number()
rxrpc_kernel_free_skb()
rxrpc_kernel_data_consumed()
As a temporary hack, sk_buffs going to an in-kernel call are queued on the
rxrpc_call struct (->knlrecv_queue) rather than being handed over to the
in-kernel user. To process the queue internally, a temporary function,
temp_deliver_data() has been added. This will be replaced with common code
between the rxrpc_recvmsg() path and the kernel_rxrpc_recv_data() path in a
future patch.
Signed-off-by: David Howells <dhowells@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2016-08-31 03:42:14 +08:00
|
|
|
rxrpc_notify_rx_t notify_rx; /* kernel service Rx notification function */
|
2007-04-27 06:48:28 +08:00
|
|
|
struct list_head link; /* link in master call list */
|
rxrpc: Improve management and caching of client connection objects
Improve the management and caching of client rxrpc connection objects.
From this point, client connections will be managed separately from service
connections because AF_RXRPC controls the creation and re-use of client
connections but doesn't have that luxury with service connections.
Further, there will be limits on the numbers of client connections that may
be live on a machine. No direct restriction will be placed on the number
of client calls, excepting that each client connection can support a
maximum of four concurrent calls.
Note that, for a number of reasons, we don't want to simply discard a
client connection as soon as the last call is apparently finished:
(1) Security is negotiated per-connection and the context is then shared
between all calls on that connection. The context can be negotiated
again if the connection lapses, but that involves holding up calls
whilst at least two packets are exchanged and various crypto bits are
performed - so we'd ideally like to cache it for a little while at
least.
(2) If a packet goes astray, we will need to retransmit a final ACK or
ABORT packet. To make this work, we need to keep around the
connection details for a little while.
(3) The locally held structures represent some amount of setup time, to be
weighed against their occupation of memory when idle.
To this end, the client connection cache is managed by a state machine on
each connection. There are five states:
(1) INACTIVE - The connection is not held in any list and may not have
been exposed to the world. If it has been previously exposed, it was
discarded from the idle list after expiring.
(2) WAITING - The connection is waiting for the number of client conns to
drop below the maximum capacity. Calls may be in progress upon it
from when it was active and got culled.
The connection is on the rxrpc_waiting_client_conns list which is kept
in to-be-granted order. Culled conns with waiters go to the back of
the queue just like new conns.
(3) ACTIVE - The connection has at least one call in progress upon it, it
may freely grant available channels to new calls and calls may be
waiting on it for channels to become available.
The connection is on the rxrpc_active_client_conns list which is kept
in activation order for culling purposes.
(4) CULLED - The connection got summarily culled to try and free up
capacity. Calls currently in progress on the connection are allowed
to continue, but new calls will have to wait. There can be no waiters
in this state - the conn would have to go to the WAITING state
instead.
(5) IDLE - The connection has no calls in progress upon it and must have
been exposed to the world (ie. the EXPOSED flag must be set). When it
expires, the EXPOSED flag is cleared and the connection transitions to
the INACTIVE state.
The connection is on the rxrpc_idle_client_conns list which is kept in
order of how soon they'll expire.
A connection in the ACTIVE or CULLED state must have at least one active
call upon it; if in the WAITING state it may have active calls upon it;
other states may not have active calls.
As long as a connection remains active and doesn't get culled, it may
continue to process calls - even if there are connections on the wait
queue. This simplifies things a bit and reduces the amount of checking we
need do.
There are a couple flags of relevance to the cache:
(1) EXPOSED - The connection ID got exposed to the world. If this flag is
set, an extra ref is added to the connection preventing it from being
reaped when it has no calls outstanding. This flag is cleared and the
ref dropped when a conn is discarded from the idle list.
(2) DONT_REUSE - The connection should be discarded as soon as possible and
should not be reused.
This commit also provides a number of new settings:
(*) /proc/net/rxrpc/max_client_conns
The maximum number of live client connections. Above this number, new
connections get added to the wait list and must wait for an active
conn to be culled. Culled connections can be reused, but they will go
to the back of the wait list and have to wait.
(*) /proc/net/rxrpc/reap_client_conns
If the number of desired connections exceeds the maximum above, the
active connection list will be culled until there are only this many
left in it.
(*) /proc/net/rxrpc/idle_conn_expiry
The normal expiry time for a client connection, provided there are
fewer than reap_client_conns of them around.
(*) /proc/net/rxrpc/idle_conn_fast_expiry
The expedited expiry time, used when there are more than
reap_client_conns of them around.
Note that I combined the Tx wait queue with the channel grant wait queue to
save space as only one of these should be in use at once.
Note also that, for the moment, the service connection cache still uses the
old connection management code.
Signed-off-by: David Howells <dhowells@redhat.com>
2016-08-24 14:30:52 +08:00
|
|
|
struct list_head chan_wait_link; /* Link in conn->waiting_calls */
|
2016-04-04 21:00:34 +08:00
|
|
|
struct hlist_node error_link; /* link in error distribution list */
|
rxrpc: Rewrite the data and ack handling code
Rewrite the data and ack handling code such that:
(1) Parsing of received ACK and ABORT packets and the distribution and the
filing of DATA packets happens entirely within the data_ready context
called from the UDP socket. This allows us to process and discard ACK
and ABORT packets much more quickly (they're no longer stashed on a
queue for a background thread to process).
(2) We avoid calling skb_clone(), pskb_pull() and pskb_trim(). We instead
keep track of the offset and length of the content of each packet in
the sk_buff metadata. This means we don't do any allocation in the
receive path.
(3) Jumbo DATA packet parsing is now done in data_ready context. Rather
than cloning the packet once for each subpacket and pulling/trimming
it, we file the packet multiple times with an annotation for each
indicating which subpacket is there. From that we can directly
calculate the offset and length.
(4) A call's receive queue can be accessed without taking locks (memory
barriers do have to be used, though).
(5) Incoming calls are set up from preallocated resources and immediately
made live. They can than have packets queued upon them and ACKs
generated. If insufficient resources exist, DATA packet #1 is given a
BUSY reply and other DATA packets are discarded).
(6) sk_buffs no longer take a ref on their parent call.
To make this work, the following changes are made:
(1) Each call's receive buffer is now a circular buffer of sk_buff
pointers (rxtx_buffer) rather than a number of sk_buff_heads spread
between the call and the socket. This permits each sk_buff to be in
the buffer multiple times. The receive buffer is reused for the
transmit buffer.
(2) A circular buffer of annotations (rxtx_annotations) is kept parallel
to the data buffer. Transmission phase annotations indicate whether a
buffered packet has been ACK'd or not and whether it needs
retransmission.
Receive phase annotations indicate whether a slot holds a whole packet
or a jumbo subpacket and, if the latter, which subpacket. They also
note whether the packet has been decrypted in place.
(3) DATA packet window tracking is much simplified. Each phase has just
two numbers representing the window (rx_hard_ack/rx_top and
tx_hard_ack/tx_top).
The hard_ack number is the sequence number before base of the window,
representing the last packet the other side says it has consumed.
hard_ack starts from 0 and the first packet is sequence number 1.
The top number is the sequence number of the highest-numbered packet
residing in the buffer. Packets between hard_ack+1 and top are
soft-ACK'd to indicate they've been received, but not yet consumed.
Four macros, before(), before_eq(), after() and after_eq() are added
to compare sequence numbers within the window. This allows for the
top of the window to wrap when the hard-ack sequence number gets close
to the limit.
Two flags, RXRPC_CALL_RX_LAST and RXRPC_CALL_TX_LAST, are added also
to indicate when rx_top and tx_top point at the packets with the
LAST_PACKET bit set, indicating the end of the phase.
(4) Calls are queued on the socket 'receive queue' rather than packets.
This means that we don't need have to invent dummy packets to queue to
indicate abnormal/terminal states and we don't have to keep metadata
packets (such as ABORTs) around
(5) The offset and length of a (sub)packet's content are now passed to
the verify_packet security op. This is currently expected to decrypt
the packet in place and validate it.
However, there's now nowhere to store the revised offset and length of
the actual data within the decrypted blob (there may be a header and
padding to skip) because an sk_buff may represent multiple packets, so
a locate_data security op is added to retrieve these details from the
sk_buff content when needed.
(6) recvmsg() now has to handle jumbo subpackets, where each subpacket is
individually secured and needs to be individually decrypted. The code
to do this is broken out into rxrpc_recvmsg_data() and shared with the
kernel API. It now iterates over the call's receive buffer rather
than walking the socket receive queue.
Additional changes:
(1) The timers are condensed to a single timer that is set for the soonest
of three timeouts (delayed ACK generation, DATA retransmission and
call lifespan).
(2) Transmission of ACK and ABORT packets is effected immediately from
process-context socket ops/kernel API calls that cause them instead of
them being punted off to a background work item. The data_ready
handler still has to defer to the background, though.
(3) A shutdown op is added to the AF_RXRPC socket so that the AFS
filesystem can shut down the socket and flush its own work items
before closing the socket to deal with any in-progress service calls.
Future additional changes that will need to be considered:
(1) Make sure that a call doesn't hog the front of the queue by receiving
data from the network as fast as userspace is consuming it to the
exclusion of other calls.
(2) Transmit delayed ACKs from within recvmsg() when we've consumed
sufficiently more packets to avoid the background work item needing to
run.
Signed-off-by: David Howells <dhowells@redhat.com>
2016-09-08 18:10:12 +08:00
|
|
|
struct list_head accept_link; /* Link in rx->acceptq */
|
|
|
|
struct list_head recvmsg_link; /* Link in rx->recvmsg_q */
|
|
|
|
struct list_head sock_link; /* Link in rx->sock_calls */
|
|
|
|
struct rb_node sock_node; /* Node in rx->calls */
|
2007-04-27 06:48:28 +08:00
|
|
|
struct sk_buff *tx_pending; /* Tx socket buffer being filled */
|
rxrpc: Improve management and caching of client connection objects
Improve the management and caching of client rxrpc connection objects.
From this point, client connections will be managed separately from service
connections because AF_RXRPC controls the creation and re-use of client
connections but doesn't have that luxury with service connections.
Further, there will be limits on the numbers of client connections that may
be live on a machine. No direct restriction will be placed on the number
of client calls, excepting that each client connection can support a
maximum of four concurrent calls.
Note that, for a number of reasons, we don't want to simply discard a
client connection as soon as the last call is apparently finished:
(1) Security is negotiated per-connection and the context is then shared
between all calls on that connection. The context can be negotiated
again if the connection lapses, but that involves holding up calls
whilst at least two packets are exchanged and various crypto bits are
performed - so we'd ideally like to cache it for a little while at
least.
(2) If a packet goes astray, we will need to retransmit a final ACK or
ABORT packet. To make this work, we need to keep around the
connection details for a little while.
(3) The locally held structures represent some amount of setup time, to be
weighed against their occupation of memory when idle.
To this end, the client connection cache is managed by a state machine on
each connection. There are five states:
(1) INACTIVE - The connection is not held in any list and may not have
been exposed to the world. If it has been previously exposed, it was
discarded from the idle list after expiring.
(2) WAITING - The connection is waiting for the number of client conns to
drop below the maximum capacity. Calls may be in progress upon it
from when it was active and got culled.
The connection is on the rxrpc_waiting_client_conns list which is kept
in to-be-granted order. Culled conns with waiters go to the back of
the queue just like new conns.
(3) ACTIVE - The connection has at least one call in progress upon it, it
may freely grant available channels to new calls and calls may be
waiting on it for channels to become available.
The connection is on the rxrpc_active_client_conns list which is kept
in activation order for culling purposes.
(4) CULLED - The connection got summarily culled to try and free up
capacity. Calls currently in progress on the connection are allowed
to continue, but new calls will have to wait. There can be no waiters
in this state - the conn would have to go to the WAITING state
instead.
(5) IDLE - The connection has no calls in progress upon it and must have
been exposed to the world (ie. the EXPOSED flag must be set). When it
expires, the EXPOSED flag is cleared and the connection transitions to
the INACTIVE state.
The connection is on the rxrpc_idle_client_conns list which is kept in
order of how soon they'll expire.
A connection in the ACTIVE or CULLED state must have at least one active
call upon it; if in the WAITING state it may have active calls upon it;
other states may not have active calls.
As long as a connection remains active and doesn't get culled, it may
continue to process calls - even if there are connections on the wait
queue. This simplifies things a bit and reduces the amount of checking we
need do.
There are a couple flags of relevance to the cache:
(1) EXPOSED - The connection ID got exposed to the world. If this flag is
set, an extra ref is added to the connection preventing it from being
reaped when it has no calls outstanding. This flag is cleared and the
ref dropped when a conn is discarded from the idle list.
(2) DONT_REUSE - The connection should be discarded as soon as possible and
should not be reused.
This commit also provides a number of new settings:
(*) /proc/net/rxrpc/max_client_conns
The maximum number of live client connections. Above this number, new
connections get added to the wait list and must wait for an active
conn to be culled. Culled connections can be reused, but they will go
to the back of the wait list and have to wait.
(*) /proc/net/rxrpc/reap_client_conns
If the number of desired connections exceeds the maximum above, the
active connection list will be culled until there are only this many
left in it.
(*) /proc/net/rxrpc/idle_conn_expiry
The normal expiry time for a client connection, provided there are
fewer than reap_client_conns of them around.
(*) /proc/net/rxrpc/idle_conn_fast_expiry
The expedited expiry time, used when there are more than
reap_client_conns of them around.
Note that I combined the Tx wait queue with the channel grant wait queue to
save space as only one of these should be in use at once.
Note also that, for the moment, the service connection cache still uses the
old connection management code.
Signed-off-by: David Howells <dhowells@redhat.com>
2016-08-24 14:30:52 +08:00
|
|
|
wait_queue_head_t waitq; /* Wait queue for channel or Tx */
|
2016-06-27 05:55:24 +08:00
|
|
|
__be32 crypto_buf[2]; /* Temporary packet crypto buffer */
|
2007-04-27 06:48:28 +08:00
|
|
|
unsigned long user_call_ID; /* user-defined call ID */
|
|
|
|
unsigned long flags;
|
|
|
|
unsigned long events;
|
|
|
|
spinlock_t lock;
|
|
|
|
rwlock_t state_lock; /* lock for state transition */
|
2016-08-30 16:49:28 +08:00
|
|
|
u32 abort_code; /* Local/remote abort code */
|
|
|
|
int error; /* Local error incurred */
|
2016-09-08 18:10:11 +08:00
|
|
|
enum rxrpc_call_state state; /* current state of call */
|
|
|
|
enum rxrpc_call_completion completion; /* Call completion condition */
|
2007-04-27 06:48:28 +08:00
|
|
|
atomic_t usage;
|
2016-08-23 22:27:24 +08:00
|
|
|
u16 service_id; /* service ID */
|
2016-09-07 22:19:25 +08:00
|
|
|
u8 security_ix; /* Security type */
|
2016-08-23 22:27:24 +08:00
|
|
|
u32 call_id; /* call ID on connection */
|
|
|
|
u32 cid; /* connection ID plus channel index */
|
|
|
|
int debug_id; /* debug ID for printks */
|
2016-09-22 07:29:31 +08:00
|
|
|
unsigned short rx_pkt_offset; /* Current recvmsg packet offset */
|
|
|
|
unsigned short rx_pkt_len; /* Current recvmsg packet len */
|
2007-04-27 06:48:28 +08:00
|
|
|
|
rxrpc: Rewrite the data and ack handling code
Rewrite the data and ack handling code such that:
(1) Parsing of received ACK and ABORT packets and the distribution and the
filing of DATA packets happens entirely within the data_ready context
called from the UDP socket. This allows us to process and discard ACK
and ABORT packets much more quickly (they're no longer stashed on a
queue for a background thread to process).
(2) We avoid calling skb_clone(), pskb_pull() and pskb_trim(). We instead
keep track of the offset and length of the content of each packet in
the sk_buff metadata. This means we don't do any allocation in the
receive path.
(3) Jumbo DATA packet parsing is now done in data_ready context. Rather
than cloning the packet once for each subpacket and pulling/trimming
it, we file the packet multiple times with an annotation for each
indicating which subpacket is there. From that we can directly
calculate the offset and length.
(4) A call's receive queue can be accessed without taking locks (memory
barriers do have to be used, though).
(5) Incoming calls are set up from preallocated resources and immediately
made live. They can than have packets queued upon them and ACKs
generated. If insufficient resources exist, DATA packet #1 is given a
BUSY reply and other DATA packets are discarded).
(6) sk_buffs no longer take a ref on their parent call.
To make this work, the following changes are made:
(1) Each call's receive buffer is now a circular buffer of sk_buff
pointers (rxtx_buffer) rather than a number of sk_buff_heads spread
between the call and the socket. This permits each sk_buff to be in
the buffer multiple times. The receive buffer is reused for the
transmit buffer.
(2) A circular buffer of annotations (rxtx_annotations) is kept parallel
to the data buffer. Transmission phase annotations indicate whether a
buffered packet has been ACK'd or not and whether it needs
retransmission.
Receive phase annotations indicate whether a slot holds a whole packet
or a jumbo subpacket and, if the latter, which subpacket. They also
note whether the packet has been decrypted in place.
(3) DATA packet window tracking is much simplified. Each phase has just
two numbers representing the window (rx_hard_ack/rx_top and
tx_hard_ack/tx_top).
The hard_ack number is the sequence number before base of the window,
representing the last packet the other side says it has consumed.
hard_ack starts from 0 and the first packet is sequence number 1.
The top number is the sequence number of the highest-numbered packet
residing in the buffer. Packets between hard_ack+1 and top are
soft-ACK'd to indicate they've been received, but not yet consumed.
Four macros, before(), before_eq(), after() and after_eq() are added
to compare sequence numbers within the window. This allows for the
top of the window to wrap when the hard-ack sequence number gets close
to the limit.
Two flags, RXRPC_CALL_RX_LAST and RXRPC_CALL_TX_LAST, are added also
to indicate when rx_top and tx_top point at the packets with the
LAST_PACKET bit set, indicating the end of the phase.
(4) Calls are queued on the socket 'receive queue' rather than packets.
This means that we don't need have to invent dummy packets to queue to
indicate abnormal/terminal states and we don't have to keep metadata
packets (such as ABORTs) around
(5) The offset and length of a (sub)packet's content are now passed to
the verify_packet security op. This is currently expected to decrypt
the packet in place and validate it.
However, there's now nowhere to store the revised offset and length of
the actual data within the decrypted blob (there may be a header and
padding to skip) because an sk_buff may represent multiple packets, so
a locate_data security op is added to retrieve these details from the
sk_buff content when needed.
(6) recvmsg() now has to handle jumbo subpackets, where each subpacket is
individually secured and needs to be individually decrypted. The code
to do this is broken out into rxrpc_recvmsg_data() and shared with the
kernel API. It now iterates over the call's receive buffer rather
than walking the socket receive queue.
Additional changes:
(1) The timers are condensed to a single timer that is set for the soonest
of three timeouts (delayed ACK generation, DATA retransmission and
call lifespan).
(2) Transmission of ACK and ABORT packets is effected immediately from
process-context socket ops/kernel API calls that cause them instead of
them being punted off to a background work item. The data_ready
handler still has to defer to the background, though.
(3) A shutdown op is added to the AF_RXRPC socket so that the AFS
filesystem can shut down the socket and flush its own work items
before closing the socket to deal with any in-progress service calls.
Future additional changes that will need to be considered:
(1) Make sure that a call doesn't hog the front of the queue by receiving
data from the network as fast as userspace is consuming it to the
exclusion of other calls.
(2) Transmit delayed ACKs from within recvmsg() when we've consumed
sufficiently more packets to avoid the background work item needing to
run.
Signed-off-by: David Howells <dhowells@redhat.com>
2016-09-08 18:10:12 +08:00
|
|
|
/* Rx/Tx circular buffer, depending on phase.
|
|
|
|
*
|
|
|
|
* In the Rx phase, packets are annotated with 0 or the number of the
|
|
|
|
* segment of a jumbo packet each buffer refers to. There can be up to
|
|
|
|
* 47 segments in a maximum-size UDP packet.
|
|
|
|
*
|
|
|
|
* In the Tx phase, packets are annotated with which buffers have been
|
|
|
|
* acked.
|
|
|
|
*/
|
|
|
|
#define RXRPC_RXTX_BUFF_SIZE 64
|
|
|
|
#define RXRPC_RXTX_BUFF_MASK (RXRPC_RXTX_BUFF_SIZE - 1)
|
2016-09-14 05:36:22 +08:00
|
|
|
#define RXRPC_INIT_RX_WINDOW_SIZE 32
|
rxrpc: Rewrite the data and ack handling code
Rewrite the data and ack handling code such that:
(1) Parsing of received ACK and ABORT packets and the distribution and the
filing of DATA packets happens entirely within the data_ready context
called from the UDP socket. This allows us to process and discard ACK
and ABORT packets much more quickly (they're no longer stashed on a
queue for a background thread to process).
(2) We avoid calling skb_clone(), pskb_pull() and pskb_trim(). We instead
keep track of the offset and length of the content of each packet in
the sk_buff metadata. This means we don't do any allocation in the
receive path.
(3) Jumbo DATA packet parsing is now done in data_ready context. Rather
than cloning the packet once for each subpacket and pulling/trimming
it, we file the packet multiple times with an annotation for each
indicating which subpacket is there. From that we can directly
calculate the offset and length.
(4) A call's receive queue can be accessed without taking locks (memory
barriers do have to be used, though).
(5) Incoming calls are set up from preallocated resources and immediately
made live. They can than have packets queued upon them and ACKs
generated. If insufficient resources exist, DATA packet #1 is given a
BUSY reply and other DATA packets are discarded).
(6) sk_buffs no longer take a ref on their parent call.
To make this work, the following changes are made:
(1) Each call's receive buffer is now a circular buffer of sk_buff
pointers (rxtx_buffer) rather than a number of sk_buff_heads spread
between the call and the socket. This permits each sk_buff to be in
the buffer multiple times. The receive buffer is reused for the
transmit buffer.
(2) A circular buffer of annotations (rxtx_annotations) is kept parallel
to the data buffer. Transmission phase annotations indicate whether a
buffered packet has been ACK'd or not and whether it needs
retransmission.
Receive phase annotations indicate whether a slot holds a whole packet
or a jumbo subpacket and, if the latter, which subpacket. They also
note whether the packet has been decrypted in place.
(3) DATA packet window tracking is much simplified. Each phase has just
two numbers representing the window (rx_hard_ack/rx_top and
tx_hard_ack/tx_top).
The hard_ack number is the sequence number before base of the window,
representing the last packet the other side says it has consumed.
hard_ack starts from 0 and the first packet is sequence number 1.
The top number is the sequence number of the highest-numbered packet
residing in the buffer. Packets between hard_ack+1 and top are
soft-ACK'd to indicate they've been received, but not yet consumed.
Four macros, before(), before_eq(), after() and after_eq() are added
to compare sequence numbers within the window. This allows for the
top of the window to wrap when the hard-ack sequence number gets close
to the limit.
Two flags, RXRPC_CALL_RX_LAST and RXRPC_CALL_TX_LAST, are added also
to indicate when rx_top and tx_top point at the packets with the
LAST_PACKET bit set, indicating the end of the phase.
(4) Calls are queued on the socket 'receive queue' rather than packets.
This means that we don't need have to invent dummy packets to queue to
indicate abnormal/terminal states and we don't have to keep metadata
packets (such as ABORTs) around
(5) The offset and length of a (sub)packet's content are now passed to
the verify_packet security op. This is currently expected to decrypt
the packet in place and validate it.
However, there's now nowhere to store the revised offset and length of
the actual data within the decrypted blob (there may be a header and
padding to skip) because an sk_buff may represent multiple packets, so
a locate_data security op is added to retrieve these details from the
sk_buff content when needed.
(6) recvmsg() now has to handle jumbo subpackets, where each subpacket is
individually secured and needs to be individually decrypted. The code
to do this is broken out into rxrpc_recvmsg_data() and shared with the
kernel API. It now iterates over the call's receive buffer rather
than walking the socket receive queue.
Additional changes:
(1) The timers are condensed to a single timer that is set for the soonest
of three timeouts (delayed ACK generation, DATA retransmission and
call lifespan).
(2) Transmission of ACK and ABORT packets is effected immediately from
process-context socket ops/kernel API calls that cause them instead of
them being punted off to a background work item. The data_ready
handler still has to defer to the background, though.
(3) A shutdown op is added to the AF_RXRPC socket so that the AFS
filesystem can shut down the socket and flush its own work items
before closing the socket to deal with any in-progress service calls.
Future additional changes that will need to be considered:
(1) Make sure that a call doesn't hog the front of the queue by receiving
data from the network as fast as userspace is consuming it to the
exclusion of other calls.
(2) Transmit delayed ACKs from within recvmsg() when we've consumed
sufficiently more packets to avoid the background work item needing to
run.
Signed-off-by: David Howells <dhowells@redhat.com>
2016-09-08 18:10:12 +08:00
|
|
|
struct sk_buff **rxtx_buffer;
|
|
|
|
u8 *rxtx_annotations;
|
|
|
|
#define RXRPC_TX_ANNO_ACK 0
|
|
|
|
#define RXRPC_TX_ANNO_UNACK 1
|
|
|
|
#define RXRPC_TX_ANNO_NAK 2
|
|
|
|
#define RXRPC_TX_ANNO_RETRANS 3
|
2016-09-22 07:29:32 +08:00
|
|
|
#define RXRPC_TX_ANNO_MASK 0x03
|
2016-09-23 19:39:22 +08:00
|
|
|
#define RXRPC_TX_ANNO_LAST 0x04
|
|
|
|
#define RXRPC_TX_ANNO_RESENT 0x08
|
|
|
|
|
rxrpc: Rewrite the data and ack handling code
Rewrite the data and ack handling code such that:
(1) Parsing of received ACK and ABORT packets and the distribution and the
filing of DATA packets happens entirely within the data_ready context
called from the UDP socket. This allows us to process and discard ACK
and ABORT packets much more quickly (they're no longer stashed on a
queue for a background thread to process).
(2) We avoid calling skb_clone(), pskb_pull() and pskb_trim(). We instead
keep track of the offset and length of the content of each packet in
the sk_buff metadata. This means we don't do any allocation in the
receive path.
(3) Jumbo DATA packet parsing is now done in data_ready context. Rather
than cloning the packet once for each subpacket and pulling/trimming
it, we file the packet multiple times with an annotation for each
indicating which subpacket is there. From that we can directly
calculate the offset and length.
(4) A call's receive queue can be accessed without taking locks (memory
barriers do have to be used, though).
(5) Incoming calls are set up from preallocated resources and immediately
made live. They can than have packets queued upon them and ACKs
generated. If insufficient resources exist, DATA packet #1 is given a
BUSY reply and other DATA packets are discarded).
(6) sk_buffs no longer take a ref on their parent call.
To make this work, the following changes are made:
(1) Each call's receive buffer is now a circular buffer of sk_buff
pointers (rxtx_buffer) rather than a number of sk_buff_heads spread
between the call and the socket. This permits each sk_buff to be in
the buffer multiple times. The receive buffer is reused for the
transmit buffer.
(2) A circular buffer of annotations (rxtx_annotations) is kept parallel
to the data buffer. Transmission phase annotations indicate whether a
buffered packet has been ACK'd or not and whether it needs
retransmission.
Receive phase annotations indicate whether a slot holds a whole packet
or a jumbo subpacket and, if the latter, which subpacket. They also
note whether the packet has been decrypted in place.
(3) DATA packet window tracking is much simplified. Each phase has just
two numbers representing the window (rx_hard_ack/rx_top and
tx_hard_ack/tx_top).
The hard_ack number is the sequence number before base of the window,
representing the last packet the other side says it has consumed.
hard_ack starts from 0 and the first packet is sequence number 1.
The top number is the sequence number of the highest-numbered packet
residing in the buffer. Packets between hard_ack+1 and top are
soft-ACK'd to indicate they've been received, but not yet consumed.
Four macros, before(), before_eq(), after() and after_eq() are added
to compare sequence numbers within the window. This allows for the
top of the window to wrap when the hard-ack sequence number gets close
to the limit.
Two flags, RXRPC_CALL_RX_LAST and RXRPC_CALL_TX_LAST, are added also
to indicate when rx_top and tx_top point at the packets with the
LAST_PACKET bit set, indicating the end of the phase.
(4) Calls are queued on the socket 'receive queue' rather than packets.
This means that we don't need have to invent dummy packets to queue to
indicate abnormal/terminal states and we don't have to keep metadata
packets (such as ABORTs) around
(5) The offset and length of a (sub)packet's content are now passed to
the verify_packet security op. This is currently expected to decrypt
the packet in place and validate it.
However, there's now nowhere to store the revised offset and length of
the actual data within the decrypted blob (there may be a header and
padding to skip) because an sk_buff may represent multiple packets, so
a locate_data security op is added to retrieve these details from the
sk_buff content when needed.
(6) recvmsg() now has to handle jumbo subpackets, where each subpacket is
individually secured and needs to be individually decrypted. The code
to do this is broken out into rxrpc_recvmsg_data() and shared with the
kernel API. It now iterates over the call's receive buffer rather
than walking the socket receive queue.
Additional changes:
(1) The timers are condensed to a single timer that is set for the soonest
of three timeouts (delayed ACK generation, DATA retransmission and
call lifespan).
(2) Transmission of ACK and ABORT packets is effected immediately from
process-context socket ops/kernel API calls that cause them instead of
them being punted off to a background work item. The data_ready
handler still has to defer to the background, though.
(3) A shutdown op is added to the AF_RXRPC socket so that the AFS
filesystem can shut down the socket and flush its own work items
before closing the socket to deal with any in-progress service calls.
Future additional changes that will need to be considered:
(1) Make sure that a call doesn't hog the front of the queue by receiving
data from the network as fast as userspace is consuming it to the
exclusion of other calls.
(2) Transmit delayed ACKs from within recvmsg() when we've consumed
sufficiently more packets to avoid the background work item needing to
run.
Signed-off-by: David Howells <dhowells@redhat.com>
2016-09-08 18:10:12 +08:00
|
|
|
#define RXRPC_RX_ANNO_JUMBO 0x3f /* Jumbo subpacket number + 1 if not zero */
|
|
|
|
#define RXRPC_RX_ANNO_JLAST 0x40 /* Set if last element of a jumbo packet */
|
|
|
|
#define RXRPC_RX_ANNO_VERIFIED 0x80 /* Set if verified and decrypted */
|
|
|
|
rxrpc_seq_t tx_hard_ack; /* Dead slot in buffer; the first transmitted but
|
|
|
|
* not hard-ACK'd packet follows this.
|
|
|
|
*/
|
|
|
|
rxrpc_seq_t tx_top; /* Highest Tx slot allocated. */
|
|
|
|
rxrpc_seq_t rx_hard_ack; /* Dead slot in buffer; the first received but not
|
|
|
|
* consumed packet follows this.
|
2007-04-27 06:48:28 +08:00
|
|
|
*/
|
rxrpc: Rewrite the data and ack handling code
Rewrite the data and ack handling code such that:
(1) Parsing of received ACK and ABORT packets and the distribution and the
filing of DATA packets happens entirely within the data_ready context
called from the UDP socket. This allows us to process and discard ACK
and ABORT packets much more quickly (they're no longer stashed on a
queue for a background thread to process).
(2) We avoid calling skb_clone(), pskb_pull() and pskb_trim(). We instead
keep track of the offset and length of the content of each packet in
the sk_buff metadata. This means we don't do any allocation in the
receive path.
(3) Jumbo DATA packet parsing is now done in data_ready context. Rather
than cloning the packet once for each subpacket and pulling/trimming
it, we file the packet multiple times with an annotation for each
indicating which subpacket is there. From that we can directly
calculate the offset and length.
(4) A call's receive queue can be accessed without taking locks (memory
barriers do have to be used, though).
(5) Incoming calls are set up from preallocated resources and immediately
made live. They can than have packets queued upon them and ACKs
generated. If insufficient resources exist, DATA packet #1 is given a
BUSY reply and other DATA packets are discarded).
(6) sk_buffs no longer take a ref on their parent call.
To make this work, the following changes are made:
(1) Each call's receive buffer is now a circular buffer of sk_buff
pointers (rxtx_buffer) rather than a number of sk_buff_heads spread
between the call and the socket. This permits each sk_buff to be in
the buffer multiple times. The receive buffer is reused for the
transmit buffer.
(2) A circular buffer of annotations (rxtx_annotations) is kept parallel
to the data buffer. Transmission phase annotations indicate whether a
buffered packet has been ACK'd or not and whether it needs
retransmission.
Receive phase annotations indicate whether a slot holds a whole packet
or a jumbo subpacket and, if the latter, which subpacket. They also
note whether the packet has been decrypted in place.
(3) DATA packet window tracking is much simplified. Each phase has just
two numbers representing the window (rx_hard_ack/rx_top and
tx_hard_ack/tx_top).
The hard_ack number is the sequence number before base of the window,
representing the last packet the other side says it has consumed.
hard_ack starts from 0 and the first packet is sequence number 1.
The top number is the sequence number of the highest-numbered packet
residing in the buffer. Packets between hard_ack+1 and top are
soft-ACK'd to indicate they've been received, but not yet consumed.
Four macros, before(), before_eq(), after() and after_eq() are added
to compare sequence numbers within the window. This allows for the
top of the window to wrap when the hard-ack sequence number gets close
to the limit.
Two flags, RXRPC_CALL_RX_LAST and RXRPC_CALL_TX_LAST, are added also
to indicate when rx_top and tx_top point at the packets with the
LAST_PACKET bit set, indicating the end of the phase.
(4) Calls are queued on the socket 'receive queue' rather than packets.
This means that we don't need have to invent dummy packets to queue to
indicate abnormal/terminal states and we don't have to keep metadata
packets (such as ABORTs) around
(5) The offset and length of a (sub)packet's content are now passed to
the verify_packet security op. This is currently expected to decrypt
the packet in place and validate it.
However, there's now nowhere to store the revised offset and length of
the actual data within the decrypted blob (there may be a header and
padding to skip) because an sk_buff may represent multiple packets, so
a locate_data security op is added to retrieve these details from the
sk_buff content when needed.
(6) recvmsg() now has to handle jumbo subpackets, where each subpacket is
individually secured and needs to be individually decrypted. The code
to do this is broken out into rxrpc_recvmsg_data() and shared with the
kernel API. It now iterates over the call's receive buffer rather
than walking the socket receive queue.
Additional changes:
(1) The timers are condensed to a single timer that is set for the soonest
of three timeouts (delayed ACK generation, DATA retransmission and
call lifespan).
(2) Transmission of ACK and ABORT packets is effected immediately from
process-context socket ops/kernel API calls that cause them instead of
them being punted off to a background work item. The data_ready
handler still has to defer to the background, though.
(3) A shutdown op is added to the AF_RXRPC socket so that the AFS
filesystem can shut down the socket and flush its own work items
before closing the socket to deal with any in-progress service calls.
Future additional changes that will need to be considered:
(1) Make sure that a call doesn't hog the front of the queue by receiving
data from the network as fast as userspace is consuming it to the
exclusion of other calls.
(2) Transmit delayed ACKs from within recvmsg() when we've consumed
sufficiently more packets to avoid the background work item needing to
run.
Signed-off-by: David Howells <dhowells@redhat.com>
2016-09-08 18:10:12 +08:00
|
|
|
rxrpc_seq_t rx_top; /* Highest Rx slot allocated. */
|
|
|
|
rxrpc_seq_t rx_expect_next; /* Expected next packet sequence number */
|
|
|
|
u8 rx_winsize; /* Size of Rx window */
|
|
|
|
u8 tx_winsize; /* Maximum size of Tx window */
|
2016-09-17 17:49:14 +08:00
|
|
|
bool tx_phase; /* T if transmission phase, F if receive phase */
|
2016-09-14 05:36:22 +08:00
|
|
|
u8 nr_jumbo_bad; /* Number of jumbo dups/exceeds-windows */
|
2007-04-27 06:48:28 +08:00
|
|
|
|
|
|
|
/* receive-phase ACK management */
|
2009-09-16 15:01:13 +08:00
|
|
|
u8 ackr_reason; /* reason to ACK */
|
2016-08-23 22:27:25 +08:00
|
|
|
u16 ackr_skew; /* skew on packet being ACK'd */
|
2016-03-04 23:53:46 +08:00
|
|
|
rxrpc_serial_t ackr_serial; /* serial of packet being ACK'd */
|
rxrpc: Rewrite the data and ack handling code
Rewrite the data and ack handling code such that:
(1) Parsing of received ACK and ABORT packets and the distribution and the
filing of DATA packets happens entirely within the data_ready context
called from the UDP socket. This allows us to process and discard ACK
and ABORT packets much more quickly (they're no longer stashed on a
queue for a background thread to process).
(2) We avoid calling skb_clone(), pskb_pull() and pskb_trim(). We instead
keep track of the offset and length of the content of each packet in
the sk_buff metadata. This means we don't do any allocation in the
receive path.
(3) Jumbo DATA packet parsing is now done in data_ready context. Rather
than cloning the packet once for each subpacket and pulling/trimming
it, we file the packet multiple times with an annotation for each
indicating which subpacket is there. From that we can directly
calculate the offset and length.
(4) A call's receive queue can be accessed without taking locks (memory
barriers do have to be used, though).
(5) Incoming calls are set up from preallocated resources and immediately
made live. They can than have packets queued upon them and ACKs
generated. If insufficient resources exist, DATA packet #1 is given a
BUSY reply and other DATA packets are discarded).
(6) sk_buffs no longer take a ref on their parent call.
To make this work, the following changes are made:
(1) Each call's receive buffer is now a circular buffer of sk_buff
pointers (rxtx_buffer) rather than a number of sk_buff_heads spread
between the call and the socket. This permits each sk_buff to be in
the buffer multiple times. The receive buffer is reused for the
transmit buffer.
(2) A circular buffer of annotations (rxtx_annotations) is kept parallel
to the data buffer. Transmission phase annotations indicate whether a
buffered packet has been ACK'd or not and whether it needs
retransmission.
Receive phase annotations indicate whether a slot holds a whole packet
or a jumbo subpacket and, if the latter, which subpacket. They also
note whether the packet has been decrypted in place.
(3) DATA packet window tracking is much simplified. Each phase has just
two numbers representing the window (rx_hard_ack/rx_top and
tx_hard_ack/tx_top).
The hard_ack number is the sequence number before base of the window,
representing the last packet the other side says it has consumed.
hard_ack starts from 0 and the first packet is sequence number 1.
The top number is the sequence number of the highest-numbered packet
residing in the buffer. Packets between hard_ack+1 and top are
soft-ACK'd to indicate they've been received, but not yet consumed.
Four macros, before(), before_eq(), after() and after_eq() are added
to compare sequence numbers within the window. This allows for the
top of the window to wrap when the hard-ack sequence number gets close
to the limit.
Two flags, RXRPC_CALL_RX_LAST and RXRPC_CALL_TX_LAST, are added also
to indicate when rx_top and tx_top point at the packets with the
LAST_PACKET bit set, indicating the end of the phase.
(4) Calls are queued on the socket 'receive queue' rather than packets.
This means that we don't need have to invent dummy packets to queue to
indicate abnormal/terminal states and we don't have to keep metadata
packets (such as ABORTs) around
(5) The offset and length of a (sub)packet's content are now passed to
the verify_packet security op. This is currently expected to decrypt
the packet in place and validate it.
However, there's now nowhere to store the revised offset and length of
the actual data within the decrypted blob (there may be a header and
padding to skip) because an sk_buff may represent multiple packets, so
a locate_data security op is added to retrieve these details from the
sk_buff content when needed.
(6) recvmsg() now has to handle jumbo subpackets, where each subpacket is
individually secured and needs to be individually decrypted. The code
to do this is broken out into rxrpc_recvmsg_data() and shared with the
kernel API. It now iterates over the call's receive buffer rather
than walking the socket receive queue.
Additional changes:
(1) The timers are condensed to a single timer that is set for the soonest
of three timeouts (delayed ACK generation, DATA retransmission and
call lifespan).
(2) Transmission of ACK and ABORT packets is effected immediately from
process-context socket ops/kernel API calls that cause them instead of
them being punted off to a background work item. The data_ready
handler still has to defer to the background, though.
(3) A shutdown op is added to the AF_RXRPC socket so that the AFS
filesystem can shut down the socket and flush its own work items
before closing the socket to deal with any in-progress service calls.
Future additional changes that will need to be considered:
(1) Make sure that a call doesn't hog the front of the queue by receiving
data from the network as fast as userspace is consuming it to the
exclusion of other calls.
(2) Transmit delayed ACKs from within recvmsg() when we've consumed
sufficiently more packets to avoid the background work item needing to
run.
Signed-off-by: David Howells <dhowells@redhat.com>
2016-09-08 18:10:12 +08:00
|
|
|
rxrpc_seq_t ackr_prev_seq; /* previous sequence number received */
|
2016-09-25 01:05:26 +08:00
|
|
|
rxrpc_seq_t ackr_consumed; /* Highest packet shown consumed */
|
|
|
|
rxrpc_seq_t ackr_seen; /* Highest packet shown seen */
|
2016-09-22 07:29:31 +08:00
|
|
|
rxrpc_serial_t ackr_ping; /* Last ping sent */
|
|
|
|
ktime_t ackr_ping_time; /* Time last ping sent */
|
2007-04-27 06:48:28 +08:00
|
|
|
|
rxrpc: Rewrite the data and ack handling code
Rewrite the data and ack handling code such that:
(1) Parsing of received ACK and ABORT packets and the distribution and the
filing of DATA packets happens entirely within the data_ready context
called from the UDP socket. This allows us to process and discard ACK
and ABORT packets much more quickly (they're no longer stashed on a
queue for a background thread to process).
(2) We avoid calling skb_clone(), pskb_pull() and pskb_trim(). We instead
keep track of the offset and length of the content of each packet in
the sk_buff metadata. This means we don't do any allocation in the
receive path.
(3) Jumbo DATA packet parsing is now done in data_ready context. Rather
than cloning the packet once for each subpacket and pulling/trimming
it, we file the packet multiple times with an annotation for each
indicating which subpacket is there. From that we can directly
calculate the offset and length.
(4) A call's receive queue can be accessed without taking locks (memory
barriers do have to be used, though).
(5) Incoming calls are set up from preallocated resources and immediately
made live. They can than have packets queued upon them and ACKs
generated. If insufficient resources exist, DATA packet #1 is given a
BUSY reply and other DATA packets are discarded).
(6) sk_buffs no longer take a ref on their parent call.
To make this work, the following changes are made:
(1) Each call's receive buffer is now a circular buffer of sk_buff
pointers (rxtx_buffer) rather than a number of sk_buff_heads spread
between the call and the socket. This permits each sk_buff to be in
the buffer multiple times. The receive buffer is reused for the
transmit buffer.
(2) A circular buffer of annotations (rxtx_annotations) is kept parallel
to the data buffer. Transmission phase annotations indicate whether a
buffered packet has been ACK'd or not and whether it needs
retransmission.
Receive phase annotations indicate whether a slot holds a whole packet
or a jumbo subpacket and, if the latter, which subpacket. They also
note whether the packet has been decrypted in place.
(3) DATA packet window tracking is much simplified. Each phase has just
two numbers representing the window (rx_hard_ack/rx_top and
tx_hard_ack/tx_top).
The hard_ack number is the sequence number before base of the window,
representing the last packet the other side says it has consumed.
hard_ack starts from 0 and the first packet is sequence number 1.
The top number is the sequence number of the highest-numbered packet
residing in the buffer. Packets between hard_ack+1 and top are
soft-ACK'd to indicate they've been received, but not yet consumed.
Four macros, before(), before_eq(), after() and after_eq() are added
to compare sequence numbers within the window. This allows for the
top of the window to wrap when the hard-ack sequence number gets close
to the limit.
Two flags, RXRPC_CALL_RX_LAST and RXRPC_CALL_TX_LAST, are added also
to indicate when rx_top and tx_top point at the packets with the
LAST_PACKET bit set, indicating the end of the phase.
(4) Calls are queued on the socket 'receive queue' rather than packets.
This means that we don't need have to invent dummy packets to queue to
indicate abnormal/terminal states and we don't have to keep metadata
packets (such as ABORTs) around
(5) The offset and length of a (sub)packet's content are now passed to
the verify_packet security op. This is currently expected to decrypt
the packet in place and validate it.
However, there's now nowhere to store the revised offset and length of
the actual data within the decrypted blob (there may be a header and
padding to skip) because an sk_buff may represent multiple packets, so
a locate_data security op is added to retrieve these details from the
sk_buff content when needed.
(6) recvmsg() now has to handle jumbo subpackets, where each subpacket is
individually secured and needs to be individually decrypted. The code
to do this is broken out into rxrpc_recvmsg_data() and shared with the
kernel API. It now iterates over the call's receive buffer rather
than walking the socket receive queue.
Additional changes:
(1) The timers are condensed to a single timer that is set for the soonest
of three timeouts (delayed ACK generation, DATA retransmission and
call lifespan).
(2) Transmission of ACK and ABORT packets is effected immediately from
process-context socket ops/kernel API calls that cause them instead of
them being punted off to a background work item. The data_ready
handler still has to defer to the background, though.
(3) A shutdown op is added to the AF_RXRPC socket so that the AFS
filesystem can shut down the socket and flush its own work items
before closing the socket to deal with any in-progress service calls.
Future additional changes that will need to be considered:
(1) Make sure that a call doesn't hog the front of the queue by receiving
data from the network as fast as userspace is consuming it to the
exclusion of other calls.
(2) Transmit delayed ACKs from within recvmsg() when we've consumed
sufficiently more packets to avoid the background work item needing to
run.
Signed-off-by: David Howells <dhowells@redhat.com>
2016-09-08 18:10:12 +08:00
|
|
|
/* transmission-phase ACK management */
|
|
|
|
rxrpc_serial_t acks_latest; /* serial number of latest ACK received */
|
2007-04-27 06:48:28 +08:00
|
|
|
};
|
|
|
|
|
2016-09-17 17:49:14 +08:00
|
|
|
enum rxrpc_skb_trace {
|
|
|
|
rxrpc_skb_rx_cleaned,
|
|
|
|
rxrpc_skb_rx_freed,
|
|
|
|
rxrpc_skb_rx_got,
|
|
|
|
rxrpc_skb_rx_lost,
|
|
|
|
rxrpc_skb_rx_received,
|
|
|
|
rxrpc_skb_rx_rotated,
|
|
|
|
rxrpc_skb_rx_purged,
|
|
|
|
rxrpc_skb_rx_seen,
|
|
|
|
rxrpc_skb_tx_cleaned,
|
|
|
|
rxrpc_skb_tx_freed,
|
|
|
|
rxrpc_skb_tx_got,
|
|
|
|
rxrpc_skb_tx_lost,
|
|
|
|
rxrpc_skb_tx_new,
|
|
|
|
rxrpc_skb_tx_rotated,
|
|
|
|
rxrpc_skb_tx_seen,
|
|
|
|
rxrpc_skb__nr_trace
|
|
|
|
};
|
|
|
|
|
|
|
|
extern const char rxrpc_skb_traces[rxrpc_skb__nr_trace][7];
|
|
|
|
|
2016-09-17 17:49:14 +08:00
|
|
|
enum rxrpc_conn_trace {
|
|
|
|
rxrpc_conn_new_client,
|
|
|
|
rxrpc_conn_new_service,
|
|
|
|
rxrpc_conn_queued,
|
|
|
|
rxrpc_conn_seen,
|
|
|
|
rxrpc_conn_got,
|
|
|
|
rxrpc_conn_put_client,
|
|
|
|
rxrpc_conn_put_service,
|
|
|
|
rxrpc_conn__nr_trace
|
|
|
|
};
|
|
|
|
|
|
|
|
extern const char rxrpc_conn_traces[rxrpc_conn__nr_trace][4];
|
|
|
|
|
|
|
|
enum rxrpc_client_trace {
|
|
|
|
rxrpc_client_activate_chans,
|
|
|
|
rxrpc_client_alloc,
|
|
|
|
rxrpc_client_chan_activate,
|
|
|
|
rxrpc_client_chan_disconnect,
|
|
|
|
rxrpc_client_chan_pass,
|
|
|
|
rxrpc_client_chan_unstarted,
|
|
|
|
rxrpc_client_cleanup,
|
|
|
|
rxrpc_client_count,
|
|
|
|
rxrpc_client_discard,
|
|
|
|
rxrpc_client_duplicate,
|
|
|
|
rxrpc_client_exposed,
|
|
|
|
rxrpc_client_replace,
|
|
|
|
rxrpc_client_to_active,
|
|
|
|
rxrpc_client_to_culled,
|
|
|
|
rxrpc_client_to_idle,
|
|
|
|
rxrpc_client_to_inactive,
|
|
|
|
rxrpc_client_to_waiting,
|
|
|
|
rxrpc_client_uncount,
|
|
|
|
rxrpc_client__nr_trace
|
|
|
|
};
|
|
|
|
|
|
|
|
extern const char rxrpc_client_traces[rxrpc_client__nr_trace][7];
|
|
|
|
extern const char rxrpc_conn_cache_states[RXRPC_CONN__NR_CACHE_STATES][5];
|
|
|
|
|
2016-09-07 21:34:21 +08:00
|
|
|
enum rxrpc_call_trace {
|
|
|
|
rxrpc_call_new_client,
|
|
|
|
rxrpc_call_new_service,
|
|
|
|
rxrpc_call_queued,
|
|
|
|
rxrpc_call_queued_ref,
|
|
|
|
rxrpc_call_seen,
|
2016-09-17 17:49:14 +08:00
|
|
|
rxrpc_call_connected,
|
|
|
|
rxrpc_call_release,
|
2016-09-07 21:34:21 +08:00
|
|
|
rxrpc_call_got,
|
|
|
|
rxrpc_call_got_userid,
|
2016-09-13 16:12:34 +08:00
|
|
|
rxrpc_call_got_kernel,
|
2016-09-07 21:34:21 +08:00
|
|
|
rxrpc_call_put,
|
|
|
|
rxrpc_call_put_userid,
|
2016-09-13 16:12:34 +08:00
|
|
|
rxrpc_call_put_kernel,
|
2016-09-07 21:34:21 +08:00
|
|
|
rxrpc_call_put_noqueue,
|
2016-09-17 17:49:14 +08:00
|
|
|
rxrpc_call_error,
|
2016-09-07 21:34:21 +08:00
|
|
|
rxrpc_call__nr_trace
|
|
|
|
};
|
|
|
|
|
|
|
|
extern const char rxrpc_call_traces[rxrpc_call__nr_trace][4];
|
|
|
|
|
2016-09-17 17:49:13 +08:00
|
|
|
enum rxrpc_transmit_trace {
|
|
|
|
rxrpc_transmit_wait,
|
|
|
|
rxrpc_transmit_queue,
|
|
|
|
rxrpc_transmit_queue_last,
|
|
|
|
rxrpc_transmit_rotate,
|
2016-09-23 19:39:22 +08:00
|
|
|
rxrpc_transmit_rotate_last,
|
|
|
|
rxrpc_transmit_await_reply,
|
2016-09-17 17:49:13 +08:00
|
|
|
rxrpc_transmit_end,
|
|
|
|
rxrpc_transmit__nr_trace
|
|
|
|
};
|
|
|
|
|
|
|
|
extern const char rxrpc_transmit_traces[rxrpc_transmit__nr_trace][4];
|
|
|
|
|
2016-09-17 17:49:13 +08:00
|
|
|
enum rxrpc_receive_trace {
|
|
|
|
rxrpc_receive_incoming,
|
|
|
|
rxrpc_receive_queue,
|
|
|
|
rxrpc_receive_queue_last,
|
|
|
|
rxrpc_receive_front,
|
|
|
|
rxrpc_receive_rotate,
|
|
|
|
rxrpc_receive_end,
|
|
|
|
rxrpc_receive__nr_trace
|
|
|
|
};
|
|
|
|
|
|
|
|
extern const char rxrpc_receive_traces[rxrpc_receive__nr_trace][4];
|
|
|
|
|
2016-09-17 18:13:31 +08:00
|
|
|
enum rxrpc_recvmsg_trace {
|
|
|
|
rxrpc_recvmsg_enter,
|
|
|
|
rxrpc_recvmsg_wait,
|
|
|
|
rxrpc_recvmsg_dequeue,
|
|
|
|
rxrpc_recvmsg_hole,
|
|
|
|
rxrpc_recvmsg_next,
|
|
|
|
rxrpc_recvmsg_cont,
|
|
|
|
rxrpc_recvmsg_full,
|
|
|
|
rxrpc_recvmsg_data_return,
|
|
|
|
rxrpc_recvmsg_terminal,
|
|
|
|
rxrpc_recvmsg_to_be_accepted,
|
|
|
|
rxrpc_recvmsg_return,
|
|
|
|
rxrpc_recvmsg__nr_trace
|
|
|
|
};
|
|
|
|
|
|
|
|
extern const char rxrpc_recvmsg_traces[rxrpc_recvmsg__nr_trace][5];
|
|
|
|
|
2016-09-22 07:41:53 +08:00
|
|
|
enum rxrpc_rtt_tx_trace {
|
|
|
|
rxrpc_rtt_tx_ping,
|
2016-09-22 07:29:31 +08:00
|
|
|
rxrpc_rtt_tx_data,
|
2016-09-22 07:41:53 +08:00
|
|
|
rxrpc_rtt_tx__nr_trace
|
|
|
|
};
|
|
|
|
|
|
|
|
extern const char rxrpc_rtt_tx_traces[rxrpc_rtt_tx__nr_trace][5];
|
|
|
|
|
|
|
|
enum rxrpc_rtt_rx_trace {
|
|
|
|
rxrpc_rtt_rx_ping_response,
|
2016-09-22 07:29:31 +08:00
|
|
|
rxrpc_rtt_rx_requested_ack,
|
2016-09-22 07:41:53 +08:00
|
|
|
rxrpc_rtt_rx__nr_trace
|
|
|
|
};
|
|
|
|
|
|
|
|
extern const char rxrpc_rtt_rx_traces[rxrpc_rtt_rx__nr_trace][5];
|
|
|
|
|
2016-09-23 22:22:36 +08:00
|
|
|
enum rxrpc_timer_trace {
|
|
|
|
rxrpc_timer_begin,
|
|
|
|
rxrpc_timer_expired,
|
|
|
|
rxrpc_timer_set_for_ack,
|
|
|
|
rxrpc_timer_set_for_resend,
|
|
|
|
rxrpc_timer_set_for_send,
|
|
|
|
rxrpc_timer__nr_trace
|
|
|
|
};
|
|
|
|
|
|
|
|
extern const char rxrpc_timer_traces[rxrpc_timer__nr_trace][8];
|
|
|
|
|
2016-09-23 20:50:40 +08:00
|
|
|
enum rxrpc_propose_ack_trace {
|
|
|
|
rxrpc_propose_ack_input_data,
|
|
|
|
rxrpc_propose_ack_ping_for_params,
|
|
|
|
rxrpc_propose_ack_respond_to_ack,
|
|
|
|
rxrpc_propose_ack_respond_to_ping,
|
|
|
|
rxrpc_propose_ack_retry_tx,
|
2016-09-25 01:05:26 +08:00
|
|
|
rxrpc_propose_ack_rotate_rx,
|
2016-09-23 20:50:40 +08:00
|
|
|
rxrpc_propose_ack_terminal_ack,
|
|
|
|
rxrpc_propose_ack__nr_trace
|
|
|
|
};
|
|
|
|
|
|
|
|
enum rxrpc_propose_ack_outcome {
|
|
|
|
rxrpc_propose_ack_use,
|
|
|
|
rxrpc_propose_ack_update,
|
|
|
|
rxrpc_propose_ack_subsume,
|
|
|
|
rxrpc_propose_ack__nr_outcomes
|
|
|
|
};
|
|
|
|
|
|
|
|
extern const char rxrpc_propose_ack_traces[rxrpc_propose_ack__nr_trace][8];
|
|
|
|
extern const char *const rxrpc_propose_ack_outcomes[rxrpc_propose_ack__nr_outcomes];
|
|
|
|
|
2016-09-17 17:49:13 +08:00
|
|
|
extern const char *const rxrpc_pkts[];
|
2016-09-23 20:50:40 +08:00
|
|
|
extern const char const rxrpc_ack_names[RXRPC_ACK__INVALID + 1][4];
|
2016-09-17 17:49:13 +08:00
|
|
|
|
2016-08-23 22:27:24 +08:00
|
|
|
#include <trace/events/rxrpc.h>
|
|
|
|
|
2007-04-27 06:48:28 +08:00
|
|
|
/*
|
[AF_RXRPC]: Add an interface to the AF_RXRPC module for the AFS filesystem to use
Add an interface to the AF_RXRPC module so that the AFS filesystem module can
more easily make use of the services available. AFS still opens a socket but
then uses the action functions in lieu of sendmsg() and registers an intercept
functions to grab messages before they're queued on the socket Rx queue.
This permits AFS (or whatever) to:
(1) Avoid the overhead of using the recvmsg() call.
(2) Use different keys directly on individual client calls on one socket
rather than having to open a whole slew of sockets, one for each key it
might want to use.
(3) Avoid calling request_key() at the point of issue of a call or opening of
a socket. This is done instead by AFS at the point of open(), unlink() or
other VFS operation and the key handed through.
(4) Request the use of something other than GFP_KERNEL to allocate memory.
Furthermore:
(*) The socket buffer markings used by RxRPC are made available for AFS so
that it can interpret the cooked RxRPC messages itself.
(*) rxgen (un)marshalling abort codes are made available.
The following documentation for the kernel interface is added to
Documentation/networking/rxrpc.txt:
=========================
AF_RXRPC KERNEL INTERFACE
=========================
The AF_RXRPC module also provides an interface for use by in-kernel utilities
such as the AFS filesystem. This permits such a utility to:
(1) Use different keys directly on individual client calls on one socket
rather than having to open a whole slew of sockets, one for each key it
might want to use.
(2) Avoid having RxRPC call request_key() at the point of issue of a call or
opening of a socket. Instead the utility is responsible for requesting a
key at the appropriate point. AFS, for instance, would do this during VFS
operations such as open() or unlink(). The key is then handed through
when the call is initiated.
(3) Request the use of something other than GFP_KERNEL to allocate memory.
(4) Avoid the overhead of using the recvmsg() call. RxRPC messages can be
intercepted before they get put into the socket Rx queue and the socket
buffers manipulated directly.
To use the RxRPC facility, a kernel utility must still open an AF_RXRPC socket,
bind an addess as appropriate and listen if it's to be a server socket, but
then it passes this to the kernel interface functions.
The kernel interface functions are as follows:
(*) Begin a new client call.
struct rxrpc_call *
rxrpc_kernel_begin_call(struct socket *sock,
struct sockaddr_rxrpc *srx,
struct key *key,
unsigned long user_call_ID,
gfp_t gfp);
This allocates the infrastructure to make a new RxRPC call and assigns
call and connection numbers. The call will be made on the UDP port that
the socket is bound to. The call will go to the destination address of a
connected client socket unless an alternative is supplied (srx is
non-NULL).
If a key is supplied then this will be used to secure the call instead of
the key bound to the socket with the RXRPC_SECURITY_KEY sockopt. Calls
secured in this way will still share connections if at all possible.
The user_call_ID is equivalent to that supplied to sendmsg() in the
control data buffer. It is entirely feasible to use this to point to a
kernel data structure.
If this function is successful, an opaque reference to the RxRPC call is
returned. The caller now holds a reference on this and it must be
properly ended.
(*) End a client call.
void rxrpc_kernel_end_call(struct rxrpc_call *call);
This is used to end a previously begun call. The user_call_ID is expunged
from AF_RXRPC's knowledge and will not be seen again in association with
the specified call.
(*) Send data through a call.
int rxrpc_kernel_send_data(struct rxrpc_call *call, struct msghdr *msg,
size_t len);
This is used to supply either the request part of a client call or the
reply part of a server call. msg.msg_iovlen and msg.msg_iov specify the
data buffers to be used. msg_iov may not be NULL and must point
exclusively to in-kernel virtual addresses. msg.msg_flags may be given
MSG_MORE if there will be subsequent data sends for this call.
The msg must not specify a destination address, control data or any flags
other than MSG_MORE. len is the total amount of data to transmit.
(*) Abort a call.
void rxrpc_kernel_abort_call(struct rxrpc_call *call, u32 abort_code);
This is used to abort a call if it's still in an abortable state. The
abort code specified will be placed in the ABORT message sent.
(*) Intercept received RxRPC messages.
typedef void (*rxrpc_interceptor_t)(struct sock *sk,
unsigned long user_call_ID,
struct sk_buff *skb);
void
rxrpc_kernel_intercept_rx_messages(struct socket *sock,
rxrpc_interceptor_t interceptor);
This installs an interceptor function on the specified AF_RXRPC socket.
All messages that would otherwise wind up in the socket's Rx queue are
then diverted to this function. Note that care must be taken to process
the messages in the right order to maintain DATA message sequentiality.
The interceptor function itself is provided with the address of the socket
and handling the incoming message, the ID assigned by the kernel utility
to the call and the socket buffer containing the message.
The skb->mark field indicates the type of message:
MARK MEANING
=============================== =======================================
RXRPC_SKB_MARK_DATA Data message
RXRPC_SKB_MARK_FINAL_ACK Final ACK received for an incoming call
RXRPC_SKB_MARK_BUSY Client call rejected as server busy
RXRPC_SKB_MARK_REMOTE_ABORT Call aborted by peer
RXRPC_SKB_MARK_NET_ERROR Network error detected
RXRPC_SKB_MARK_LOCAL_ERROR Local error encountered
RXRPC_SKB_MARK_NEW_CALL New incoming call awaiting acceptance
The remote abort message can be probed with rxrpc_kernel_get_abort_code().
The two error messages can be probed with rxrpc_kernel_get_error_number().
A new call can be accepted with rxrpc_kernel_accept_call().
Data messages can have their contents extracted with the usual bunch of
socket buffer manipulation functions. A data message can be determined to
be the last one in a sequence with rxrpc_kernel_is_data_last(). When a
data message has been used up, rxrpc_kernel_data_delivered() should be
called on it..
Non-data messages should be handled to rxrpc_kernel_free_skb() to dispose
of. It is possible to get extra refs on all types of message for later
freeing, but this may pin the state of a call until the message is finally
freed.
(*) Accept an incoming call.
struct rxrpc_call *
rxrpc_kernel_accept_call(struct socket *sock,
unsigned long user_call_ID);
This is used to accept an incoming call and to assign it a call ID. This
function is similar to rxrpc_kernel_begin_call() and calls accepted must
be ended in the same way.
If this function is successful, an opaque reference to the RxRPC call is
returned. The caller now holds a reference on this and it must be
properly ended.
(*) Reject an incoming call.
int rxrpc_kernel_reject_call(struct socket *sock);
This is used to reject the first incoming call on the socket's queue with
a BUSY message. -ENODATA is returned if there were no incoming calls.
Other errors may be returned if the call had been aborted (-ECONNABORTED)
or had timed out (-ETIME).
(*) Record the delivery of a data message and free it.
void rxrpc_kernel_data_delivered(struct sk_buff *skb);
This is used to record a data message as having been delivered and to
update the ACK state for the call. The socket buffer will be freed.
(*) Free a message.
void rxrpc_kernel_free_skb(struct sk_buff *skb);
This is used to free a non-DATA socket buffer intercepted from an AF_RXRPC
socket.
(*) Determine if a data message is the last one on a call.
bool rxrpc_kernel_is_data_last(struct sk_buff *skb);
This is used to determine if a socket buffer holds the last data message
to be received for a call (true will be returned if it does, false
if not).
The data message will be part of the reply on a client call and the
request on an incoming call. In the latter case there will be more
messages, but in the former case there will not.
(*) Get the abort code from an abort message.
u32 rxrpc_kernel_get_abort_code(struct sk_buff *skb);
This is used to extract the abort code from a remote abort message.
(*) Get the error number from a local or network error message.
int rxrpc_kernel_get_error_number(struct sk_buff *skb);
This is used to extract the error number from a message indicating either
a local error occurred or a network error occurred.
Signed-off-by: David Howells <dhowells@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2007-04-27 06:50:17 +08:00
|
|
|
* af_rxrpc.c
|
2007-04-27 06:48:28 +08:00
|
|
|
*/
|
2016-09-17 17:49:14 +08:00
|
|
|
extern atomic_t rxrpc_n_tx_skbs, rxrpc_n_rx_skbs;
|
2016-03-04 23:53:46 +08:00
|
|
|
extern u32 rxrpc_epoch;
|
[AF_RXRPC]: Add an interface to the AF_RXRPC module for the AFS filesystem to use
Add an interface to the AF_RXRPC module so that the AFS filesystem module can
more easily make use of the services available. AFS still opens a socket but
then uses the action functions in lieu of sendmsg() and registers an intercept
functions to grab messages before they're queued on the socket Rx queue.
This permits AFS (or whatever) to:
(1) Avoid the overhead of using the recvmsg() call.
(2) Use different keys directly on individual client calls on one socket
rather than having to open a whole slew of sockets, one for each key it
might want to use.
(3) Avoid calling request_key() at the point of issue of a call or opening of
a socket. This is done instead by AFS at the point of open(), unlink() or
other VFS operation and the key handed through.
(4) Request the use of something other than GFP_KERNEL to allocate memory.
Furthermore:
(*) The socket buffer markings used by RxRPC are made available for AFS so
that it can interpret the cooked RxRPC messages itself.
(*) rxgen (un)marshalling abort codes are made available.
The following documentation for the kernel interface is added to
Documentation/networking/rxrpc.txt:
=========================
AF_RXRPC KERNEL INTERFACE
=========================
The AF_RXRPC module also provides an interface for use by in-kernel utilities
such as the AFS filesystem. This permits such a utility to:
(1) Use different keys directly on individual client calls on one socket
rather than having to open a whole slew of sockets, one for each key it
might want to use.
(2) Avoid having RxRPC call request_key() at the point of issue of a call or
opening of a socket. Instead the utility is responsible for requesting a
key at the appropriate point. AFS, for instance, would do this during VFS
operations such as open() or unlink(). The key is then handed through
when the call is initiated.
(3) Request the use of something other than GFP_KERNEL to allocate memory.
(4) Avoid the overhead of using the recvmsg() call. RxRPC messages can be
intercepted before they get put into the socket Rx queue and the socket
buffers manipulated directly.
To use the RxRPC facility, a kernel utility must still open an AF_RXRPC socket,
bind an addess as appropriate and listen if it's to be a server socket, but
then it passes this to the kernel interface functions.
The kernel interface functions are as follows:
(*) Begin a new client call.
struct rxrpc_call *
rxrpc_kernel_begin_call(struct socket *sock,
struct sockaddr_rxrpc *srx,
struct key *key,
unsigned long user_call_ID,
gfp_t gfp);
This allocates the infrastructure to make a new RxRPC call and assigns
call and connection numbers. The call will be made on the UDP port that
the socket is bound to. The call will go to the destination address of a
connected client socket unless an alternative is supplied (srx is
non-NULL).
If a key is supplied then this will be used to secure the call instead of
the key bound to the socket with the RXRPC_SECURITY_KEY sockopt. Calls
secured in this way will still share connections if at all possible.
The user_call_ID is equivalent to that supplied to sendmsg() in the
control data buffer. It is entirely feasible to use this to point to a
kernel data structure.
If this function is successful, an opaque reference to the RxRPC call is
returned. The caller now holds a reference on this and it must be
properly ended.
(*) End a client call.
void rxrpc_kernel_end_call(struct rxrpc_call *call);
This is used to end a previously begun call. The user_call_ID is expunged
from AF_RXRPC's knowledge and will not be seen again in association with
the specified call.
(*) Send data through a call.
int rxrpc_kernel_send_data(struct rxrpc_call *call, struct msghdr *msg,
size_t len);
This is used to supply either the request part of a client call or the
reply part of a server call. msg.msg_iovlen and msg.msg_iov specify the
data buffers to be used. msg_iov may not be NULL and must point
exclusively to in-kernel virtual addresses. msg.msg_flags may be given
MSG_MORE if there will be subsequent data sends for this call.
The msg must not specify a destination address, control data or any flags
other than MSG_MORE. len is the total amount of data to transmit.
(*) Abort a call.
void rxrpc_kernel_abort_call(struct rxrpc_call *call, u32 abort_code);
This is used to abort a call if it's still in an abortable state. The
abort code specified will be placed in the ABORT message sent.
(*) Intercept received RxRPC messages.
typedef void (*rxrpc_interceptor_t)(struct sock *sk,
unsigned long user_call_ID,
struct sk_buff *skb);
void
rxrpc_kernel_intercept_rx_messages(struct socket *sock,
rxrpc_interceptor_t interceptor);
This installs an interceptor function on the specified AF_RXRPC socket.
All messages that would otherwise wind up in the socket's Rx queue are
then diverted to this function. Note that care must be taken to process
the messages in the right order to maintain DATA message sequentiality.
The interceptor function itself is provided with the address of the socket
and handling the incoming message, the ID assigned by the kernel utility
to the call and the socket buffer containing the message.
The skb->mark field indicates the type of message:
MARK MEANING
=============================== =======================================
RXRPC_SKB_MARK_DATA Data message
RXRPC_SKB_MARK_FINAL_ACK Final ACK received for an incoming call
RXRPC_SKB_MARK_BUSY Client call rejected as server busy
RXRPC_SKB_MARK_REMOTE_ABORT Call aborted by peer
RXRPC_SKB_MARK_NET_ERROR Network error detected
RXRPC_SKB_MARK_LOCAL_ERROR Local error encountered
RXRPC_SKB_MARK_NEW_CALL New incoming call awaiting acceptance
The remote abort message can be probed with rxrpc_kernel_get_abort_code().
The two error messages can be probed with rxrpc_kernel_get_error_number().
A new call can be accepted with rxrpc_kernel_accept_call().
Data messages can have their contents extracted with the usual bunch of
socket buffer manipulation functions. A data message can be determined to
be the last one in a sequence with rxrpc_kernel_is_data_last(). When a
data message has been used up, rxrpc_kernel_data_delivered() should be
called on it..
Non-data messages should be handled to rxrpc_kernel_free_skb() to dispose
of. It is possible to get extra refs on all types of message for later
freeing, but this may pin the state of a call until the message is finally
freed.
(*) Accept an incoming call.
struct rxrpc_call *
rxrpc_kernel_accept_call(struct socket *sock,
unsigned long user_call_ID);
This is used to accept an incoming call and to assign it a call ID. This
function is similar to rxrpc_kernel_begin_call() and calls accepted must
be ended in the same way.
If this function is successful, an opaque reference to the RxRPC call is
returned. The caller now holds a reference on this and it must be
properly ended.
(*) Reject an incoming call.
int rxrpc_kernel_reject_call(struct socket *sock);
This is used to reject the first incoming call on the socket's queue with
a BUSY message. -ENODATA is returned if there were no incoming calls.
Other errors may be returned if the call had been aborted (-ECONNABORTED)
or had timed out (-ETIME).
(*) Record the delivery of a data message and free it.
void rxrpc_kernel_data_delivered(struct sk_buff *skb);
This is used to record a data message as having been delivered and to
update the ACK state for the call. The socket buffer will be freed.
(*) Free a message.
void rxrpc_kernel_free_skb(struct sk_buff *skb);
This is used to free a non-DATA socket buffer intercepted from an AF_RXRPC
socket.
(*) Determine if a data message is the last one on a call.
bool rxrpc_kernel_is_data_last(struct sk_buff *skb);
This is used to determine if a socket buffer holds the last data message
to be received for a call (true will be returned if it does, false
if not).
The data message will be part of the reply on a client call and the
request on an incoming call. In the latter case there will be more
messages, but in the former case there will not.
(*) Get the abort code from an abort message.
u32 rxrpc_kernel_get_abort_code(struct sk_buff *skb);
This is used to extract the abort code from a remote abort message.
(*) Get the error number from a local or network error message.
int rxrpc_kernel_get_error_number(struct sk_buff *skb);
This is used to extract the error number from a message indicating either
a local error occurred or a network error occurred.
Signed-off-by: David Howells <dhowells@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2007-04-27 06:50:17 +08:00
|
|
|
extern atomic_t rxrpc_debug_id;
|
|
|
|
extern struct workqueue_struct *rxrpc_workqueue;
|
2007-04-27 06:48:28 +08:00
|
|
|
|
|
|
|
/*
|
2016-06-13 20:30:30 +08:00
|
|
|
* call_accept.c
|
2007-04-27 06:48:28 +08:00
|
|
|
*/
|
2016-09-08 18:10:12 +08:00
|
|
|
int rxrpc_service_prealloc(struct rxrpc_sock *, gfp_t);
|
|
|
|
void rxrpc_discard_prealloc(struct rxrpc_sock *);
|
rxrpc: Rewrite the data and ack handling code
Rewrite the data and ack handling code such that:
(1) Parsing of received ACK and ABORT packets and the distribution and the
filing of DATA packets happens entirely within the data_ready context
called from the UDP socket. This allows us to process and discard ACK
and ABORT packets much more quickly (they're no longer stashed on a
queue for a background thread to process).
(2) We avoid calling skb_clone(), pskb_pull() and pskb_trim(). We instead
keep track of the offset and length of the content of each packet in
the sk_buff metadata. This means we don't do any allocation in the
receive path.
(3) Jumbo DATA packet parsing is now done in data_ready context. Rather
than cloning the packet once for each subpacket and pulling/trimming
it, we file the packet multiple times with an annotation for each
indicating which subpacket is there. From that we can directly
calculate the offset and length.
(4) A call's receive queue can be accessed without taking locks (memory
barriers do have to be used, though).
(5) Incoming calls are set up from preallocated resources and immediately
made live. They can than have packets queued upon them and ACKs
generated. If insufficient resources exist, DATA packet #1 is given a
BUSY reply and other DATA packets are discarded).
(6) sk_buffs no longer take a ref on their parent call.
To make this work, the following changes are made:
(1) Each call's receive buffer is now a circular buffer of sk_buff
pointers (rxtx_buffer) rather than a number of sk_buff_heads spread
between the call and the socket. This permits each sk_buff to be in
the buffer multiple times. The receive buffer is reused for the
transmit buffer.
(2) A circular buffer of annotations (rxtx_annotations) is kept parallel
to the data buffer. Transmission phase annotations indicate whether a
buffered packet has been ACK'd or not and whether it needs
retransmission.
Receive phase annotations indicate whether a slot holds a whole packet
or a jumbo subpacket and, if the latter, which subpacket. They also
note whether the packet has been decrypted in place.
(3) DATA packet window tracking is much simplified. Each phase has just
two numbers representing the window (rx_hard_ack/rx_top and
tx_hard_ack/tx_top).
The hard_ack number is the sequence number before base of the window,
representing the last packet the other side says it has consumed.
hard_ack starts from 0 and the first packet is sequence number 1.
The top number is the sequence number of the highest-numbered packet
residing in the buffer. Packets between hard_ack+1 and top are
soft-ACK'd to indicate they've been received, but not yet consumed.
Four macros, before(), before_eq(), after() and after_eq() are added
to compare sequence numbers within the window. This allows for the
top of the window to wrap when the hard-ack sequence number gets close
to the limit.
Two flags, RXRPC_CALL_RX_LAST and RXRPC_CALL_TX_LAST, are added also
to indicate when rx_top and tx_top point at the packets with the
LAST_PACKET bit set, indicating the end of the phase.
(4) Calls are queued on the socket 'receive queue' rather than packets.
This means that we don't need have to invent dummy packets to queue to
indicate abnormal/terminal states and we don't have to keep metadata
packets (such as ABORTs) around
(5) The offset and length of a (sub)packet's content are now passed to
the verify_packet security op. This is currently expected to decrypt
the packet in place and validate it.
However, there's now nowhere to store the revised offset and length of
the actual data within the decrypted blob (there may be a header and
padding to skip) because an sk_buff may represent multiple packets, so
a locate_data security op is added to retrieve these details from the
sk_buff content when needed.
(6) recvmsg() now has to handle jumbo subpackets, where each subpacket is
individually secured and needs to be individually decrypted. The code
to do this is broken out into rxrpc_recvmsg_data() and shared with the
kernel API. It now iterates over the call's receive buffer rather
than walking the socket receive queue.
Additional changes:
(1) The timers are condensed to a single timer that is set for the soonest
of three timeouts (delayed ACK generation, DATA retransmission and
call lifespan).
(2) Transmission of ACK and ABORT packets is effected immediately from
process-context socket ops/kernel API calls that cause them instead of
them being punted off to a background work item. The data_ready
handler still has to defer to the background, though.
(3) A shutdown op is added to the AF_RXRPC socket so that the AFS
filesystem can shut down the socket and flush its own work items
before closing the socket to deal with any in-progress service calls.
Future additional changes that will need to be considered:
(1) Make sure that a call doesn't hog the front of the queue by receiving
data from the network as fast as userspace is consuming it to the
exclusion of other calls.
(2) Transmit delayed ACKs from within recvmsg() when we've consumed
sufficiently more packets to avoid the background work item needing to
run.
Signed-off-by: David Howells <dhowells@redhat.com>
2016-09-08 18:10:12 +08:00
|
|
|
struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *,
|
|
|
|
struct rxrpc_connection *,
|
|
|
|
struct sk_buff *);
|
2016-04-04 21:00:35 +08:00
|
|
|
void rxrpc_accept_incoming_calls(struct rxrpc_local *);
|
rxrpc: Don't expose skbs to in-kernel users [ver #2]
Don't expose skbs to in-kernel users, such as the AFS filesystem, but
instead provide a notification hook the indicates that a call needs
attention and another that indicates that there's a new call to be
collected.
This makes the following possibilities more achievable:
(1) Call refcounting can be made simpler if skbs don't hold refs to calls.
(2) skbs referring to non-data events will be able to be freed much sooner
rather than being queued for AFS to pick up as rxrpc_kernel_recv_data
will be able to consult the call state.
(3) We can shortcut the receive phase when a call is remotely aborted
because we don't have to go through all the packets to get to the one
cancelling the operation.
(4) It makes it easier to do encryption/decryption directly between AFS's
buffers and sk_buffs.
(5) Encryption/decryption can more easily be done in the AFS's thread
contexts - usually that of the userspace process that issued a syscall
- rather than in one of rxrpc's background threads on a workqueue.
(6) AFS will be able to wait synchronously on a call inside AF_RXRPC.
To make this work, the following interface function has been added:
int rxrpc_kernel_recv_data(
struct socket *sock, struct rxrpc_call *call,
void *buffer, size_t bufsize, size_t *_offset,
bool want_more, u32 *_abort_code);
This is the recvmsg equivalent. It allows the caller to find out about the
state of a specific call and to transfer received data into a buffer
piecemeal.
afs_extract_data() and rxrpc_kernel_recv_data() now do all the extraction
logic between them. They don't wait synchronously yet because the socket
lock needs to be dealt with.
Five interface functions have been removed:
rxrpc_kernel_is_data_last()
rxrpc_kernel_get_abort_code()
rxrpc_kernel_get_error_number()
rxrpc_kernel_free_skb()
rxrpc_kernel_data_consumed()
As a temporary hack, sk_buffs going to an in-kernel call are queued on the
rxrpc_call struct (->knlrecv_queue) rather than being handed over to the
in-kernel user. To process the queue internally, a temporary function,
temp_deliver_data() has been added. This will be replaced with common code
between the rxrpc_recvmsg() path and the kernel_rxrpc_recv_data() path in a
future patch.
Signed-off-by: David Howells <dhowells@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2016-08-31 03:42:14 +08:00
|
|
|
struct rxrpc_call *rxrpc_accept_call(struct rxrpc_sock *, unsigned long,
|
|
|
|
rxrpc_notify_rx_t);
|
2013-10-19 04:48:25 +08:00
|
|
|
int rxrpc_reject_call(struct rxrpc_sock *);
|
2007-04-27 06:48:28 +08:00
|
|
|
|
|
|
|
/*
|
2016-06-13 20:30:30 +08:00
|
|
|
* call_event.c
|
2007-04-27 06:48:28 +08:00
|
|
|
*/
|
2016-09-23 22:22:36 +08:00
|
|
|
void rxrpc_set_timer(struct rxrpc_call *, enum rxrpc_timer_trace);
|
2016-09-23 20:50:40 +08:00
|
|
|
void rxrpc_propose_ACK(struct rxrpc_call *, u8, u16, u32, bool, bool,
|
|
|
|
enum rxrpc_propose_ack_trace);
|
2013-10-19 04:48:25 +08:00
|
|
|
void rxrpc_process_call(struct work_struct *);
|
2007-04-27 06:48:28 +08:00
|
|
|
|
|
|
|
/*
|
2016-06-13 20:30:30 +08:00
|
|
|
* call_object.c
|
2007-04-27 06:48:28 +08:00
|
|
|
*/
|
2016-08-30 16:49:28 +08:00
|
|
|
extern const char *const rxrpc_call_states[];
|
|
|
|
extern const char *const rxrpc_call_completions[];
|
2016-03-10 07:22:56 +08:00
|
|
|
extern unsigned int rxrpc_max_call_lifetime;
|
2007-04-27 06:48:28 +08:00
|
|
|
extern struct kmem_cache *rxrpc_call_jar;
|
|
|
|
extern struct list_head rxrpc_calls;
|
|
|
|
extern rwlock_t rxrpc_call_lock;
|
|
|
|
|
2016-06-10 06:02:51 +08:00
|
|
|
struct rxrpc_call *rxrpc_find_call_by_user_ID(struct rxrpc_sock *, unsigned long);
|
2016-09-08 18:10:12 +08:00
|
|
|
struct rxrpc_call *rxrpc_alloc_call(gfp_t);
|
2016-06-10 06:02:51 +08:00
|
|
|
struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *,
|
2016-04-04 21:00:36 +08:00
|
|
|
struct rxrpc_conn_parameters *,
|
2016-06-17 22:42:35 +08:00
|
|
|
struct sockaddr_rxrpc *,
|
2016-06-10 06:02:51 +08:00
|
|
|
unsigned long, gfp_t);
|
rxrpc: Rewrite the data and ack handling code
Rewrite the data and ack handling code such that:
(1) Parsing of received ACK and ABORT packets and the distribution and the
filing of DATA packets happens entirely within the data_ready context
called from the UDP socket. This allows us to process and discard ACK
and ABORT packets much more quickly (they're no longer stashed on a
queue for a background thread to process).
(2) We avoid calling skb_clone(), pskb_pull() and pskb_trim(). We instead
keep track of the offset and length of the content of each packet in
the sk_buff metadata. This means we don't do any allocation in the
receive path.
(3) Jumbo DATA packet parsing is now done in data_ready context. Rather
than cloning the packet once for each subpacket and pulling/trimming
it, we file the packet multiple times with an annotation for each
indicating which subpacket is there. From that we can directly
calculate the offset and length.
(4) A call's receive queue can be accessed without taking locks (memory
barriers do have to be used, though).
(5) Incoming calls are set up from preallocated resources and immediately
made live. They can than have packets queued upon them and ACKs
generated. If insufficient resources exist, DATA packet #1 is given a
BUSY reply and other DATA packets are discarded).
(6) sk_buffs no longer take a ref on their parent call.
To make this work, the following changes are made:
(1) Each call's receive buffer is now a circular buffer of sk_buff
pointers (rxtx_buffer) rather than a number of sk_buff_heads spread
between the call and the socket. This permits each sk_buff to be in
the buffer multiple times. The receive buffer is reused for the
transmit buffer.
(2) A circular buffer of annotations (rxtx_annotations) is kept parallel
to the data buffer. Transmission phase annotations indicate whether a
buffered packet has been ACK'd or not and whether it needs
retransmission.
Receive phase annotations indicate whether a slot holds a whole packet
or a jumbo subpacket and, if the latter, which subpacket. They also
note whether the packet has been decrypted in place.
(3) DATA packet window tracking is much simplified. Each phase has just
two numbers representing the window (rx_hard_ack/rx_top and
tx_hard_ack/tx_top).
The hard_ack number is the sequence number before base of the window,
representing the last packet the other side says it has consumed.
hard_ack starts from 0 and the first packet is sequence number 1.
The top number is the sequence number of the highest-numbered packet
residing in the buffer. Packets between hard_ack+1 and top are
soft-ACK'd to indicate they've been received, but not yet consumed.
Four macros, before(), before_eq(), after() and after_eq() are added
to compare sequence numbers within the window. This allows for the
top of the window to wrap when the hard-ack sequence number gets close
to the limit.
Two flags, RXRPC_CALL_RX_LAST and RXRPC_CALL_TX_LAST, are added also
to indicate when rx_top and tx_top point at the packets with the
LAST_PACKET bit set, indicating the end of the phase.
(4) Calls are queued on the socket 'receive queue' rather than packets.
This means that we don't need have to invent dummy packets to queue to
indicate abnormal/terminal states and we don't have to keep metadata
packets (such as ABORTs) around
(5) The offset and length of a (sub)packet's content are now passed to
the verify_packet security op. This is currently expected to decrypt
the packet in place and validate it.
However, there's now nowhere to store the revised offset and length of
the actual data within the decrypted blob (there may be a header and
padding to skip) because an sk_buff may represent multiple packets, so
a locate_data security op is added to retrieve these details from the
sk_buff content when needed.
(6) recvmsg() now has to handle jumbo subpackets, where each subpacket is
individually secured and needs to be individually decrypted. The code
to do this is broken out into rxrpc_recvmsg_data() and shared with the
kernel API. It now iterates over the call's receive buffer rather
than walking the socket receive queue.
Additional changes:
(1) The timers are condensed to a single timer that is set for the soonest
of three timeouts (delayed ACK generation, DATA retransmission and
call lifespan).
(2) Transmission of ACK and ABORT packets is effected immediately from
process-context socket ops/kernel API calls that cause them instead of
them being punted off to a background work item. The data_ready
handler still has to defer to the background, though.
(3) A shutdown op is added to the AF_RXRPC socket so that the AFS
filesystem can shut down the socket and flush its own work items
before closing the socket to deal with any in-progress service calls.
Future additional changes that will need to be considered:
(1) Make sure that a call doesn't hog the front of the queue by receiving
data from the network as fast as userspace is consuming it to the
exclusion of other calls.
(2) Transmit delayed ACKs from within recvmsg() when we've consumed
sufficiently more packets to avoid the background work item needing to
run.
Signed-off-by: David Howells <dhowells@redhat.com>
2016-09-08 18:10:12 +08:00
|
|
|
void rxrpc_incoming_call(struct rxrpc_sock *, struct rxrpc_call *,
|
|
|
|
struct sk_buff *);
|
2016-09-07 16:19:31 +08:00
|
|
|
void rxrpc_release_call(struct rxrpc_sock *, struct rxrpc_call *);
|
2013-10-19 04:48:25 +08:00
|
|
|
void rxrpc_release_calls_on_socket(struct rxrpc_sock *);
|
2016-09-07 16:19:31 +08:00
|
|
|
bool __rxrpc_queue_call(struct rxrpc_call *);
|
|
|
|
bool rxrpc_queue_call(struct rxrpc_call *);
|
2016-08-30 16:49:29 +08:00
|
|
|
void rxrpc_see_call(struct rxrpc_call *);
|
2016-09-07 21:34:21 +08:00
|
|
|
void rxrpc_get_call(struct rxrpc_call *, enum rxrpc_call_trace);
|
|
|
|
void rxrpc_put_call(struct rxrpc_call *, enum rxrpc_call_trace);
|
2016-09-08 18:10:12 +08:00
|
|
|
void rxrpc_cleanup_call(struct rxrpc_call *);
|
2013-10-19 04:48:25 +08:00
|
|
|
void __exit rxrpc_destroy_all_calls(void);
|
2007-04-27 06:48:28 +08:00
|
|
|
|
2016-08-23 22:27:24 +08:00
|
|
|
static inline bool rxrpc_is_service_call(const struct rxrpc_call *call)
|
|
|
|
{
|
|
|
|
return test_bit(RXRPC_CALL_IS_SERVICE, &call->flags);
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline bool rxrpc_is_client_call(const struct rxrpc_call *call)
|
|
|
|
{
|
|
|
|
return !rxrpc_is_service_call(call);
|
|
|
|
}
|
|
|
|
|
2016-08-30 16:49:28 +08:00
|
|
|
/*
|
|
|
|
* Transition a call to the complete state.
|
|
|
|
*/
|
|
|
|
static inline bool __rxrpc_set_call_completion(struct rxrpc_call *call,
|
|
|
|
enum rxrpc_call_completion compl,
|
|
|
|
u32 abort_code,
|
|
|
|
int error)
|
|
|
|
{
|
|
|
|
if (call->state < RXRPC_CALL_COMPLETE) {
|
|
|
|
call->abort_code = abort_code;
|
|
|
|
call->error = error;
|
|
|
|
call->completion = compl,
|
|
|
|
call->state = RXRPC_CALL_COMPLETE;
|
2016-09-23 19:39:23 +08:00
|
|
|
wake_up(&call->waitq);
|
2016-08-30 16:49:28 +08:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline bool rxrpc_set_call_completion(struct rxrpc_call *call,
|
|
|
|
enum rxrpc_call_completion compl,
|
|
|
|
u32 abort_code,
|
|
|
|
int error)
|
|
|
|
{
|
2016-09-07 23:34:12 +08:00
|
|
|
bool ret;
|
2016-08-30 16:49:28 +08:00
|
|
|
|
|
|
|
write_lock_bh(&call->state_lock);
|
|
|
|
ret = __rxrpc_set_call_completion(call, compl, abort_code, error);
|
|
|
|
write_unlock_bh(&call->state_lock);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Record that a call successfully completed.
|
|
|
|
*/
|
2016-09-07 23:34:12 +08:00
|
|
|
static inline bool __rxrpc_call_completed(struct rxrpc_call *call)
|
2016-08-30 16:49:28 +08:00
|
|
|
{
|
2016-09-07 23:34:12 +08:00
|
|
|
return __rxrpc_set_call_completion(call, RXRPC_CALL_SUCCEEDED, 0, 0);
|
2016-08-30 16:49:28 +08:00
|
|
|
}
|
|
|
|
|
2016-09-07 23:34:12 +08:00
|
|
|
static inline bool rxrpc_call_completed(struct rxrpc_call *call)
|
2016-08-30 16:49:28 +08:00
|
|
|
{
|
2016-09-07 23:34:12 +08:00
|
|
|
bool ret;
|
|
|
|
|
2016-08-30 16:49:28 +08:00
|
|
|
write_lock_bh(&call->state_lock);
|
2016-09-07 23:34:12 +08:00
|
|
|
ret = __rxrpc_call_completed(call);
|
2016-08-30 16:49:28 +08:00
|
|
|
write_unlock_bh(&call->state_lock);
|
2016-09-07 23:34:12 +08:00
|
|
|
return ret;
|
2016-08-30 16:49:28 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Record that a call is locally aborted.
|
|
|
|
*/
|
2016-09-07 05:19:51 +08:00
|
|
|
static inline bool __rxrpc_abort_call(const char *why, struct rxrpc_call *call,
|
|
|
|
rxrpc_seq_t seq,
|
2016-08-30 16:49:28 +08:00
|
|
|
u32 abort_code, int error)
|
|
|
|
{
|
2016-09-07 05:19:51 +08:00
|
|
|
trace_rxrpc_abort(why, call->cid, call->call_id, seq,
|
|
|
|
abort_code, error);
|
rxrpc: Rewrite the data and ack handling code
Rewrite the data and ack handling code such that:
(1) Parsing of received ACK and ABORT packets and the distribution and the
filing of DATA packets happens entirely within the data_ready context
called from the UDP socket. This allows us to process and discard ACK
and ABORT packets much more quickly (they're no longer stashed on a
queue for a background thread to process).
(2) We avoid calling skb_clone(), pskb_pull() and pskb_trim(). We instead
keep track of the offset and length of the content of each packet in
the sk_buff metadata. This means we don't do any allocation in the
receive path.
(3) Jumbo DATA packet parsing is now done in data_ready context. Rather
than cloning the packet once for each subpacket and pulling/trimming
it, we file the packet multiple times with an annotation for each
indicating which subpacket is there. From that we can directly
calculate the offset and length.
(4) A call's receive queue can be accessed without taking locks (memory
barriers do have to be used, though).
(5) Incoming calls are set up from preallocated resources and immediately
made live. They can than have packets queued upon them and ACKs
generated. If insufficient resources exist, DATA packet #1 is given a
BUSY reply and other DATA packets are discarded).
(6) sk_buffs no longer take a ref on their parent call.
To make this work, the following changes are made:
(1) Each call's receive buffer is now a circular buffer of sk_buff
pointers (rxtx_buffer) rather than a number of sk_buff_heads spread
between the call and the socket. This permits each sk_buff to be in
the buffer multiple times. The receive buffer is reused for the
transmit buffer.
(2) A circular buffer of annotations (rxtx_annotations) is kept parallel
to the data buffer. Transmission phase annotations indicate whether a
buffered packet has been ACK'd or not and whether it needs
retransmission.
Receive phase annotations indicate whether a slot holds a whole packet
or a jumbo subpacket and, if the latter, which subpacket. They also
note whether the packet has been decrypted in place.
(3) DATA packet window tracking is much simplified. Each phase has just
two numbers representing the window (rx_hard_ack/rx_top and
tx_hard_ack/tx_top).
The hard_ack number is the sequence number before base of the window,
representing the last packet the other side says it has consumed.
hard_ack starts from 0 and the first packet is sequence number 1.
The top number is the sequence number of the highest-numbered packet
residing in the buffer. Packets between hard_ack+1 and top are
soft-ACK'd to indicate they've been received, but not yet consumed.
Four macros, before(), before_eq(), after() and after_eq() are added
to compare sequence numbers within the window. This allows for the
top of the window to wrap when the hard-ack sequence number gets close
to the limit.
Two flags, RXRPC_CALL_RX_LAST and RXRPC_CALL_TX_LAST, are added also
to indicate when rx_top and tx_top point at the packets with the
LAST_PACKET bit set, indicating the end of the phase.
(4) Calls are queued on the socket 'receive queue' rather than packets.
This means that we don't need have to invent dummy packets to queue to
indicate abnormal/terminal states and we don't have to keep metadata
packets (such as ABORTs) around
(5) The offset and length of a (sub)packet's content are now passed to
the verify_packet security op. This is currently expected to decrypt
the packet in place and validate it.
However, there's now nowhere to store the revised offset and length of
the actual data within the decrypted blob (there may be a header and
padding to skip) because an sk_buff may represent multiple packets, so
a locate_data security op is added to retrieve these details from the
sk_buff content when needed.
(6) recvmsg() now has to handle jumbo subpackets, where each subpacket is
individually secured and needs to be individually decrypted. The code
to do this is broken out into rxrpc_recvmsg_data() and shared with the
kernel API. It now iterates over the call's receive buffer rather
than walking the socket receive queue.
Additional changes:
(1) The timers are condensed to a single timer that is set for the soonest
of three timeouts (delayed ACK generation, DATA retransmission and
call lifespan).
(2) Transmission of ACK and ABORT packets is effected immediately from
process-context socket ops/kernel API calls that cause them instead of
them being punted off to a background work item. The data_ready
handler still has to defer to the background, though.
(3) A shutdown op is added to the AF_RXRPC socket so that the AFS
filesystem can shut down the socket and flush its own work items
before closing the socket to deal with any in-progress service calls.
Future additional changes that will need to be considered:
(1) Make sure that a call doesn't hog the front of the queue by receiving
data from the network as fast as userspace is consuming it to the
exclusion of other calls.
(2) Transmit delayed ACKs from within recvmsg() when we've consumed
sufficiently more packets to avoid the background work item needing to
run.
Signed-off-by: David Howells <dhowells@redhat.com>
2016-09-08 18:10:12 +08:00
|
|
|
return __rxrpc_set_call_completion(call, RXRPC_CALL_LOCALLY_ABORTED,
|
|
|
|
abort_code, error);
|
2016-08-30 16:49:28 +08:00
|
|
|
}
|
|
|
|
|
2016-09-07 05:19:51 +08:00
|
|
|
static inline bool rxrpc_abort_call(const char *why, struct rxrpc_call *call,
|
|
|
|
rxrpc_seq_t seq, u32 abort_code, int error)
|
2016-08-30 16:49:28 +08:00
|
|
|
{
|
|
|
|
bool ret;
|
|
|
|
|
|
|
|
write_lock_bh(&call->state_lock);
|
2016-09-07 05:19:51 +08:00
|
|
|
ret = __rxrpc_abort_call(why, call, seq, abort_code, error);
|
2016-08-30 16:49:28 +08:00
|
|
|
write_unlock_bh(&call->state_lock);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2016-04-04 21:00:37 +08:00
|
|
|
/*
|
|
|
|
* conn_client.c
|
|
|
|
*/
|
rxrpc: Improve management and caching of client connection objects
Improve the management and caching of client rxrpc connection objects.
From this point, client connections will be managed separately from service
connections because AF_RXRPC controls the creation and re-use of client
connections but doesn't have that luxury with service connections.
Further, there will be limits on the numbers of client connections that may
be live on a machine. No direct restriction will be placed on the number
of client calls, excepting that each client connection can support a
maximum of four concurrent calls.
Note that, for a number of reasons, we don't want to simply discard a
client connection as soon as the last call is apparently finished:
(1) Security is negotiated per-connection and the context is then shared
between all calls on that connection. The context can be negotiated
again if the connection lapses, but that involves holding up calls
whilst at least two packets are exchanged and various crypto bits are
performed - so we'd ideally like to cache it for a little while at
least.
(2) If a packet goes astray, we will need to retransmit a final ACK or
ABORT packet. To make this work, we need to keep around the
connection details for a little while.
(3) The locally held structures represent some amount of setup time, to be
weighed against their occupation of memory when idle.
To this end, the client connection cache is managed by a state machine on
each connection. There are five states:
(1) INACTIVE - The connection is not held in any list and may not have
been exposed to the world. If it has been previously exposed, it was
discarded from the idle list after expiring.
(2) WAITING - The connection is waiting for the number of client conns to
drop below the maximum capacity. Calls may be in progress upon it
from when it was active and got culled.
The connection is on the rxrpc_waiting_client_conns list which is kept
in to-be-granted order. Culled conns with waiters go to the back of
the queue just like new conns.
(3) ACTIVE - The connection has at least one call in progress upon it, it
may freely grant available channels to new calls and calls may be
waiting on it for channels to become available.
The connection is on the rxrpc_active_client_conns list which is kept
in activation order for culling purposes.
(4) CULLED - The connection got summarily culled to try and free up
capacity. Calls currently in progress on the connection are allowed
to continue, but new calls will have to wait. There can be no waiters
in this state - the conn would have to go to the WAITING state
instead.
(5) IDLE - The connection has no calls in progress upon it and must have
been exposed to the world (ie. the EXPOSED flag must be set). When it
expires, the EXPOSED flag is cleared and the connection transitions to
the INACTIVE state.
The connection is on the rxrpc_idle_client_conns list which is kept in
order of how soon they'll expire.
A connection in the ACTIVE or CULLED state must have at least one active
call upon it; if in the WAITING state it may have active calls upon it;
other states may not have active calls.
As long as a connection remains active and doesn't get culled, it may
continue to process calls - even if there are connections on the wait
queue. This simplifies things a bit and reduces the amount of checking we
need do.
There are a couple flags of relevance to the cache:
(1) EXPOSED - The connection ID got exposed to the world. If this flag is
set, an extra ref is added to the connection preventing it from being
reaped when it has no calls outstanding. This flag is cleared and the
ref dropped when a conn is discarded from the idle list.
(2) DONT_REUSE - The connection should be discarded as soon as possible and
should not be reused.
This commit also provides a number of new settings:
(*) /proc/net/rxrpc/max_client_conns
The maximum number of live client connections. Above this number, new
connections get added to the wait list and must wait for an active
conn to be culled. Culled connections can be reused, but they will go
to the back of the wait list and have to wait.
(*) /proc/net/rxrpc/reap_client_conns
If the number of desired connections exceeds the maximum above, the
active connection list will be culled until there are only this many
left in it.
(*) /proc/net/rxrpc/idle_conn_expiry
The normal expiry time for a client connection, provided there are
fewer than reap_client_conns of them around.
(*) /proc/net/rxrpc/idle_conn_fast_expiry
The expedited expiry time, used when there are more than
reap_client_conns of them around.
Note that I combined the Tx wait queue with the channel grant wait queue to
save space as only one of these should be in use at once.
Note also that, for the moment, the service connection cache still uses the
old connection management code.
Signed-off-by: David Howells <dhowells@redhat.com>
2016-08-24 14:30:52 +08:00
|
|
|
extern unsigned int rxrpc_max_client_connections;
|
|
|
|
extern unsigned int rxrpc_reap_client_connections;
|
|
|
|
extern unsigned int rxrpc_conn_idle_client_expiry;
|
|
|
|
extern unsigned int rxrpc_conn_idle_client_fast_expiry;
|
2016-04-04 21:00:37 +08:00
|
|
|
extern struct idr rxrpc_client_conn_ids;
|
|
|
|
|
2016-06-27 17:32:02 +08:00
|
|
|
void rxrpc_destroy_client_conn_ids(void);
|
2016-04-04 21:00:40 +08:00
|
|
|
int rxrpc_connect_call(struct rxrpc_call *, struct rxrpc_conn_parameters *,
|
|
|
|
struct sockaddr_rxrpc *, gfp_t);
|
rxrpc: Improve management and caching of client connection objects
Improve the management and caching of client rxrpc connection objects.
From this point, client connections will be managed separately from service
connections because AF_RXRPC controls the creation and re-use of client
connections but doesn't have that luxury with service connections.
Further, there will be limits on the numbers of client connections that may
be live on a machine. No direct restriction will be placed on the number
of client calls, excepting that each client connection can support a
maximum of four concurrent calls.
Note that, for a number of reasons, we don't want to simply discard a
client connection as soon as the last call is apparently finished:
(1) Security is negotiated per-connection and the context is then shared
between all calls on that connection. The context can be negotiated
again if the connection lapses, but that involves holding up calls
whilst at least two packets are exchanged and various crypto bits are
performed - so we'd ideally like to cache it for a little while at
least.
(2) If a packet goes astray, we will need to retransmit a final ACK or
ABORT packet. To make this work, we need to keep around the
connection details for a little while.
(3) The locally held structures represent some amount of setup time, to be
weighed against their occupation of memory when idle.
To this end, the client connection cache is managed by a state machine on
each connection. There are five states:
(1) INACTIVE - The connection is not held in any list and may not have
been exposed to the world. If it has been previously exposed, it was
discarded from the idle list after expiring.
(2) WAITING - The connection is waiting for the number of client conns to
drop below the maximum capacity. Calls may be in progress upon it
from when it was active and got culled.
The connection is on the rxrpc_waiting_client_conns list which is kept
in to-be-granted order. Culled conns with waiters go to the back of
the queue just like new conns.
(3) ACTIVE - The connection has at least one call in progress upon it, it
may freely grant available channels to new calls and calls may be
waiting on it for channels to become available.
The connection is on the rxrpc_active_client_conns list which is kept
in activation order for culling purposes.
(4) CULLED - The connection got summarily culled to try and free up
capacity. Calls currently in progress on the connection are allowed
to continue, but new calls will have to wait. There can be no waiters
in this state - the conn would have to go to the WAITING state
instead.
(5) IDLE - The connection has no calls in progress upon it and must have
been exposed to the world (ie. the EXPOSED flag must be set). When it
expires, the EXPOSED flag is cleared and the connection transitions to
the INACTIVE state.
The connection is on the rxrpc_idle_client_conns list which is kept in
order of how soon they'll expire.
A connection in the ACTIVE or CULLED state must have at least one active
call upon it; if in the WAITING state it may have active calls upon it;
other states may not have active calls.
As long as a connection remains active and doesn't get culled, it may
continue to process calls - even if there are connections on the wait
queue. This simplifies things a bit and reduces the amount of checking we
need do.
There are a couple flags of relevance to the cache:
(1) EXPOSED - The connection ID got exposed to the world. If this flag is
set, an extra ref is added to the connection preventing it from being
reaped when it has no calls outstanding. This flag is cleared and the
ref dropped when a conn is discarded from the idle list.
(2) DONT_REUSE - The connection should be discarded as soon as possible and
should not be reused.
This commit also provides a number of new settings:
(*) /proc/net/rxrpc/max_client_conns
The maximum number of live client connections. Above this number, new
connections get added to the wait list and must wait for an active
conn to be culled. Culled connections can be reused, but they will go
to the back of the wait list and have to wait.
(*) /proc/net/rxrpc/reap_client_conns
If the number of desired connections exceeds the maximum above, the
active connection list will be culled until there are only this many
left in it.
(*) /proc/net/rxrpc/idle_conn_expiry
The normal expiry time for a client connection, provided there are
fewer than reap_client_conns of them around.
(*) /proc/net/rxrpc/idle_conn_fast_expiry
The expedited expiry time, used when there are more than
reap_client_conns of them around.
Note that I combined the Tx wait queue with the channel grant wait queue to
save space as only one of these should be in use at once.
Note also that, for the moment, the service connection cache still uses the
old connection management code.
Signed-off-by: David Howells <dhowells@redhat.com>
2016-08-24 14:30:52 +08:00
|
|
|
void rxrpc_expose_client_call(struct rxrpc_call *);
|
|
|
|
void rxrpc_disconnect_client_call(struct rxrpc_call *);
|
|
|
|
void rxrpc_put_client_conn(struct rxrpc_connection *);
|
|
|
|
void __exit rxrpc_destroy_all_client_connections(void);
|
2016-04-04 21:00:37 +08:00
|
|
|
|
2007-04-27 06:48:28 +08:00
|
|
|
/*
|
2016-06-13 20:30:30 +08:00
|
|
|
* conn_event.c
|
|
|
|
*/
|
|
|
|
void rxrpc_process_connection(struct work_struct *);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* conn_object.c
|
2007-04-27 06:48:28 +08:00
|
|
|
*/
|
2016-03-10 07:22:56 +08:00
|
|
|
extern unsigned int rxrpc_connection_expiry;
|
2007-04-27 06:48:28 +08:00
|
|
|
extern struct list_head rxrpc_connections;
|
2016-08-24 14:30:52 +08:00
|
|
|
extern struct list_head rxrpc_connection_proc_list;
|
2007-04-27 06:48:28 +08:00
|
|
|
extern rwlock_t rxrpc_connection_lock;
|
|
|
|
|
2016-07-01 14:51:50 +08:00
|
|
|
int rxrpc_extract_addr_from_skb(struct sockaddr_rxrpc *, struct sk_buff *);
|
2016-04-04 21:00:40 +08:00
|
|
|
struct rxrpc_connection *rxrpc_alloc_connection(gfp_t);
|
2016-07-01 14:51:50 +08:00
|
|
|
struct rxrpc_connection *rxrpc_find_connection_rcu(struct rxrpc_local *,
|
|
|
|
struct sk_buff *);
|
rxrpc: Improve management and caching of client connection objects
Improve the management and caching of client rxrpc connection objects.
From this point, client connections will be managed separately from service
connections because AF_RXRPC controls the creation and re-use of client
connections but doesn't have that luxury with service connections.
Further, there will be limits on the numbers of client connections that may
be live on a machine. No direct restriction will be placed on the number
of client calls, excepting that each client connection can support a
maximum of four concurrent calls.
Note that, for a number of reasons, we don't want to simply discard a
client connection as soon as the last call is apparently finished:
(1) Security is negotiated per-connection and the context is then shared
between all calls on that connection. The context can be negotiated
again if the connection lapses, but that involves holding up calls
whilst at least two packets are exchanged and various crypto bits are
performed - so we'd ideally like to cache it for a little while at
least.
(2) If a packet goes astray, we will need to retransmit a final ACK or
ABORT packet. To make this work, we need to keep around the
connection details for a little while.
(3) The locally held structures represent some amount of setup time, to be
weighed against their occupation of memory when idle.
To this end, the client connection cache is managed by a state machine on
each connection. There are five states:
(1) INACTIVE - The connection is not held in any list and may not have
been exposed to the world. If it has been previously exposed, it was
discarded from the idle list after expiring.
(2) WAITING - The connection is waiting for the number of client conns to
drop below the maximum capacity. Calls may be in progress upon it
from when it was active and got culled.
The connection is on the rxrpc_waiting_client_conns list which is kept
in to-be-granted order. Culled conns with waiters go to the back of
the queue just like new conns.
(3) ACTIVE - The connection has at least one call in progress upon it, it
may freely grant available channels to new calls and calls may be
waiting on it for channels to become available.
The connection is on the rxrpc_active_client_conns list which is kept
in activation order for culling purposes.
(4) CULLED - The connection got summarily culled to try and free up
capacity. Calls currently in progress on the connection are allowed
to continue, but new calls will have to wait. There can be no waiters
in this state - the conn would have to go to the WAITING state
instead.
(5) IDLE - The connection has no calls in progress upon it and must have
been exposed to the world (ie. the EXPOSED flag must be set). When it
expires, the EXPOSED flag is cleared and the connection transitions to
the INACTIVE state.
The connection is on the rxrpc_idle_client_conns list which is kept in
order of how soon they'll expire.
A connection in the ACTIVE or CULLED state must have at least one active
call upon it; if in the WAITING state it may have active calls upon it;
other states may not have active calls.
As long as a connection remains active and doesn't get culled, it may
continue to process calls - even if there are connections on the wait
queue. This simplifies things a bit and reduces the amount of checking we
need do.
There are a couple flags of relevance to the cache:
(1) EXPOSED - The connection ID got exposed to the world. If this flag is
set, an extra ref is added to the connection preventing it from being
reaped when it has no calls outstanding. This flag is cleared and the
ref dropped when a conn is discarded from the idle list.
(2) DONT_REUSE - The connection should be discarded as soon as possible and
should not be reused.
This commit also provides a number of new settings:
(*) /proc/net/rxrpc/max_client_conns
The maximum number of live client connections. Above this number, new
connections get added to the wait list and must wait for an active
conn to be culled. Culled connections can be reused, but they will go
to the back of the wait list and have to wait.
(*) /proc/net/rxrpc/reap_client_conns
If the number of desired connections exceeds the maximum above, the
active connection list will be culled until there are only this many
left in it.
(*) /proc/net/rxrpc/idle_conn_expiry
The normal expiry time for a client connection, provided there are
fewer than reap_client_conns of them around.
(*) /proc/net/rxrpc/idle_conn_fast_expiry
The expedited expiry time, used when there are more than
reap_client_conns of them around.
Note that I combined the Tx wait queue with the channel grant wait queue to
save space as only one of these should be in use at once.
Note also that, for the moment, the service connection cache still uses the
old connection management code.
Signed-off-by: David Howells <dhowells@redhat.com>
2016-08-24 14:30:52 +08:00
|
|
|
void __rxrpc_disconnect_call(struct rxrpc_connection *, struct rxrpc_call *);
|
2016-06-17 22:42:35 +08:00
|
|
|
void rxrpc_disconnect_call(struct rxrpc_call *);
|
rxrpc: Improve management and caching of client connection objects
Improve the management and caching of client rxrpc connection objects.
From this point, client connections will be managed separately from service
connections because AF_RXRPC controls the creation and re-use of client
connections but doesn't have that luxury with service connections.
Further, there will be limits on the numbers of client connections that may
be live on a machine. No direct restriction will be placed on the number
of client calls, excepting that each client connection can support a
maximum of four concurrent calls.
Note that, for a number of reasons, we don't want to simply discard a
client connection as soon as the last call is apparently finished:
(1) Security is negotiated per-connection and the context is then shared
between all calls on that connection. The context can be negotiated
again if the connection lapses, but that involves holding up calls
whilst at least two packets are exchanged and various crypto bits are
performed - so we'd ideally like to cache it for a little while at
least.
(2) If a packet goes astray, we will need to retransmit a final ACK or
ABORT packet. To make this work, we need to keep around the
connection details for a little while.
(3) The locally held structures represent some amount of setup time, to be
weighed against their occupation of memory when idle.
To this end, the client connection cache is managed by a state machine on
each connection. There are five states:
(1) INACTIVE - The connection is not held in any list and may not have
been exposed to the world. If it has been previously exposed, it was
discarded from the idle list after expiring.
(2) WAITING - The connection is waiting for the number of client conns to
drop below the maximum capacity. Calls may be in progress upon it
from when it was active and got culled.
The connection is on the rxrpc_waiting_client_conns list which is kept
in to-be-granted order. Culled conns with waiters go to the back of
the queue just like new conns.
(3) ACTIVE - The connection has at least one call in progress upon it, it
may freely grant available channels to new calls and calls may be
waiting on it for channels to become available.
The connection is on the rxrpc_active_client_conns list which is kept
in activation order for culling purposes.
(4) CULLED - The connection got summarily culled to try and free up
capacity. Calls currently in progress on the connection are allowed
to continue, but new calls will have to wait. There can be no waiters
in this state - the conn would have to go to the WAITING state
instead.
(5) IDLE - The connection has no calls in progress upon it and must have
been exposed to the world (ie. the EXPOSED flag must be set). When it
expires, the EXPOSED flag is cleared and the connection transitions to
the INACTIVE state.
The connection is on the rxrpc_idle_client_conns list which is kept in
order of how soon they'll expire.
A connection in the ACTIVE or CULLED state must have at least one active
call upon it; if in the WAITING state it may have active calls upon it;
other states may not have active calls.
As long as a connection remains active and doesn't get culled, it may
continue to process calls - even if there are connections on the wait
queue. This simplifies things a bit and reduces the amount of checking we
need do.
There are a couple flags of relevance to the cache:
(1) EXPOSED - The connection ID got exposed to the world. If this flag is
set, an extra ref is added to the connection preventing it from being
reaped when it has no calls outstanding. This flag is cleared and the
ref dropped when a conn is discarded from the idle list.
(2) DONT_REUSE - The connection should be discarded as soon as possible and
should not be reused.
This commit also provides a number of new settings:
(*) /proc/net/rxrpc/max_client_conns
The maximum number of live client connections. Above this number, new
connections get added to the wait list and must wait for an active
conn to be culled. Culled connections can be reused, but they will go
to the back of the wait list and have to wait.
(*) /proc/net/rxrpc/reap_client_conns
If the number of desired connections exceeds the maximum above, the
active connection list will be culled until there are only this many
left in it.
(*) /proc/net/rxrpc/idle_conn_expiry
The normal expiry time for a client connection, provided there are
fewer than reap_client_conns of them around.
(*) /proc/net/rxrpc/idle_conn_fast_expiry
The expedited expiry time, used when there are more than
reap_client_conns of them around.
Note that I combined the Tx wait queue with the channel grant wait queue to
save space as only one of these should be in use at once.
Note also that, for the moment, the service connection cache still uses the
old connection management code.
Signed-off-by: David Howells <dhowells@redhat.com>
2016-08-24 14:30:52 +08:00
|
|
|
void rxrpc_kill_connection(struct rxrpc_connection *);
|
2016-09-17 17:49:14 +08:00
|
|
|
bool rxrpc_queue_conn(struct rxrpc_connection *);
|
|
|
|
void rxrpc_see_connection(struct rxrpc_connection *);
|
|
|
|
void rxrpc_get_connection(struct rxrpc_connection *);
|
|
|
|
struct rxrpc_connection *rxrpc_get_connection_maybe(struct rxrpc_connection *);
|
|
|
|
void rxrpc_put_service_conn(struct rxrpc_connection *);
|
2013-10-19 04:48:25 +08:00
|
|
|
void __exit rxrpc_destroy_all_connections(void);
|
2007-04-27 06:48:28 +08:00
|
|
|
|
2016-04-04 21:00:36 +08:00
|
|
|
static inline bool rxrpc_conn_is_client(const struct rxrpc_connection *conn)
|
|
|
|
{
|
|
|
|
return conn->out_clientflag;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline bool rxrpc_conn_is_service(const struct rxrpc_connection *conn)
|
|
|
|
{
|
2016-06-30 19:16:21 +08:00
|
|
|
return !rxrpc_conn_is_client(conn);
|
2016-04-04 21:00:36 +08:00
|
|
|
}
|
|
|
|
|
2016-08-23 22:27:24 +08:00
|
|
|
static inline void rxrpc_put_connection(struct rxrpc_connection *conn)
|
|
|
|
{
|
rxrpc: Improve management and caching of client connection objects
Improve the management and caching of client rxrpc connection objects.
From this point, client connections will be managed separately from service
connections because AF_RXRPC controls the creation and re-use of client
connections but doesn't have that luxury with service connections.
Further, there will be limits on the numbers of client connections that may
be live on a machine. No direct restriction will be placed on the number
of client calls, excepting that each client connection can support a
maximum of four concurrent calls.
Note that, for a number of reasons, we don't want to simply discard a
client connection as soon as the last call is apparently finished:
(1) Security is negotiated per-connection and the context is then shared
between all calls on that connection. The context can be negotiated
again if the connection lapses, but that involves holding up calls
whilst at least two packets are exchanged and various crypto bits are
performed - so we'd ideally like to cache it for a little while at
least.
(2) If a packet goes astray, we will need to retransmit a final ACK or
ABORT packet. To make this work, we need to keep around the
connection details for a little while.
(3) The locally held structures represent some amount of setup time, to be
weighed against their occupation of memory when idle.
To this end, the client connection cache is managed by a state machine on
each connection. There are five states:
(1) INACTIVE - The connection is not held in any list and may not have
been exposed to the world. If it has been previously exposed, it was
discarded from the idle list after expiring.
(2) WAITING - The connection is waiting for the number of client conns to
drop below the maximum capacity. Calls may be in progress upon it
from when it was active and got culled.
The connection is on the rxrpc_waiting_client_conns list which is kept
in to-be-granted order. Culled conns with waiters go to the back of
the queue just like new conns.
(3) ACTIVE - The connection has at least one call in progress upon it, it
may freely grant available channels to new calls and calls may be
waiting on it for channels to become available.
The connection is on the rxrpc_active_client_conns list which is kept
in activation order for culling purposes.
(4) CULLED - The connection got summarily culled to try and free up
capacity. Calls currently in progress on the connection are allowed
to continue, but new calls will have to wait. There can be no waiters
in this state - the conn would have to go to the WAITING state
instead.
(5) IDLE - The connection has no calls in progress upon it and must have
been exposed to the world (ie. the EXPOSED flag must be set). When it
expires, the EXPOSED flag is cleared and the connection transitions to
the INACTIVE state.
The connection is on the rxrpc_idle_client_conns list which is kept in
order of how soon they'll expire.
A connection in the ACTIVE or CULLED state must have at least one active
call upon it; if in the WAITING state it may have active calls upon it;
other states may not have active calls.
As long as a connection remains active and doesn't get culled, it may
continue to process calls - even if there are connections on the wait
queue. This simplifies things a bit and reduces the amount of checking we
need do.
There are a couple flags of relevance to the cache:
(1) EXPOSED - The connection ID got exposed to the world. If this flag is
set, an extra ref is added to the connection preventing it from being
reaped when it has no calls outstanding. This flag is cleared and the
ref dropped when a conn is discarded from the idle list.
(2) DONT_REUSE - The connection should be discarded as soon as possible and
should not be reused.
This commit also provides a number of new settings:
(*) /proc/net/rxrpc/max_client_conns
The maximum number of live client connections. Above this number, new
connections get added to the wait list and must wait for an active
conn to be culled. Culled connections can be reused, but they will go
to the back of the wait list and have to wait.
(*) /proc/net/rxrpc/reap_client_conns
If the number of desired connections exceeds the maximum above, the
active connection list will be culled until there are only this many
left in it.
(*) /proc/net/rxrpc/idle_conn_expiry
The normal expiry time for a client connection, provided there are
fewer than reap_client_conns of them around.
(*) /proc/net/rxrpc/idle_conn_fast_expiry
The expedited expiry time, used when there are more than
reap_client_conns of them around.
Note that I combined the Tx wait queue with the channel grant wait queue to
save space as only one of these should be in use at once.
Note also that, for the moment, the service connection cache still uses the
old connection management code.
Signed-off-by: David Howells <dhowells@redhat.com>
2016-08-24 14:30:52 +08:00
|
|
|
if (!conn)
|
|
|
|
return;
|
|
|
|
|
2016-09-17 17:49:14 +08:00
|
|
|
if (rxrpc_conn_is_client(conn))
|
|
|
|
rxrpc_put_client_conn(conn);
|
|
|
|
else
|
|
|
|
rxrpc_put_service_conn(conn);
|
2016-06-27 17:32:02 +08:00
|
|
|
}
|
|
|
|
|
2016-04-04 21:00:40 +08:00
|
|
|
/*
|
|
|
|
* conn_service.c
|
|
|
|
*/
|
2016-07-01 14:51:50 +08:00
|
|
|
struct rxrpc_connection *rxrpc_find_service_conn_rcu(struct rxrpc_peer *,
|
|
|
|
struct sk_buff *);
|
2016-09-08 18:10:12 +08:00
|
|
|
struct rxrpc_connection *rxrpc_prealloc_service_connection(gfp_t);
|
rxrpc: Rewrite the data and ack handling code
Rewrite the data and ack handling code such that:
(1) Parsing of received ACK and ABORT packets and the distribution and the
filing of DATA packets happens entirely within the data_ready context
called from the UDP socket. This allows us to process and discard ACK
and ABORT packets much more quickly (they're no longer stashed on a
queue for a background thread to process).
(2) We avoid calling skb_clone(), pskb_pull() and pskb_trim(). We instead
keep track of the offset and length of the content of each packet in
the sk_buff metadata. This means we don't do any allocation in the
receive path.
(3) Jumbo DATA packet parsing is now done in data_ready context. Rather
than cloning the packet once for each subpacket and pulling/trimming
it, we file the packet multiple times with an annotation for each
indicating which subpacket is there. From that we can directly
calculate the offset and length.
(4) A call's receive queue can be accessed without taking locks (memory
barriers do have to be used, though).
(5) Incoming calls are set up from preallocated resources and immediately
made live. They can than have packets queued upon them and ACKs
generated. If insufficient resources exist, DATA packet #1 is given a
BUSY reply and other DATA packets are discarded).
(6) sk_buffs no longer take a ref on their parent call.
To make this work, the following changes are made:
(1) Each call's receive buffer is now a circular buffer of sk_buff
pointers (rxtx_buffer) rather than a number of sk_buff_heads spread
between the call and the socket. This permits each sk_buff to be in
the buffer multiple times. The receive buffer is reused for the
transmit buffer.
(2) A circular buffer of annotations (rxtx_annotations) is kept parallel
to the data buffer. Transmission phase annotations indicate whether a
buffered packet has been ACK'd or not and whether it needs
retransmission.
Receive phase annotations indicate whether a slot holds a whole packet
or a jumbo subpacket and, if the latter, which subpacket. They also
note whether the packet has been decrypted in place.
(3) DATA packet window tracking is much simplified. Each phase has just
two numbers representing the window (rx_hard_ack/rx_top and
tx_hard_ack/tx_top).
The hard_ack number is the sequence number before base of the window,
representing the last packet the other side says it has consumed.
hard_ack starts from 0 and the first packet is sequence number 1.
The top number is the sequence number of the highest-numbered packet
residing in the buffer. Packets between hard_ack+1 and top are
soft-ACK'd to indicate they've been received, but not yet consumed.
Four macros, before(), before_eq(), after() and after_eq() are added
to compare sequence numbers within the window. This allows for the
top of the window to wrap when the hard-ack sequence number gets close
to the limit.
Two flags, RXRPC_CALL_RX_LAST and RXRPC_CALL_TX_LAST, are added also
to indicate when rx_top and tx_top point at the packets with the
LAST_PACKET bit set, indicating the end of the phase.
(4) Calls are queued on the socket 'receive queue' rather than packets.
This means that we don't need have to invent dummy packets to queue to
indicate abnormal/terminal states and we don't have to keep metadata
packets (such as ABORTs) around
(5) The offset and length of a (sub)packet's content are now passed to
the verify_packet security op. This is currently expected to decrypt
the packet in place and validate it.
However, there's now nowhere to store the revised offset and length of
the actual data within the decrypted blob (there may be a header and
padding to skip) because an sk_buff may represent multiple packets, so
a locate_data security op is added to retrieve these details from the
sk_buff content when needed.
(6) recvmsg() now has to handle jumbo subpackets, where each subpacket is
individually secured and needs to be individually decrypted. The code
to do this is broken out into rxrpc_recvmsg_data() and shared with the
kernel API. It now iterates over the call's receive buffer rather
than walking the socket receive queue.
Additional changes:
(1) The timers are condensed to a single timer that is set for the soonest
of three timeouts (delayed ACK generation, DATA retransmission and
call lifespan).
(2) Transmission of ACK and ABORT packets is effected immediately from
process-context socket ops/kernel API calls that cause them instead of
them being punted off to a background work item. The data_ready
handler still has to defer to the background, though.
(3) A shutdown op is added to the AF_RXRPC socket so that the AFS
filesystem can shut down the socket and flush its own work items
before closing the socket to deal with any in-progress service calls.
Future additional changes that will need to be considered:
(1) Make sure that a call doesn't hog the front of the queue by receiving
data from the network as fast as userspace is consuming it to the
exclusion of other calls.
(2) Transmit delayed ACKs from within recvmsg() when we've consumed
sufficiently more packets to avoid the background work item needing to
run.
Signed-off-by: David Howells <dhowells@redhat.com>
2016-09-08 18:10:12 +08:00
|
|
|
void rxrpc_new_incoming_connection(struct rxrpc_connection *, struct sk_buff *);
|
2016-06-30 17:45:22 +08:00
|
|
|
void rxrpc_unpublish_service_conn(struct rxrpc_connection *);
|
2016-04-04 21:00:40 +08:00
|
|
|
|
2007-04-27 06:48:28 +08:00
|
|
|
/*
|
2016-06-13 20:30:30 +08:00
|
|
|
* input.c
|
2007-04-27 06:48:28 +08:00
|
|
|
*/
|
2016-06-13 20:30:30 +08:00
|
|
|
void rxrpc_data_ready(struct sock *);
|
2007-04-27 06:48:28 +08:00
|
|
|
|
|
|
|
/*
|
2016-06-13 20:30:30 +08:00
|
|
|
* insecure.c
|
2007-04-27 06:48:28 +08:00
|
|
|
*/
|
2016-06-13 20:30:30 +08:00
|
|
|
extern const struct rxrpc_security rxrpc_no_security;
|
2007-04-27 06:48:28 +08:00
|
|
|
|
|
|
|
/*
|
2016-06-13 20:30:30 +08:00
|
|
|
* key.c
|
2007-04-27 06:48:28 +08:00
|
|
|
*/
|
2016-06-13 20:30:30 +08:00
|
|
|
extern struct key_type key_type_rxrpc;
|
|
|
|
extern struct key_type key_type_rxrpc_s;
|
|
|
|
|
|
|
|
int rxrpc_request_key(struct rxrpc_sock *, char __user *, int);
|
|
|
|
int rxrpc_server_keyring(struct rxrpc_sock *, char __user *, int);
|
|
|
|
int rxrpc_get_server_data_key(struct rxrpc_connection *, const void *, time_t,
|
|
|
|
u32);
|
2007-04-27 06:48:28 +08:00
|
|
|
|
2016-04-04 21:00:34 +08:00
|
|
|
/*
|
|
|
|
* local_event.c
|
|
|
|
*/
|
2016-04-04 21:00:35 +08:00
|
|
|
extern void rxrpc_process_local_events(struct rxrpc_local *);
|
2016-04-04 21:00:34 +08:00
|
|
|
|
2007-04-27 06:48:28 +08:00
|
|
|
/*
|
2016-06-13 20:30:30 +08:00
|
|
|
* local_object.c
|
2007-04-27 06:48:28 +08:00
|
|
|
*/
|
2016-04-04 21:00:35 +08:00
|
|
|
struct rxrpc_local *rxrpc_lookup_local(const struct sockaddr_rxrpc *);
|
|
|
|
void __rxrpc_put_local(struct rxrpc_local *);
|
2013-10-19 04:48:25 +08:00
|
|
|
void __exit rxrpc_destroy_all_locals(void);
|
2007-04-27 06:48:28 +08:00
|
|
|
|
2016-04-04 21:00:35 +08:00
|
|
|
static inline void rxrpc_get_local(struct rxrpc_local *local)
|
|
|
|
{
|
|
|
|
atomic_inc(&local->usage);
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline
|
|
|
|
struct rxrpc_local *rxrpc_get_local_maybe(struct rxrpc_local *local)
|
|
|
|
{
|
|
|
|
return atomic_inc_not_zero(&local->usage) ? local : NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline void rxrpc_put_local(struct rxrpc_local *local)
|
|
|
|
{
|
2016-04-04 21:00:38 +08:00
|
|
|
if (local && atomic_dec_and_test(&local->usage))
|
2016-04-04 21:00:35 +08:00
|
|
|
__rxrpc_put_local(local);
|
|
|
|
}
|
|
|
|
|
2016-06-27 17:32:02 +08:00
|
|
|
static inline void rxrpc_queue_local(struct rxrpc_local *local)
|
|
|
|
{
|
|
|
|
rxrpc_queue_work(&local->processor);
|
|
|
|
}
|
|
|
|
|
2007-04-27 06:48:28 +08:00
|
|
|
/*
|
2016-06-13 20:30:30 +08:00
|
|
|
* misc.c
|
2007-04-27 06:48:28 +08:00
|
|
|
*/
|
2016-06-13 20:30:30 +08:00
|
|
|
extern unsigned int rxrpc_max_backlog __read_mostly;
|
|
|
|
extern unsigned int rxrpc_requested_ack_delay;
|
|
|
|
extern unsigned int rxrpc_soft_ack_delay;
|
|
|
|
extern unsigned int rxrpc_idle_ack_delay;
|
|
|
|
extern unsigned int rxrpc_rx_window_size;
|
|
|
|
extern unsigned int rxrpc_rx_mtu;
|
|
|
|
extern unsigned int rxrpc_rx_jumbo_max;
|
2016-09-03 05:39:45 +08:00
|
|
|
extern unsigned int rxrpc_resend_timeout;
|
2007-04-27 06:48:28 +08:00
|
|
|
|
2016-06-13 20:30:30 +08:00
|
|
|
extern const s8 rxrpc_ack_priority[];
|
|
|
|
|
2007-04-27 06:48:28 +08:00
|
|
|
/*
|
2016-06-13 20:30:30 +08:00
|
|
|
* output.c
|
2007-04-27 06:48:28 +08:00
|
|
|
*/
|
2016-09-07 16:19:31 +08:00
|
|
|
int rxrpc_send_call_packet(struct rxrpc_call *, u8);
|
2016-09-22 07:29:31 +08:00
|
|
|
int rxrpc_send_data_packet(struct rxrpc_call *, struct sk_buff *);
|
rxrpc: Rewrite the data and ack handling code
Rewrite the data and ack handling code such that:
(1) Parsing of received ACK and ABORT packets and the distribution and the
filing of DATA packets happens entirely within the data_ready context
called from the UDP socket. This allows us to process and discard ACK
and ABORT packets much more quickly (they're no longer stashed on a
queue for a background thread to process).
(2) We avoid calling skb_clone(), pskb_pull() and pskb_trim(). We instead
keep track of the offset and length of the content of each packet in
the sk_buff metadata. This means we don't do any allocation in the
receive path.
(3) Jumbo DATA packet parsing is now done in data_ready context. Rather
than cloning the packet once for each subpacket and pulling/trimming
it, we file the packet multiple times with an annotation for each
indicating which subpacket is there. From that we can directly
calculate the offset and length.
(4) A call's receive queue can be accessed without taking locks (memory
barriers do have to be used, though).
(5) Incoming calls are set up from preallocated resources and immediately
made live. They can than have packets queued upon them and ACKs
generated. If insufficient resources exist, DATA packet #1 is given a
BUSY reply and other DATA packets are discarded).
(6) sk_buffs no longer take a ref on their parent call.
To make this work, the following changes are made:
(1) Each call's receive buffer is now a circular buffer of sk_buff
pointers (rxtx_buffer) rather than a number of sk_buff_heads spread
between the call and the socket. This permits each sk_buff to be in
the buffer multiple times. The receive buffer is reused for the
transmit buffer.
(2) A circular buffer of annotations (rxtx_annotations) is kept parallel
to the data buffer. Transmission phase annotations indicate whether a
buffered packet has been ACK'd or not and whether it needs
retransmission.
Receive phase annotations indicate whether a slot holds a whole packet
or a jumbo subpacket and, if the latter, which subpacket. They also
note whether the packet has been decrypted in place.
(3) DATA packet window tracking is much simplified. Each phase has just
two numbers representing the window (rx_hard_ack/rx_top and
tx_hard_ack/tx_top).
The hard_ack number is the sequence number before base of the window,
representing the last packet the other side says it has consumed.
hard_ack starts from 0 and the first packet is sequence number 1.
The top number is the sequence number of the highest-numbered packet
residing in the buffer. Packets between hard_ack+1 and top are
soft-ACK'd to indicate they've been received, but not yet consumed.
Four macros, before(), before_eq(), after() and after_eq() are added
to compare sequence numbers within the window. This allows for the
top of the window to wrap when the hard-ack sequence number gets close
to the limit.
Two flags, RXRPC_CALL_RX_LAST and RXRPC_CALL_TX_LAST, are added also
to indicate when rx_top and tx_top point at the packets with the
LAST_PACKET bit set, indicating the end of the phase.
(4) Calls are queued on the socket 'receive queue' rather than packets.
This means that we don't need have to invent dummy packets to queue to
indicate abnormal/terminal states and we don't have to keep metadata
packets (such as ABORTs) around
(5) The offset and length of a (sub)packet's content are now passed to
the verify_packet security op. This is currently expected to decrypt
the packet in place and validate it.
However, there's now nowhere to store the revised offset and length of
the actual data within the decrypted blob (there may be a header and
padding to skip) because an sk_buff may represent multiple packets, so
a locate_data security op is added to retrieve these details from the
sk_buff content when needed.
(6) recvmsg() now has to handle jumbo subpackets, where each subpacket is
individually secured and needs to be individually decrypted. The code
to do this is broken out into rxrpc_recvmsg_data() and shared with the
kernel API. It now iterates over the call's receive buffer rather
than walking the socket receive queue.
Additional changes:
(1) The timers are condensed to a single timer that is set for the soonest
of three timeouts (delayed ACK generation, DATA retransmission and
call lifespan).
(2) Transmission of ACK and ABORT packets is effected immediately from
process-context socket ops/kernel API calls that cause them instead of
them being punted off to a background work item. The data_ready
handler still has to defer to the background, though.
(3) A shutdown op is added to the AF_RXRPC socket so that the AFS
filesystem can shut down the socket and flush its own work items
before closing the socket to deal with any in-progress service calls.
Future additional changes that will need to be considered:
(1) Make sure that a call doesn't hog the front of the queue by receiving
data from the network as fast as userspace is consuming it to the
exclusion of other calls.
(2) Transmit delayed ACKs from within recvmsg() when we've consumed
sufficiently more packets to avoid the background work item needing to
run.
Signed-off-by: David Howells <dhowells@redhat.com>
2016-09-08 18:10:12 +08:00
|
|
|
void rxrpc_reject_packets(struct rxrpc_local *);
|
2007-04-27 06:48:28 +08:00
|
|
|
|
|
|
|
/*
|
2016-04-04 21:00:32 +08:00
|
|
|
* peer_event.c
|
2016-06-13 20:30:30 +08:00
|
|
|
*/
|
2016-04-04 21:00:32 +08:00
|
|
|
void rxrpc_error_report(struct sock *);
|
2016-04-04 21:00:34 +08:00
|
|
|
void rxrpc_peer_error_distributor(struct work_struct *);
|
2016-09-22 07:41:53 +08:00
|
|
|
void rxrpc_peer_add_rtt(struct rxrpc_call *, enum rxrpc_rtt_rx_trace,
|
|
|
|
rxrpc_serial_t, rxrpc_serial_t, ktime_t, ktime_t);
|
2016-06-13 20:30:30 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* peer_object.c
|
2007-04-27 06:48:28 +08:00
|
|
|
*/
|
2016-04-04 21:00:32 +08:00
|
|
|
struct rxrpc_peer *rxrpc_lookup_peer_rcu(struct rxrpc_local *,
|
|
|
|
const struct sockaddr_rxrpc *);
|
|
|
|
struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_local *,
|
|
|
|
struct sockaddr_rxrpc *, gfp_t);
|
|
|
|
struct rxrpc_peer *rxrpc_alloc_peer(struct rxrpc_local *, gfp_t);
|
rxrpc: Rewrite the data and ack handling code
Rewrite the data and ack handling code such that:
(1) Parsing of received ACK and ABORT packets and the distribution and the
filing of DATA packets happens entirely within the data_ready context
called from the UDP socket. This allows us to process and discard ACK
and ABORT packets much more quickly (they're no longer stashed on a
queue for a background thread to process).
(2) We avoid calling skb_clone(), pskb_pull() and pskb_trim(). We instead
keep track of the offset and length of the content of each packet in
the sk_buff metadata. This means we don't do any allocation in the
receive path.
(3) Jumbo DATA packet parsing is now done in data_ready context. Rather
than cloning the packet once for each subpacket and pulling/trimming
it, we file the packet multiple times with an annotation for each
indicating which subpacket is there. From that we can directly
calculate the offset and length.
(4) A call's receive queue can be accessed without taking locks (memory
barriers do have to be used, though).
(5) Incoming calls are set up from preallocated resources and immediately
made live. They can than have packets queued upon them and ACKs
generated. If insufficient resources exist, DATA packet #1 is given a
BUSY reply and other DATA packets are discarded).
(6) sk_buffs no longer take a ref on their parent call.
To make this work, the following changes are made:
(1) Each call's receive buffer is now a circular buffer of sk_buff
pointers (rxtx_buffer) rather than a number of sk_buff_heads spread
between the call and the socket. This permits each sk_buff to be in
the buffer multiple times. The receive buffer is reused for the
transmit buffer.
(2) A circular buffer of annotations (rxtx_annotations) is kept parallel
to the data buffer. Transmission phase annotations indicate whether a
buffered packet has been ACK'd or not and whether it needs
retransmission.
Receive phase annotations indicate whether a slot holds a whole packet
or a jumbo subpacket and, if the latter, which subpacket. They also
note whether the packet has been decrypted in place.
(3) DATA packet window tracking is much simplified. Each phase has just
two numbers representing the window (rx_hard_ack/rx_top and
tx_hard_ack/tx_top).
The hard_ack number is the sequence number before base of the window,
representing the last packet the other side says it has consumed.
hard_ack starts from 0 and the first packet is sequence number 1.
The top number is the sequence number of the highest-numbered packet
residing in the buffer. Packets between hard_ack+1 and top are
soft-ACK'd to indicate they've been received, but not yet consumed.
Four macros, before(), before_eq(), after() and after_eq() are added
to compare sequence numbers within the window. This allows for the
top of the window to wrap when the hard-ack sequence number gets close
to the limit.
Two flags, RXRPC_CALL_RX_LAST and RXRPC_CALL_TX_LAST, are added also
to indicate when rx_top and tx_top point at the packets with the
LAST_PACKET bit set, indicating the end of the phase.
(4) Calls are queued on the socket 'receive queue' rather than packets.
This means that we don't need have to invent dummy packets to queue to
indicate abnormal/terminal states and we don't have to keep metadata
packets (such as ABORTs) around
(5) The offset and length of a (sub)packet's content are now passed to
the verify_packet security op. This is currently expected to decrypt
the packet in place and validate it.
However, there's now nowhere to store the revised offset and length of
the actual data within the decrypted blob (there may be a header and
padding to skip) because an sk_buff may represent multiple packets, so
a locate_data security op is added to retrieve these details from the
sk_buff content when needed.
(6) recvmsg() now has to handle jumbo subpackets, where each subpacket is
individually secured and needs to be individually decrypted. The code
to do this is broken out into rxrpc_recvmsg_data() and shared with the
kernel API. It now iterates over the call's receive buffer rather
than walking the socket receive queue.
Additional changes:
(1) The timers are condensed to a single timer that is set for the soonest
of three timeouts (delayed ACK generation, DATA retransmission and
call lifespan).
(2) Transmission of ACK and ABORT packets is effected immediately from
process-context socket ops/kernel API calls that cause them instead of
them being punted off to a background work item. The data_ready
handler still has to defer to the background, though.
(3) A shutdown op is added to the AF_RXRPC socket so that the AFS
filesystem can shut down the socket and flush its own work items
before closing the socket to deal with any in-progress service calls.
Future additional changes that will need to be considered:
(1) Make sure that a call doesn't hog the front of the queue by receiving
data from the network as fast as userspace is consuming it to the
exclusion of other calls.
(2) Transmit delayed ACKs from within recvmsg() when we've consumed
sufficiently more packets to avoid the background work item needing to
run.
Signed-off-by: David Howells <dhowells@redhat.com>
2016-09-08 18:10:12 +08:00
|
|
|
struct rxrpc_peer *rxrpc_lookup_incoming_peer(struct rxrpc_local *,
|
|
|
|
struct rxrpc_peer *);
|
2016-04-04 21:00:32 +08:00
|
|
|
|
2016-08-24 21:31:43 +08:00
|
|
|
static inline struct rxrpc_peer *rxrpc_get_peer(struct rxrpc_peer *peer)
|
2016-04-04 21:00:32 +08:00
|
|
|
{
|
|
|
|
atomic_inc(&peer->usage);
|
2016-08-24 21:31:43 +08:00
|
|
|
return peer;
|
2016-04-04 21:00:32 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static inline
|
|
|
|
struct rxrpc_peer *rxrpc_get_peer_maybe(struct rxrpc_peer *peer)
|
|
|
|
{
|
|
|
|
return atomic_inc_not_zero(&peer->usage) ? peer : NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
extern void __rxrpc_put_peer(struct rxrpc_peer *peer);
|
|
|
|
static inline void rxrpc_put_peer(struct rxrpc_peer *peer)
|
|
|
|
{
|
2016-04-04 21:00:38 +08:00
|
|
|
if (peer && atomic_dec_and_test(&peer->usage))
|
2016-04-04 21:00:32 +08:00
|
|
|
__rxrpc_put_peer(peer);
|
|
|
|
}
|
2007-04-27 06:48:28 +08:00
|
|
|
|
|
|
|
/*
|
2016-06-13 20:30:30 +08:00
|
|
|
* proc.c
|
2007-04-27 06:48:28 +08:00
|
|
|
*/
|
2008-01-31 10:55:45 +08:00
|
|
|
extern const struct file_operations rxrpc_call_seq_fops;
|
|
|
|
extern const struct file_operations rxrpc_connection_seq_fops;
|
2007-04-27 06:48:28 +08:00
|
|
|
|
|
|
|
/*
|
2016-06-13 20:30:30 +08:00
|
|
|
* recvmsg.c
|
2007-04-27 06:48:28 +08:00
|
|
|
*/
|
rxrpc: Rewrite the data and ack handling code
Rewrite the data and ack handling code such that:
(1) Parsing of received ACK and ABORT packets and the distribution and the
filing of DATA packets happens entirely within the data_ready context
called from the UDP socket. This allows us to process and discard ACK
and ABORT packets much more quickly (they're no longer stashed on a
queue for a background thread to process).
(2) We avoid calling skb_clone(), pskb_pull() and pskb_trim(). We instead
keep track of the offset and length of the content of each packet in
the sk_buff metadata. This means we don't do any allocation in the
receive path.
(3) Jumbo DATA packet parsing is now done in data_ready context. Rather
than cloning the packet once for each subpacket and pulling/trimming
it, we file the packet multiple times with an annotation for each
indicating which subpacket is there. From that we can directly
calculate the offset and length.
(4) A call's receive queue can be accessed without taking locks (memory
barriers do have to be used, though).
(5) Incoming calls are set up from preallocated resources and immediately
made live. They can than have packets queued upon them and ACKs
generated. If insufficient resources exist, DATA packet #1 is given a
BUSY reply and other DATA packets are discarded).
(6) sk_buffs no longer take a ref on their parent call.
To make this work, the following changes are made:
(1) Each call's receive buffer is now a circular buffer of sk_buff
pointers (rxtx_buffer) rather than a number of sk_buff_heads spread
between the call and the socket. This permits each sk_buff to be in
the buffer multiple times. The receive buffer is reused for the
transmit buffer.
(2) A circular buffer of annotations (rxtx_annotations) is kept parallel
to the data buffer. Transmission phase annotations indicate whether a
buffered packet has been ACK'd or not and whether it needs
retransmission.
Receive phase annotations indicate whether a slot holds a whole packet
or a jumbo subpacket and, if the latter, which subpacket. They also
note whether the packet has been decrypted in place.
(3) DATA packet window tracking is much simplified. Each phase has just
two numbers representing the window (rx_hard_ack/rx_top and
tx_hard_ack/tx_top).
The hard_ack number is the sequence number before base of the window,
representing the last packet the other side says it has consumed.
hard_ack starts from 0 and the first packet is sequence number 1.
The top number is the sequence number of the highest-numbered packet
residing in the buffer. Packets between hard_ack+1 and top are
soft-ACK'd to indicate they've been received, but not yet consumed.
Four macros, before(), before_eq(), after() and after_eq() are added
to compare sequence numbers within the window. This allows for the
top of the window to wrap when the hard-ack sequence number gets close
to the limit.
Two flags, RXRPC_CALL_RX_LAST and RXRPC_CALL_TX_LAST, are added also
to indicate when rx_top and tx_top point at the packets with the
LAST_PACKET bit set, indicating the end of the phase.
(4) Calls are queued on the socket 'receive queue' rather than packets.
This means that we don't need have to invent dummy packets to queue to
indicate abnormal/terminal states and we don't have to keep metadata
packets (such as ABORTs) around
(5) The offset and length of a (sub)packet's content are now passed to
the verify_packet security op. This is currently expected to decrypt
the packet in place and validate it.
However, there's now nowhere to store the revised offset and length of
the actual data within the decrypted blob (there may be a header and
padding to skip) because an sk_buff may represent multiple packets, so
a locate_data security op is added to retrieve these details from the
sk_buff content when needed.
(6) recvmsg() now has to handle jumbo subpackets, where each subpacket is
individually secured and needs to be individually decrypted. The code
to do this is broken out into rxrpc_recvmsg_data() and shared with the
kernel API. It now iterates over the call's receive buffer rather
than walking the socket receive queue.
Additional changes:
(1) The timers are condensed to a single timer that is set for the soonest
of three timeouts (delayed ACK generation, DATA retransmission and
call lifespan).
(2) Transmission of ACK and ABORT packets is effected immediately from
process-context socket ops/kernel API calls that cause them instead of
them being punted off to a background work item. The data_ready
handler still has to defer to the background, though.
(3) A shutdown op is added to the AF_RXRPC socket so that the AFS
filesystem can shut down the socket and flush its own work items
before closing the socket to deal with any in-progress service calls.
Future additional changes that will need to be considered:
(1) Make sure that a call doesn't hog the front of the queue by receiving
data from the network as fast as userspace is consuming it to the
exclusion of other calls.
(2) Transmit delayed ACKs from within recvmsg() when we've consumed
sufficiently more packets to avoid the background work item needing to
run.
Signed-off-by: David Howells <dhowells@redhat.com>
2016-09-08 18:10:12 +08:00
|
|
|
void rxrpc_notify_socket(struct rxrpc_call *);
|
2015-03-02 15:37:48 +08:00
|
|
|
int rxrpc_recvmsg(struct socket *, struct msghdr *, size_t, int);
|
2007-04-27 06:48:28 +08:00
|
|
|
|
|
|
|
/*
|
2016-06-13 20:30:30 +08:00
|
|
|
* rxkad.c
|
|
|
|
*/
|
|
|
|
#ifdef CONFIG_RXKAD
|
|
|
|
extern const struct rxrpc_security rxkad;
|
|
|
|
#endif
|
|
|
|
|
|
|
|
/*
|
|
|
|
* security.c
|
2007-04-27 06:48:28 +08:00
|
|
|
*/
|
2016-04-08 00:23:51 +08:00
|
|
|
int __init rxrpc_init_security(void);
|
|
|
|
void rxrpc_exit_security(void);
|
2013-10-19 04:48:25 +08:00
|
|
|
int rxrpc_init_client_conn_security(struct rxrpc_connection *);
|
|
|
|
int rxrpc_init_server_conn_security(struct rxrpc_connection *);
|
2016-09-07 21:43:39 +08:00
|
|
|
|
2016-09-03 05:39:45 +08:00
|
|
|
/*
|
|
|
|
* sendmsg.c
|
|
|
|
*/
|
|
|
|
int rxrpc_do_sendmsg(struct rxrpc_sock *, struct msghdr *, size_t);
|
2007-04-27 06:48:28 +08:00
|
|
|
|
|
|
|
/*
|
2016-06-13 20:30:30 +08:00
|
|
|
* skbuff.c
|
2007-04-27 06:48:28 +08:00
|
|
|
*/
|
rxrpc: Don't expose skbs to in-kernel users [ver #2]
Don't expose skbs to in-kernel users, such as the AFS filesystem, but
instead provide a notification hook the indicates that a call needs
attention and another that indicates that there's a new call to be
collected.
This makes the following possibilities more achievable:
(1) Call refcounting can be made simpler if skbs don't hold refs to calls.
(2) skbs referring to non-data events will be able to be freed much sooner
rather than being queued for AFS to pick up as rxrpc_kernel_recv_data
will be able to consult the call state.
(3) We can shortcut the receive phase when a call is remotely aborted
because we don't have to go through all the packets to get to the one
cancelling the operation.
(4) It makes it easier to do encryption/decryption directly between AFS's
buffers and sk_buffs.
(5) Encryption/decryption can more easily be done in the AFS's thread
contexts - usually that of the userspace process that issued a syscall
- rather than in one of rxrpc's background threads on a workqueue.
(6) AFS will be able to wait synchronously on a call inside AF_RXRPC.
To make this work, the following interface function has been added:
int rxrpc_kernel_recv_data(
struct socket *sock, struct rxrpc_call *call,
void *buffer, size_t bufsize, size_t *_offset,
bool want_more, u32 *_abort_code);
This is the recvmsg equivalent. It allows the caller to find out about the
state of a specific call and to transfer received data into a buffer
piecemeal.
afs_extract_data() and rxrpc_kernel_recv_data() now do all the extraction
logic between them. They don't wait synchronously yet because the socket
lock needs to be dealt with.
Five interface functions have been removed:
rxrpc_kernel_is_data_last()
rxrpc_kernel_get_abort_code()
rxrpc_kernel_get_error_number()
rxrpc_kernel_free_skb()
rxrpc_kernel_data_consumed()
As a temporary hack, sk_buffs going to an in-kernel call are queued on the
rxrpc_call struct (->knlrecv_queue) rather than being handed over to the
in-kernel user. To process the queue internally, a temporary function,
temp_deliver_data() has been added. This will be replaced with common code
between the rxrpc_recvmsg() path and the kernel_rxrpc_recv_data() path in a
future patch.
Signed-off-by: David Howells <dhowells@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2016-08-31 03:42:14 +08:00
|
|
|
void rxrpc_kernel_data_consumed(struct rxrpc_call *, struct sk_buff *);
|
2013-10-19 04:48:25 +08:00
|
|
|
void rxrpc_packet_destructor(struct sk_buff *);
|
2016-09-17 17:49:14 +08:00
|
|
|
void rxrpc_new_skb(struct sk_buff *, enum rxrpc_skb_trace);
|
|
|
|
void rxrpc_see_skb(struct sk_buff *, enum rxrpc_skb_trace);
|
|
|
|
void rxrpc_get_skb(struct sk_buff *, enum rxrpc_skb_trace);
|
|
|
|
void rxrpc_free_skb(struct sk_buff *, enum rxrpc_skb_trace);
|
|
|
|
void rxrpc_lose_skb(struct sk_buff *, enum rxrpc_skb_trace);
|
2016-08-23 22:27:24 +08:00
|
|
|
void rxrpc_purge_queue(struct sk_buff_head *);
|
2007-04-27 06:48:28 +08:00
|
|
|
|
2014-02-08 02:58:44 +08:00
|
|
|
/*
|
|
|
|
* sysctl.c
|
|
|
|
*/
|
|
|
|
#ifdef CONFIG_SYSCTL
|
|
|
|
extern int __init rxrpc_sysctl_init(void);
|
|
|
|
extern void rxrpc_sysctl_exit(void);
|
|
|
|
#else
|
|
|
|
static inline int __init rxrpc_sysctl_init(void) { return 0; }
|
|
|
|
static inline void rxrpc_sysctl_exit(void) {}
|
|
|
|
#endif
|
|
|
|
|
2016-04-04 21:00:32 +08:00
|
|
|
/*
|
|
|
|
* utils.c
|
|
|
|
*/
|
2016-06-29 21:40:39 +08:00
|
|
|
int rxrpc_extract_addr_from_skb(struct sockaddr_rxrpc *, struct sk_buff *);
|
2016-04-04 21:00:32 +08:00
|
|
|
|
rxrpc: Rewrite the data and ack handling code
Rewrite the data and ack handling code such that:
(1) Parsing of received ACK and ABORT packets and the distribution and the
filing of DATA packets happens entirely within the data_ready context
called from the UDP socket. This allows us to process and discard ACK
and ABORT packets much more quickly (they're no longer stashed on a
queue for a background thread to process).
(2) We avoid calling skb_clone(), pskb_pull() and pskb_trim(). We instead
keep track of the offset and length of the content of each packet in
the sk_buff metadata. This means we don't do any allocation in the
receive path.
(3) Jumbo DATA packet parsing is now done in data_ready context. Rather
than cloning the packet once for each subpacket and pulling/trimming
it, we file the packet multiple times with an annotation for each
indicating which subpacket is there. From that we can directly
calculate the offset and length.
(4) A call's receive queue can be accessed without taking locks (memory
barriers do have to be used, though).
(5) Incoming calls are set up from preallocated resources and immediately
made live. They can than have packets queued upon them and ACKs
generated. If insufficient resources exist, DATA packet #1 is given a
BUSY reply and other DATA packets are discarded).
(6) sk_buffs no longer take a ref on their parent call.
To make this work, the following changes are made:
(1) Each call's receive buffer is now a circular buffer of sk_buff
pointers (rxtx_buffer) rather than a number of sk_buff_heads spread
between the call and the socket. This permits each sk_buff to be in
the buffer multiple times. The receive buffer is reused for the
transmit buffer.
(2) A circular buffer of annotations (rxtx_annotations) is kept parallel
to the data buffer. Transmission phase annotations indicate whether a
buffered packet has been ACK'd or not and whether it needs
retransmission.
Receive phase annotations indicate whether a slot holds a whole packet
or a jumbo subpacket and, if the latter, which subpacket. They also
note whether the packet has been decrypted in place.
(3) DATA packet window tracking is much simplified. Each phase has just
two numbers representing the window (rx_hard_ack/rx_top and
tx_hard_ack/tx_top).
The hard_ack number is the sequence number before base of the window,
representing the last packet the other side says it has consumed.
hard_ack starts from 0 and the first packet is sequence number 1.
The top number is the sequence number of the highest-numbered packet
residing in the buffer. Packets between hard_ack+1 and top are
soft-ACK'd to indicate they've been received, but not yet consumed.
Four macros, before(), before_eq(), after() and after_eq() are added
to compare sequence numbers within the window. This allows for the
top of the window to wrap when the hard-ack sequence number gets close
to the limit.
Two flags, RXRPC_CALL_RX_LAST and RXRPC_CALL_TX_LAST, are added also
to indicate when rx_top and tx_top point at the packets with the
LAST_PACKET bit set, indicating the end of the phase.
(4) Calls are queued on the socket 'receive queue' rather than packets.
This means that we don't need have to invent dummy packets to queue to
indicate abnormal/terminal states and we don't have to keep metadata
packets (such as ABORTs) around
(5) The offset and length of a (sub)packet's content are now passed to
the verify_packet security op. This is currently expected to decrypt
the packet in place and validate it.
However, there's now nowhere to store the revised offset and length of
the actual data within the decrypted blob (there may be a header and
padding to skip) because an sk_buff may represent multiple packets, so
a locate_data security op is added to retrieve these details from the
sk_buff content when needed.
(6) recvmsg() now has to handle jumbo subpackets, where each subpacket is
individually secured and needs to be individually decrypted. The code
to do this is broken out into rxrpc_recvmsg_data() and shared with the
kernel API. It now iterates over the call's receive buffer rather
than walking the socket receive queue.
Additional changes:
(1) The timers are condensed to a single timer that is set for the soonest
of three timeouts (delayed ACK generation, DATA retransmission and
call lifespan).
(2) Transmission of ACK and ABORT packets is effected immediately from
process-context socket ops/kernel API calls that cause them instead of
them being punted off to a background work item. The data_ready
handler still has to defer to the background, though.
(3) A shutdown op is added to the AF_RXRPC socket so that the AFS
filesystem can shut down the socket and flush its own work items
before closing the socket to deal with any in-progress service calls.
Future additional changes that will need to be considered:
(1) Make sure that a call doesn't hog the front of the queue by receiving
data from the network as fast as userspace is consuming it to the
exclusion of other calls.
(2) Transmit delayed ACKs from within recvmsg() when we've consumed
sufficiently more packets to avoid the background work item needing to
run.
Signed-off-by: David Howells <dhowells@redhat.com>
2016-09-08 18:10:12 +08:00
|
|
|
static inline bool before(u32 seq1, u32 seq2)
|
|
|
|
{
|
|
|
|
return (s32)(seq1 - seq2) < 0;
|
|
|
|
}
|
|
|
|
static inline bool before_eq(u32 seq1, u32 seq2)
|
|
|
|
{
|
|
|
|
return (s32)(seq1 - seq2) <= 0;
|
|
|
|
}
|
|
|
|
static inline bool after(u32 seq1, u32 seq2)
|
|
|
|
{
|
|
|
|
return (s32)(seq1 - seq2) > 0;
|
|
|
|
}
|
|
|
|
static inline bool after_eq(u32 seq1, u32 seq2)
|
|
|
|
{
|
|
|
|
return (s32)(seq1 - seq2) >= 0;
|
|
|
|
}
|
|
|
|
|
2007-04-27 06:48:28 +08:00
|
|
|
/*
|
|
|
|
* debug tracing
|
|
|
|
*/
|
2012-04-15 13:58:06 +08:00
|
|
|
extern unsigned int rxrpc_debug;
|
2007-04-27 06:48:28 +08:00
|
|
|
|
|
|
|
#define dbgprintk(FMT,...) \
|
2008-04-03 17:45:30 +08:00
|
|
|
printk("[%-6.6s] "FMT"\n", current->comm ,##__VA_ARGS__)
|
2007-04-27 06:48:28 +08:00
|
|
|
|
2008-03-06 12:47:47 +08:00
|
|
|
#define kenter(FMT,...) dbgprintk("==> %s("FMT")",__func__ ,##__VA_ARGS__)
|
|
|
|
#define kleave(FMT,...) dbgprintk("<== %s()"FMT"",__func__ ,##__VA_ARGS__)
|
2007-04-27 06:48:28 +08:00
|
|
|
#define kdebug(FMT,...) dbgprintk(" "FMT ,##__VA_ARGS__)
|
|
|
|
#define kproto(FMT,...) dbgprintk("### "FMT ,##__VA_ARGS__)
|
|
|
|
#define knet(FMT,...) dbgprintk("@@@ "FMT ,##__VA_ARGS__)
|
|
|
|
|
|
|
|
|
|
|
|
#if defined(__KDEBUG)
|
|
|
|
#define _enter(FMT,...) kenter(FMT,##__VA_ARGS__)
|
|
|
|
#define _leave(FMT,...) kleave(FMT,##__VA_ARGS__)
|
|
|
|
#define _debug(FMT,...) kdebug(FMT,##__VA_ARGS__)
|
|
|
|
#define _proto(FMT,...) kproto(FMT,##__VA_ARGS__)
|
|
|
|
#define _net(FMT,...) knet(FMT,##__VA_ARGS__)
|
|
|
|
|
|
|
|
#elif defined(CONFIG_AF_RXRPC_DEBUG)
|
|
|
|
#define RXRPC_DEBUG_KENTER 0x01
|
|
|
|
#define RXRPC_DEBUG_KLEAVE 0x02
|
|
|
|
#define RXRPC_DEBUG_KDEBUG 0x04
|
|
|
|
#define RXRPC_DEBUG_KPROTO 0x08
|
|
|
|
#define RXRPC_DEBUG_KNET 0x10
|
|
|
|
|
|
|
|
#define _enter(FMT,...) \
|
|
|
|
do { \
|
|
|
|
if (unlikely(rxrpc_debug & RXRPC_DEBUG_KENTER)) \
|
|
|
|
kenter(FMT,##__VA_ARGS__); \
|
|
|
|
} while (0)
|
|
|
|
|
|
|
|
#define _leave(FMT,...) \
|
|
|
|
do { \
|
|
|
|
if (unlikely(rxrpc_debug & RXRPC_DEBUG_KLEAVE)) \
|
|
|
|
kleave(FMT,##__VA_ARGS__); \
|
|
|
|
} while (0)
|
|
|
|
|
|
|
|
#define _debug(FMT,...) \
|
|
|
|
do { \
|
|
|
|
if (unlikely(rxrpc_debug & RXRPC_DEBUG_KDEBUG)) \
|
|
|
|
kdebug(FMT,##__VA_ARGS__); \
|
|
|
|
} while (0)
|
|
|
|
|
|
|
|
#define _proto(FMT,...) \
|
|
|
|
do { \
|
|
|
|
if (unlikely(rxrpc_debug & RXRPC_DEBUG_KPROTO)) \
|
|
|
|
kproto(FMT,##__VA_ARGS__); \
|
|
|
|
} while (0)
|
|
|
|
|
|
|
|
#define _net(FMT,...) \
|
|
|
|
do { \
|
|
|
|
if (unlikely(rxrpc_debug & RXRPC_DEBUG_KNET)) \
|
|
|
|
knet(FMT,##__VA_ARGS__); \
|
|
|
|
} while (0)
|
|
|
|
|
|
|
|
#else
|
2010-08-12 23:54:57 +08:00
|
|
|
#define _enter(FMT,...) no_printk("==> %s("FMT")",__func__ ,##__VA_ARGS__)
|
|
|
|
#define _leave(FMT,...) no_printk("<== %s()"FMT"",__func__ ,##__VA_ARGS__)
|
|
|
|
#define _debug(FMT,...) no_printk(" "FMT ,##__VA_ARGS__)
|
|
|
|
#define _proto(FMT,...) no_printk("### "FMT ,##__VA_ARGS__)
|
|
|
|
#define _net(FMT,...) no_printk("@@@ "FMT ,##__VA_ARGS__)
|
2007-04-27 06:48:28 +08:00
|
|
|
#endif
|
|
|
|
|
|
|
|
/*
|
|
|
|
* debug assertion checking
|
|
|
|
*/
|
|
|
|
#if 1 // defined(__KDEBUGALL)
|
|
|
|
|
|
|
|
#define ASSERT(X) \
|
|
|
|
do { \
|
|
|
|
if (unlikely(!(X))) { \
|
2016-06-03 03:08:52 +08:00
|
|
|
pr_err("Assertion failed\n"); \
|
2007-04-27 06:48:28 +08:00
|
|
|
BUG(); \
|
|
|
|
} \
|
2016-03-04 23:56:19 +08:00
|
|
|
} while (0)
|
2007-04-27 06:48:28 +08:00
|
|
|
|
|
|
|
#define ASSERTCMP(X, OP, Y) \
|
|
|
|
do { \
|
2016-09-08 18:10:11 +08:00
|
|
|
__typeof__(X) _x = (X); \
|
|
|
|
__typeof__(Y) _y = (__typeof__(X))(Y); \
|
2016-06-03 03:08:52 +08:00
|
|
|
if (unlikely(!(_x OP _y))) { \
|
2016-09-08 18:10:11 +08:00
|
|
|
pr_err("Assertion failed - %lu(0x%lx) %s %lu(0x%lx) is false\n", \
|
|
|
|
(unsigned long)_x, (unsigned long)_x, #OP, \
|
|
|
|
(unsigned long)_y, (unsigned long)_y); \
|
2007-04-27 06:48:28 +08:00
|
|
|
BUG(); \
|
|
|
|
} \
|
2016-03-04 23:56:19 +08:00
|
|
|
} while (0)
|
2007-04-27 06:48:28 +08:00
|
|
|
|
|
|
|
#define ASSERTIF(C, X) \
|
|
|
|
do { \
|
|
|
|
if (unlikely((C) && !(X))) { \
|
2016-06-03 03:08:52 +08:00
|
|
|
pr_err("Assertion failed\n"); \
|
2007-04-27 06:48:28 +08:00
|
|
|
BUG(); \
|
|
|
|
} \
|
2016-03-04 23:56:19 +08:00
|
|
|
} while (0)
|
2007-04-27 06:48:28 +08:00
|
|
|
|
|
|
|
#define ASSERTIFCMP(C, X, OP, Y) \
|
|
|
|
do { \
|
2016-09-08 18:10:11 +08:00
|
|
|
__typeof__(X) _x = (X); \
|
|
|
|
__typeof__(Y) _y = (__typeof__(X))(Y); \
|
2016-06-03 03:08:52 +08:00
|
|
|
if (unlikely((C) && !(_x OP _y))) { \
|
|
|
|
pr_err("Assertion failed - %lu(0x%lx) %s %lu(0x%lx) is false\n", \
|
2016-09-08 18:10:11 +08:00
|
|
|
(unsigned long)_x, (unsigned long)_x, #OP, \
|
|
|
|
(unsigned long)_y, (unsigned long)_y); \
|
2007-04-27 06:48:28 +08:00
|
|
|
BUG(); \
|
|
|
|
} \
|
2016-03-04 23:56:19 +08:00
|
|
|
} while (0)
|
2007-04-27 06:48:28 +08:00
|
|
|
|
|
|
|
#else
|
|
|
|
|
|
|
|
#define ASSERT(X) \
|
|
|
|
do { \
|
2016-03-04 23:56:19 +08:00
|
|
|
} while (0)
|
2007-04-27 06:48:28 +08:00
|
|
|
|
|
|
|
#define ASSERTCMP(X, OP, Y) \
|
|
|
|
do { \
|
2016-03-04 23:56:19 +08:00
|
|
|
} while (0)
|
2007-04-27 06:48:28 +08:00
|
|
|
|
|
|
|
#define ASSERTIF(C, X) \
|
|
|
|
do { \
|
2016-03-04 23:56:19 +08:00
|
|
|
} while (0)
|
2007-04-27 06:48:28 +08:00
|
|
|
|
|
|
|
#define ASSERTIFCMP(C, X, OP, Y) \
|
|
|
|
do { \
|
2016-03-04 23:56:19 +08:00
|
|
|
} while (0)
|
2007-04-27 06:48:28 +08:00
|
|
|
|
|
|
|
#endif /* __KDEBUGALL */
|