rxrpc: Keep the call timeouts as ktimes rather than jiffies
Keep the call timeouts as ktimes rather than jiffies so that they can be expressed as functions of RTT.

Signed-off-by: David Howells <dhowells@redhat.com>
parent c31410ea00
commit df0adc788a
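For orientation before the diff: once the deadlines are held as ktimes, a timeout can be computed directly from a measured RTT in nanoseconds instead of being rounded to a jiffies constant. A minimal standalone sketch of that idea (assumptions: ktime_t is modelled here as a plain signed 64-bit nanosecond count, and the 2 * RTT policy is purely illustrative, not taken from this commit):

/* Minimal sketch: an RTT-derived deadline in nanoseconds.
 * Assumptions: ktime_t is modelled as int64_t nanoseconds and the
 * 2 * RTT policy is illustrative only, not rxrpc's actual policy. */
#include <stdint.h>
#include <stdio.h>

typedef int64_t ktime_ns;                     /* stand-in for ktime_t */

static ktime_ns resend_deadline(ktime_ns now, uint64_t peer_rtt_ns)
{
        return now + 2 * (ktime_ns)peer_rtt_ns;   /* deadline as f(RTT) */
}

int main(void)
{
        ktime_ns now = 1000000000;            /* arbitrary "now": 1s in ns */
        uint64_t rtt = 350000;                /* e.g. a 350us measured RTT */

        printf("resend in %lld ns\n",
               (long long)(resend_deadline(now, rtt) - now));
        return 0;
}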
@@ -453,17 +453,18 @@ TRACE_EVENT(rxrpc_rtt_rx,
 
 TRACE_EVENT(rxrpc_timer,
            TP_PROTO(struct rxrpc_call *call, enum rxrpc_timer_trace why,
-                    unsigned long now),
+                    ktime_t now, unsigned long now_j),
 
-           TP_ARGS(call, why, now),
+           TP_ARGS(call, why, now, now_j),
 
            TP_STRUCT__entry(
                    __field(struct rxrpc_call *,        call            )
                    __field(enum rxrpc_timer_trace,     why             )
-                   __field(unsigned long,              now             )
-                   __field(unsigned long,              expire_at       )
-                   __field(unsigned long,              ack_at          )
-                   __field(unsigned long,              resend_at       )
+                   __field_struct(ktime_t,             now             )
+                   __field_struct(ktime_t,             expire_at       )
+                   __field_struct(ktime_t,             ack_at          )
+                   __field_struct(ktime_t,             resend_at       )
+                   __field(unsigned long,              now_j           )
                    __field(unsigned long,              timer           )
                             ),
@@ -474,17 +475,17 @@ TRACE_EVENT(rxrpc_timer,
                    __entry->expire_at  = call->expire_at;
                    __entry->ack_at     = call->ack_at;
                    __entry->resend_at  = call->resend_at;
+                   __entry->now_j      = now_j;
                    __entry->timer      = call->timer.expires;
                           ),
 
-           TP_printk("c=%p %s now=%lx x=%ld a=%ld r=%ld t=%ld",
+           TP_printk("c=%p %s x=%lld a=%lld r=%lld t=%ld",
                      __entry->call,
                      rxrpc_timer_traces[__entry->why],
-                     __entry->now,
-                     __entry->expire_at - __entry->now,
-                     __entry->ack_at - __entry->now,
-                     __entry->resend_at - __entry->now,
-                     __entry->timer - __entry->now)
+                     ktime_to_ns(ktime_sub(__entry->expire_at, __entry->now)),
+                     ktime_to_ns(ktime_sub(__entry->ack_at, __entry->now)),
+                     ktime_to_ns(ktime_sub(__entry->resend_at, __entry->now)),
+                     __entry->timer - __entry->now_j)
            );
 
 TRACE_EVENT(rxrpc_rx_lose,

@@ -464,9 +464,9 @@ struct rxrpc_call {
        struct rxrpc_connection *conn;          /* connection carrying call */
        struct rxrpc_peer       *peer;          /* Peer record for remote address */
        struct rxrpc_sock __rcu *socket;        /* socket responsible */
-       unsigned long           ack_at;         /* When deferred ACK needs to happen */
-       unsigned long           resend_at;      /* When next resend needs to happen */
-       unsigned long           expire_at;      /* When the call times out */
+       ktime_t                 ack_at;         /* When deferred ACK needs to happen */
+       ktime_t                 resend_at;      /* When next resend needs to happen */
+       ktime_t                 expire_at;      /* When the call times out */
        struct timer_list       timer;          /* Combined event timer */
        struct work_struct      processor;      /* Event processor */
        rxrpc_notify_rx_t       notify_rx;      /* kernel service Rx notification function */
@@ -805,7 +805,7 @@ int rxrpc_reject_call(struct rxrpc_sock *);
 /*
  * call_event.c
  */
-void rxrpc_set_timer(struct rxrpc_call *, enum rxrpc_timer_trace);
+void rxrpc_set_timer(struct rxrpc_call *, enum rxrpc_timer_trace, ktime_t);
 void rxrpc_propose_ACK(struct rxrpc_call *, u8, u16, u32, bool, bool,
                        enum rxrpc_propose_ack_trace);
 void rxrpc_process_call(struct work_struct *);

@@ -24,28 +24,40 @@
 /*
  * Set the timer
  */
-void rxrpc_set_timer(struct rxrpc_call *call, enum rxrpc_timer_trace why)
+void rxrpc_set_timer(struct rxrpc_call *call, enum rxrpc_timer_trace why,
+                    ktime_t now)
 {
-       unsigned long t, now = jiffies;
+       unsigned long t_j, now_j = jiffies;
+       ktime_t t;
 
        read_lock_bh(&call->state_lock);
 
        if (call->state < RXRPC_CALL_COMPLETE) {
                t = call->expire_at;
-               if (time_before_eq(t, now))
+               if (!ktime_after(t, now))
                        goto out;
 
-               if (time_after(call->resend_at, now) &&
-                   time_before(call->resend_at, t))
+               if (ktime_after(call->resend_at, now) &&
+                   ktime_before(call->resend_at, t))
                        t = call->resend_at;
 
-               if (time_after(call->ack_at, now) &&
-                   time_before(call->ack_at, t))
+               if (ktime_after(call->ack_at, now) &&
+                   ktime_before(call->ack_at, t))
                        t = call->ack_at;
 
-               if (call->timer.expires != t || !timer_pending(&call->timer)) {
-                       mod_timer(&call->timer, t);
-                       trace_rxrpc_timer(call, why, now);
+               t_j = nsecs_to_jiffies(ktime_to_ns(ktime_sub(t, now)));
+               t_j += jiffies;
+
+               /* We have to make sure that the calculated jiffies value falls
+                * at or after the nsec value, or we may loop ceaselessly
+                * because the timer times out, but we haven't reached the nsec
+                * timeout yet.
+                */
+               t_j++;
+
+               if (call->timer.expires != t_j || !timer_pending(&call->timer)) {
+                       mod_timer(&call->timer, t_j);
+                       trace_rxrpc_timer(call, why, now, now_j);
                }
        }
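The t_j++ above compensates for truncation when the ktime deadline is converted to jiffies. A standalone sketch of that rounding concern (assumptions: the HZ value and the delta are illustrative examples, and the plain division stands in for the kernel's nsecs_to_jiffies()):

/* Sketch of the ns -> jiffies rounding issue noted in rxrpc_set_timer().
 * Assumptions: HZ = 250 and the ~8ms delta are example values only. */
#include <stdint.h>
#include <stdio.h>

#define HZ 250                                /* example tick rate */

static uint64_t ns_to_jiffies(uint64_t ns)
{
        return ns / (1000000000ULL / HZ);     /* truncating, like nsecs_to_jiffies() */
}

int main(void)
{
        uint64_t delta_ns = 7999999;          /* just under 2 ticks at HZ=250 */
        uint64_t t_j = ns_to_jiffies(delta_ns);

        /* Truncation would let the timer fire a tick before the ns deadline,
         * re-queueing the work item until the deadline finally passes, so the
         * jiffies expiry is bumped by one. */
        printf("%llu ns -> %llu jiffies; arm timer for %llu jiffies\n",
               (unsigned long long)delta_ns,
               (unsigned long long)t_j,
               (unsigned long long)(t_j + 1));
        return 0;
}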
@@ -62,7 +74,8 @@ static void __rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason,
                                enum rxrpc_propose_ack_trace why)
 {
        enum rxrpc_propose_ack_outcome outcome = rxrpc_propose_ack_use;
-       unsigned long now, ack_at, expiry = rxrpc_soft_ack_delay;
+       unsigned int expiry = rxrpc_soft_ack_delay;
+       ktime_t now, ack_at;
        s8 prior = rxrpc_ack_priority[ack_reason];
 
        /* Update DELAY, IDLE, REQUESTED and PING_RESPONSE ACK serial
@@ -111,7 +124,6 @@ static void __rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason,
                break;
        }
 
-       now = jiffies;
        if (test_bit(RXRPC_CALL_EV_ACK, &call->events)) {
                _debug("already scheduled");
        } else if (immediate || expiry == 0) {
@@ -120,11 +132,11 @@ static void __rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason,
                    background)
                        rxrpc_queue_call(call);
        } else {
-               ack_at = now + expiry;
-               _debug("deferred ACK %ld < %ld", expiry, call->ack_at - now);
-               if (time_before(ack_at, call->ack_at)) {
+               now = ktime_get_real();
+               ack_at = ktime_add_ms(now, expiry);
+               if (ktime_before(ack_at, call->ack_at)) {
                        call->ack_at = ack_at;
-                       rxrpc_set_timer(call, rxrpc_timer_set_for_ack);
+                       rxrpc_set_timer(call, rxrpc_timer_set_for_ack, now);
                }
        }
@@ -157,12 +169,12 @@ static void rxrpc_congestion_timeout(struct rxrpc_call *call)
 /*
  * Perform retransmission of NAK'd and unack'd packets.
  */
-static void rxrpc_resend(struct rxrpc_call *call)
+static void rxrpc_resend(struct rxrpc_call *call, ktime_t now)
 {
        struct rxrpc_skb_priv *sp;
        struct sk_buff *skb;
        rxrpc_seq_t cursor, seq, top;
-       ktime_t now = ktime_get_real(), max_age, oldest, resend_at, ack_ts;
+       ktime_t max_age, oldest, ack_ts;
        int ix;
        u8 annotation, anno_type, retrans = 0, unacked = 0;
 
@@ -212,14 +224,7 @@ static void rxrpc_resend(struct rxrpc_call *call)
                               ktime_to_ns(ktime_sub(skb->tstamp, max_age)));
        }
 
-       resend_at = ktime_add_ms(oldest, rxrpc_resend_timeout);
-       call->resend_at = jiffies +
-               nsecs_to_jiffies(ktime_to_ns(ktime_sub(resend_at, now))) +
-               1; /* We have to make sure that the calculated jiffies value
-                   * falls at or after the nsec value, or we shall loop
-                   * ceaselessly because the timer times out, but we haven't
-                   * reached the nsec timeout yet.
-                   */
+       call->resend_at = ktime_add_ms(oldest, rxrpc_resend_timeout);
 
        if (unacked)
                rxrpc_congestion_timeout(call);
@@ -229,7 +234,7 @@ static void rxrpc_resend(struct rxrpc_call *call)
         * retransmitting data.
         */
        if (!retrans) {
-               rxrpc_set_timer(call, rxrpc_timer_set_for_resend);
+               rxrpc_set_timer(call, rxrpc_timer_set_for_resend, now);
                spin_unlock_bh(&call->lock);
                ack_ts = ktime_sub(now, call->acks_latest_ts);
                if (ktime_to_ns(ack_ts) < call->peer->rtt)
@@ -301,7 +306,7 @@ void rxrpc_process_call(struct work_struct *work)
 {
        struct rxrpc_call *call =
                container_of(work, struct rxrpc_call, processor);
-       unsigned long now;
+       ktime_t now;
 
        rxrpc_see_call(call);
 
@@ -320,15 +325,15 @@ void rxrpc_process_call(struct work_struct *work)
                goto out_put;
        }
 
-       now = jiffies;
-       if (time_after_eq(now, call->expire_at)) {
+       now = ktime_get_real();
+       if (ktime_before(call->expire_at, now)) {
                rxrpc_abort_call("EXP", call, 0, RX_CALL_TIMEOUT, ETIME);
                set_bit(RXRPC_CALL_EV_ABORT, &call->events);
                goto recheck_state;
        }
 
        if (test_and_clear_bit(RXRPC_CALL_EV_ACK, &call->events) ||
-           time_after_eq(now, call->ack_at)) {
+           ktime_before(call->ack_at, now)) {
                call->ack_at = call->expire_at;
                if (call->ackr_reason) {
                        rxrpc_send_call_packet(call, RXRPC_PACKET_TYPE_ACK);
@@ -337,12 +342,12 @@ void rxrpc_process_call(struct work_struct *work)
        }
 
        if (test_and_clear_bit(RXRPC_CALL_EV_RESEND, &call->events) ||
-           time_after_eq(now, call->resend_at)) {
-               rxrpc_resend(call);
+           ktime_before(call->resend_at, now)) {
+               rxrpc_resend(call, now);
                goto recheck_state;
        }
 
-       rxrpc_set_timer(call, rxrpc_timer_set_for_resend);
+       rxrpc_set_timer(call, rxrpc_timer_set_for_resend, now);
 
        /* other events may have been raised since we started checking */
        if (call->events && call->state < RXRPC_CALL_COMPLETE) {

@@ -19,11 +19,6 @@
 #include <net/af_rxrpc.h>
 #include "ar-internal.h"
 
-/*
- * Maximum lifetime of a call (in jiffies).
- */
-unsigned int rxrpc_max_call_lifetime = 60 * HZ;
-
 const char *const rxrpc_call_states[NR__RXRPC_CALL_STATES] = {
        [RXRPC_CALL_UNINITIALISED]              = "Uninit ",
        [RXRPC_CALL_CLIENT_AWAIT_CONN]          = "ClWtConn",
@@ -77,7 +72,8 @@ static void rxrpc_call_timer_expired(unsigned long _call)
        _enter("%d", call->debug_id);
 
        if (call->state < RXRPC_CALL_COMPLETE) {
-               trace_rxrpc_timer(call, rxrpc_timer_expired, jiffies);
+               trace_rxrpc_timer(call, rxrpc_timer_expired,
+                                 ktime_get_real(), jiffies);
                rxrpc_queue_call(call);
        }
 }
@@ -207,14 +203,14 @@ static struct rxrpc_call *rxrpc_alloc_client_call(struct sockaddr_rxrpc *srx,
  */
 static void rxrpc_start_call_timer(struct rxrpc_call *call)
 {
-       unsigned long expire_at;
+       ktime_t now = ktime_get_real(), expire_at;
 
-       expire_at = jiffies + rxrpc_max_call_lifetime;
+       expire_at = ktime_add_ms(now, rxrpc_max_call_lifetime);
        call->expire_at = expire_at;
        call->ack_at = expire_at;
        call->resend_at = expire_at;
-       call->timer.expires = expire_at + 1;
-       rxrpc_set_timer(call, rxrpc_timer_begin);
+       call->timer.expires = jiffies + LONG_MAX / 2;
+       rxrpc_set_timer(call, rxrpc_timer_begin, now);
 }
 
 /*

@@ -328,7 +328,8 @@ static bool rxrpc_receiving_reply(struct rxrpc_call *call)
                call->resend_at = call->expire_at;
                call->ack_at = call->expire_at;
                spin_unlock_bh(&call->lock);
-               rxrpc_set_timer(call, rxrpc_timer_init_for_reply);
+               rxrpc_set_timer(call, rxrpc_timer_init_for_reply,
+                               ktime_get_real());
        }
 
        if (!test_bit(RXRPC_CALL_TX_LAST, &call->flags))

@@ -20,29 +20,34 @@
  */
 unsigned int rxrpc_max_backlog __read_mostly = 10;
 
+/*
+ * Maximum lifetime of a call (in ms).
+ */
+unsigned int rxrpc_max_call_lifetime = 60 * 1000;
+
 /*
  * How long to wait before scheduling ACK generation after seeing a
- * packet with RXRPC_REQUEST_ACK set (in jiffies).
+ * packet with RXRPC_REQUEST_ACK set (in ms).
  */
 unsigned int rxrpc_requested_ack_delay = 1;
 
 /*
- * How long to wait before scheduling an ACK with subtype DELAY (in jiffies).
+ * How long to wait before scheduling an ACK with subtype DELAY (in ms).
  *
  * We use this when we've received new data packets.  If those packets aren't
  * all consumed within this time we will send a DELAY ACK if an ACK was not
  * requested to let the sender know it doesn't need to resend.
  */
-unsigned int rxrpc_soft_ack_delay = 1 * HZ;
+unsigned int rxrpc_soft_ack_delay = 1 * 1000;
 
 /*
- * How long to wait before scheduling an ACK with subtype IDLE (in jiffies).
+ * How long to wait before scheduling an ACK with subtype IDLE (in ms).
  *
  * We use this when we've consumed some previously soft-ACK'd packets when
  * further packets aren't immediately received to decide when to send an IDLE
  * ACK let the other end know that it can free up its Tx buffer space.
  */
-unsigned int rxrpc_idle_ack_delay = 0.5 * HZ;
+unsigned int rxrpc_idle_ack_delay = 0.5 * 1000;
 
 /*
  * Receive window size in packets.  This indicates the maximum number of
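Since the tunables above are now plain millisecond counts, they combine with a ktime "now" via ktime_add_ms(), as the call_object.c hunk does. A standalone sketch of that arithmetic (assumptions: ktime_t is modelled as int64_t nanoseconds, and the 60 * 1000 value mirrors rxrpc_max_call_lifetime in the hunk above):

/* Sketch: a millisecond tunable turned into an absolute ktime deadline.
 * Assumption: ktime_t is modelled here as int64_t nanoseconds. */
#include <stdint.h>
#include <stdio.h>

typedef int64_t ktime_ns;                     /* stand-in for ktime_t */

static ktime_ns ktime_add_ms_sketch(ktime_ns t, unsigned int ms)
{
        return t + (ktime_ns)ms * 1000000;    /* same arithmetic as ktime_add_ms() */
}

int main(void)
{
        unsigned int max_call_lifetime_ms = 60 * 1000;  /* value from the hunk above */
        ktime_ns now = 0;                     /* arbitrary start time */

        printf("call expires %lld ns after it starts\n",
               (long long)ktime_add_ms_sketch(now, max_call_lifetime_ms));
        return 0;
}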
@@ -149,13 +149,13 @@ static void rxrpc_queue_packet(struct rxrpc_call *call, struct sk_buff *skb,
                        _debug("need instant resend %d", ret);
                        rxrpc_instant_resend(call, ix);
                } else {
-                       unsigned long resend_at;
+                       ktime_t now = ktime_get_real(), resend_at;
 
-                       resend_at = jiffies + msecs_to_jiffies(rxrpc_resend_timeout);
+                       resend_at = ktime_add_ms(now, rxrpc_resend_timeout);
 
-                       if (time_before(resend_at, call->resend_at)) {
+                       if (ktime_before(resend_at, call->resend_at)) {
                                call->resend_at = resend_at;
-                               rxrpc_set_timer(call, rxrpc_timer_set_for_send);
+                               rxrpc_set_timer(call, rxrpc_timer_set_for_send, now);
                        }
                }
 
@@ -35,7 +35,7 @@ static struct ctl_table rxrpc_sysctl_table[] = {
                .data           = &rxrpc_requested_ack_delay,
                .maxlen         = sizeof(unsigned int),
                .mode           = 0644,
-               .proc_handler   = proc_dointvec_ms_jiffies,
+               .proc_handler   = proc_dointvec,
                .extra1         = (void *)&zero,
        },
        {
@@ -43,7 +43,7 @@ static struct ctl_table rxrpc_sysctl_table[] = {
                .data           = &rxrpc_soft_ack_delay,
                .maxlen         = sizeof(unsigned int),
                .mode           = 0644,
-               .proc_handler   = proc_dointvec_ms_jiffies,
+               .proc_handler   = proc_dointvec,
                .extra1         = (void *)&one,
        },
        {
@@ -51,7 +51,7 @@ static struct ctl_table rxrpc_sysctl_table[] = {
                .data           = &rxrpc_idle_ack_delay,
                .maxlen         = sizeof(unsigned int),
                .mode           = 0644,
-               .proc_handler   = proc_dointvec_ms_jiffies,
+               .proc_handler   = proc_dointvec,
                .extra1         = (void *)&one,
        },
        {
@@ -85,7 +85,7 @@ static struct ctl_table rxrpc_sysctl_table[] = {
                .data           = &rxrpc_max_call_lifetime,
                .maxlen         = sizeof(unsigned int),
                .mode           = 0644,
-               .proc_handler   = proc_dointvec_jiffies,
+               .proc_handler   = proc_dointvec,
                .extra1         = (void *)&one,
        },