SUNRPC: Add trace points to observe transport congestion control

To help debug problems with RPC/RDMA credit management, replace
dprintk() call sites in the transport send lock paths with trace
events.

Similar trace points are defined for the non-congestion paths.
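
For illustration only (the field values below are invented, not captured
output), the two event classes added below render their payloads like this:

    xprt_reserve_xprt: task:46@2 snd_task:46
    xprt_get_cong:     task:46@2 snd_task:46 cong=3840 cwnd=4096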

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
commit bf7ca707ae (parent 5cd8b0d4dd)
Author: Chuck Lever, 2019-10-09 12:58:14 -04:00; committed by Anna Schumaker
 2 files changed, 106 insertions(+), 9 deletions(-)

--- a/include/trace/events/sunrpc.h
+++ b/include/trace/events/sunrpc.h

@@ -777,6 +777,99 @@ TRACE_EVENT(xprt_ping,
 		__get_str(addr), __get_str(port), __entry->status)
 );
 
+DECLARE_EVENT_CLASS(xprt_writelock_event,
+	TP_PROTO(
+		const struct rpc_xprt *xprt, const struct rpc_task *task
+	),
+
+	TP_ARGS(xprt, task),
+
+	TP_STRUCT__entry(
+		__field(unsigned int, task_id)
+		__field(unsigned int, client_id)
+		__field(unsigned int, snd_task_id)
+	),
+
+	TP_fast_assign(
+		if (task) {
+			__entry->task_id = task->tk_pid;
+			__entry->client_id = task->tk_client ?
+					     task->tk_client->cl_clid : -1;
+		} else {
+			__entry->task_id = -1;
+			__entry->client_id = -1;
+		}
+		__entry->snd_task_id = xprt->snd_task ?
+					xprt->snd_task->tk_pid : -1;
+	),
+
+	TP_printk("task:%u@%u snd_task:%u",
+			__entry->task_id, __entry->client_id,
+			__entry->snd_task_id)
+);
+
+#define DEFINE_WRITELOCK_EVENT(name) \
+	DEFINE_EVENT(xprt_writelock_event, xprt_##name, \
+			TP_PROTO( \
+				const struct rpc_xprt *xprt, \
+				const struct rpc_task *task \
+			), \
+			TP_ARGS(xprt, task))
+
+DEFINE_WRITELOCK_EVENT(reserve_xprt);
+DEFINE_WRITELOCK_EVENT(release_xprt);
+
+DECLARE_EVENT_CLASS(xprt_cong_event,
+	TP_PROTO(
+		const struct rpc_xprt *xprt, const struct rpc_task *task
+	),
+
+	TP_ARGS(xprt, task),
+
+	TP_STRUCT__entry(
+		__field(unsigned int, task_id)
+		__field(unsigned int, client_id)
+		__field(unsigned int, snd_task_id)
+		__field(unsigned long, cong)
+		__field(unsigned long, cwnd)
+		__field(bool, wait)
+	),
+
+	TP_fast_assign(
+		if (task) {
+			__entry->task_id = task->tk_pid;
+			__entry->client_id = task->tk_client ?
+					     task->tk_client->cl_clid : -1;
+		} else {
+			__entry->task_id = -1;
+			__entry->client_id = -1;
+		}
+		__entry->snd_task_id = xprt->snd_task ?
+					xprt->snd_task->tk_pid : -1;
+		__entry->cong = xprt->cong;
+		__entry->cwnd = xprt->cwnd;
+		__entry->wait = test_bit(XPRT_CWND_WAIT, &xprt->state);
+	),
+
+	TP_printk("task:%u@%u snd_task:%u cong=%lu cwnd=%lu%s",
+			__entry->task_id, __entry->client_id,
+			__entry->snd_task_id, __entry->cong, __entry->cwnd,
+			__entry->wait ? " (wait)" : "")
+);
+
+#define DEFINE_CONG_EVENT(name) \
+	DEFINE_EVENT(xprt_cong_event, xprt_##name, \
+			TP_PROTO( \
+				const struct rpc_xprt *xprt, \
+				const struct rpc_task *task \
+			), \
+			TP_ARGS(xprt, task))
+
+DEFINE_CONG_EVENT(reserve_cong);
+DEFINE_CONG_EVENT(release_cong);
+DEFINE_CONG_EVENT(get_cong);
+DEFINE_CONG_EVENT(put_cong);
+
 TRACE_EVENT(xs_stream_read_data,
 	TP_PROTO(struct rpc_xprt *xprt, ssize_t err, size_t total),

--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c

@@ -205,20 +205,20 @@ int xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
 
 	if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
 		if (task == xprt->snd_task)
-			return 1;
+			goto out_locked;
 		goto out_sleep;
 	}
 	if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
 		goto out_unlock;
 	xprt->snd_task = task;
 
+out_locked:
+	trace_xprt_reserve_xprt(xprt, task);
 	return 1;
 
 out_unlock:
 	xprt_clear_locked(xprt);
 out_sleep:
-	dprintk("RPC: %5u failed to lock transport %p\n",
-			task->tk_pid, xprt);
 	task->tk_status = -EAGAIN;
 	if (RPC_IS_SOFT(task))
 		rpc_sleep_on_timeout(&xprt->sending, task, NULL,
@@ -269,23 +269,22 @@ int xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
 
 	if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
 		if (task == xprt->snd_task)
-			return 1;
+			goto out_locked;
 		goto out_sleep;
 	}
 	if (req == NULL) {
 		xprt->snd_task = task;
-		return 1;
+		goto out_locked;
 	}
 	if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
 		goto out_unlock;
 	if (!xprt_need_congestion_window_wait(xprt)) {
 		xprt->snd_task = task;
-		return 1;
+		goto out_locked;
 	}
 out_unlock:
 	xprt_clear_locked(xprt);
 out_sleep:
-	dprintk("RPC: %5u failed to lock transport %p\n", task->tk_pid, xprt);
 	task->tk_status = -EAGAIN;
 	if (RPC_IS_SOFT(task))
 		rpc_sleep_on_timeout(&xprt->sending, task, NULL,
@@ -293,6 +292,9 @@ int xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
 	else
 		rpc_sleep_on(&xprt->sending, task, NULL);
 	return 0;
+out_locked:
+	trace_xprt_reserve_cong(xprt, task);
+	return 1;
 }
 EXPORT_SYMBOL_GPL(xprt_reserve_xprt_cong);
@@ -357,6 +359,7 @@ void xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
 		xprt_clear_locked(xprt);
 		__xprt_lock_write_next(xprt);
 	}
+	trace_xprt_release_xprt(xprt, task);
 }
 EXPORT_SYMBOL_GPL(xprt_release_xprt);
@@ -374,6 +377,7 @@ void xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
 		xprt_clear_locked(xprt);
 		__xprt_lock_write_next_cong(xprt);
 	}
+	trace_xprt_release_cong(xprt, task);
 }
 EXPORT_SYMBOL_GPL(xprt_release_xprt_cong);
@@ -395,8 +399,7 @@ __xprt_get_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
 {
 	if (req->rq_cong)
 		return 1;
-	dprintk("RPC: %5u xprt_cwnd_limited cong = %lu cwnd = %lu\n",
-			req->rq_task->tk_pid, xprt->cong, xprt->cwnd);
+	trace_xprt_get_cong(xprt, req->rq_task);
 	if (RPCXPRT_CONGESTED(xprt)) {
 		xprt_set_congestion_window_wait(xprt);
 		return 0;
@@ -418,6 +421,7 @@ __xprt_put_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
 	req->rq_cong = 0;
 	xprt->cong -= RPC_CWNDSCALE;
 	xprt_test_and_clear_congestion_window_wait(xprt);
+	trace_xprt_put_cong(xprt, req->rq_task);
 	__xprt_lock_write_next_cong(xprt);
 }
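
Usage note (not part of the patch): these events join the existing "sunrpc"
trace group, so they can be switched on through tracefs like any other sunrpc
tracepoint. A minimal sketch in C, assuming tracefs is mounted at
/sys/kernel/tracing (a common alternative is /sys/kernel/debug/tracing); the
helper name is made up for this example:

	#include <fcntl.h>
	#include <unistd.h>

	/* Enable every event in the sunrpc trace group, including the
	 * xprt_reserve_cong, xprt_release_cong, xprt_get_cong, and
	 * xprt_put_cong events added by this patch. */
	int enable_sunrpc_trace_events(void)
	{
		int fd = open("/sys/kernel/tracing/events/sunrpc/enable",
			      O_WRONLY);
		if (fd < 0)
			return -1;
		if (write(fd, "1", 1) != 1) {
			close(fd);
			return -1;
		}
		return close(fd);
	}

The same effect is available from a shell by writing 1 to that file or by
using trace-cmd; the resulting records appear in /sys/kernel/tracing/trace.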