mirror of https://gitee.com/openkylin/linux.git
SUNRPC: Add function rpc_sleep_on_timeout()
Clean up the RPC task sleep interfaces by replacing the task->tk_timeout
'hidden parameter' to rpc_sleep_on() with a new function that takes an
absolute timeout.

Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
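The conversion pattern repeats throughout the diff below: instead of storing a
relative interval in task->tk_timeout before calling rpc_sleep_on(), callers
pass an absolute deadline (in jiffies) to the new rpc_sleep_on_timeout()
helper. A minimal before/after sketch, distilled from the rpc_delay() hunk in
the scheduler changes below (an excerpt of kernel code, not a standalone
buildable unit):

    /* Before: the timeout is a 'hidden parameter' smuggled through the task. */
    void rpc_delay(struct rpc_task *task, unsigned long delay)
    {
            task->tk_timeout = delay;                      /* relative interval */
            rpc_sleep_on(&delay_queue, task, __rpc_atrun);
    }

    /* After: the caller hands the queue an explicit absolute deadline. */
    void rpc_delay(struct rpc_task *task, unsigned long delay)
    {
            rpc_sleep_on_timeout(&delay_queue, task, __rpc_atrun, jiffies + delay);
    }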
This commit is contained in:
parent 8357a9b60f
commit 6b2e685627
fs/nfs/nfs4proc.c

@@ -978,10 +978,8 @@ int nfs4_setup_sequence(struct nfs_client *client,
         if (res->sr_slot != NULL)
                 goto out_start;
 
-        if (session) {
+        if (session)
                 tbl = &session->fc_slot_table;
-                task->tk_timeout = 0;
-        }
 
         spin_lock(&tbl->slot_tbl_lock);
         /* The state manager will wait until the slot table is empty */
@@ -990,9 +988,8 @@ int nfs4_setup_sequence(struct nfs_client *client,
 
         slot = nfs4_alloc_slot(tbl);
         if (IS_ERR(slot)) {
-                /* Try again in 1/4 second */
-                if (slot == ERR_PTR(-ENOMEM))
-                        task->tk_timeout = HZ >> 2;
+                if (slot == ERR_PTR(-ENOMEM))
+                        goto out_sleep_timeout;
                 goto out_sleep;
         }
         spin_unlock(&tbl->slot_tbl_lock);
@@ -1004,7 +1001,16 @@ int nfs4_setup_sequence(struct nfs_client *client,
         nfs41_sequence_res_init(res);
         rpc_call_start(task);
         return 0;
-
+out_sleep_timeout:
+        /* Try again in 1/4 second */
+        if (args->sa_privileged)
+                rpc_sleep_on_priority_timeout(&tbl->slot_tbl_waitq, task,
+                                jiffies + (HZ >> 2), RPC_PRIORITY_PRIVILEGED);
+        else
+                rpc_sleep_on_timeout(&tbl->slot_tbl_waitq, task,
+                                NULL, jiffies + (HZ >> 2));
+        spin_unlock(&tbl->slot_tbl_lock);
+        return -EAGAIN;
 out_sleep:
         if (args->sa_privileged)
                 rpc_sleep_on_priority(&tbl->slot_tbl_waitq, task,

include/linux/sunrpc/sched.h

@@ -35,7 +35,6 @@ struct rpc_wait {
         struct list_head        list;           /* wait queue links */
         struct list_head        links;          /* Links to related tasks */
         struct list_head        timer_list;     /* Timer list */
-        unsigned long           expires;
 };
 
 /*
@@ -227,8 +226,16 @@ void rpc_execute(struct rpc_task *);
 void    rpc_init_priority_wait_queue(struct rpc_wait_queue *, const char *);
 void    rpc_init_wait_queue(struct rpc_wait_queue *, const char *);
 void    rpc_destroy_wait_queue(struct rpc_wait_queue *);
+void    rpc_sleep_on_timeout(struct rpc_wait_queue *queue,
+                                        struct rpc_task *task,
+                                        rpc_action action,
+                                        unsigned long timeout);
 void    rpc_sleep_on(struct rpc_wait_queue *, struct rpc_task *,
                                         rpc_action action);
+void    rpc_sleep_on_priority_timeout(struct rpc_wait_queue *queue,
+                                        struct rpc_task *task,
+                                        unsigned long timeout,
+                                        int priority);
 void    rpc_sleep_on_priority(struct rpc_wait_queue *,
                                         struct rpc_task *,
                                         int priority);

net/sunrpc/auth_gss/auth_gss.c

@@ -581,8 +581,8 @@ gss_refresh_upcall(struct rpc_task *task)
                 /* XXX: warning on the first, under the assumption we
                  * shouldn't normally hit this case on a refresh. */
                 warn_gssd();
-                task->tk_timeout = 15*HZ;
-                rpc_sleep_on(&pipe_version_rpc_waitqueue, task, NULL);
+                rpc_sleep_on_timeout(&pipe_version_rpc_waitqueue,
+                                task, NULL, jiffies + (15 * HZ));
                 err = -EAGAIN;
                 goto out;
         }
@@ -595,7 +595,6 @@ gss_refresh_upcall(struct rpc_task *task)
         if (gss_cred->gc_upcall != NULL)
                 rpc_sleep_on(&gss_cred->gc_upcall->rpc_waitqueue, task, NULL);
         else if (gss_msg->ctx == NULL && gss_msg->msg.errno >= 0) {
-                task->tk_timeout = 0;
                 gss_cred->gc_upcall = gss_msg;
                 /* gss_upcall_callback will release the reference to gss_upcall_msg */
                 refcount_inc(&gss_msg->count);

net/sunrpc/clnt.c

@@ -1851,7 +1851,6 @@ call_bind(struct rpc_task *task)
         if (!xprt_prepare_transmit(task))
                 return;
 
-        task->tk_timeout = xprt->bind_timeout;
         xprt->ops->rpcbind(task);
 }
 

net/sunrpc/rpcb_clnt.c

@@ -694,7 +694,8 @@ void rpcb_getport_async(struct rpc_task *task)
 
         /* Put self on the wait queue to ensure we get notified if
          * some other task is already attempting to bind the port */
-        rpc_sleep_on(&xprt->binding, task, NULL);
+        rpc_sleep_on_timeout(&xprt->binding, task,
+                        NULL, jiffies + xprt->bind_timeout);
 
         if (xprt_test_and_set_binding(xprt)) {
                 dprintk("RPC: %5u %s: waiting for another binder\n",

net/sunrpc/sched.c

@@ -66,7 +66,7 @@ struct workqueue_struct *xprtiod_workqueue __read_mostly;
 static void
 __rpc_disable_timer(struct rpc_wait_queue *queue, struct rpc_task *task)
 {
-        if (task->tk_timeout == 0)
+        if (list_empty(&task->u.tk_wait.timer_list))
                 return;
         dprintk("RPC: %5u disabling timer\n", task->tk_pid);
         task->tk_timeout = 0;
@@ -86,17 +86,15 @@ rpc_set_queue_timer(struct rpc_wait_queue *queue, unsigned long expires)
  * Set up a timer for the current task.
  */
 static void
-__rpc_add_timer(struct rpc_wait_queue *queue, struct rpc_task *task)
+__rpc_add_timer(struct rpc_wait_queue *queue, struct rpc_task *task,
+                unsigned long timeout)
 {
-        if (!task->tk_timeout)
-                return;
-
         dprintk("RPC: %5u setting alarm for %u ms\n",
-                task->tk_pid, jiffies_to_msecs(task->tk_timeout));
+                task->tk_pid, jiffies_to_msecs(timeout - jiffies));
 
-        task->u.tk_wait.expires = jiffies + task->tk_timeout;
-        if (list_empty(&queue->timer_list.list) || time_before(task->u.tk_wait.expires, queue->timer_list.expires))
-                rpc_set_queue_timer(queue, task->u.tk_wait.expires);
+        task->tk_timeout = timeout;
+        if (list_empty(&queue->timer_list.list) || time_before(timeout, queue->timer_list.expires))
+                rpc_set_queue_timer(queue, timeout);
         list_add(&task->u.tk_wait.timer_list, &queue->timer_list.list);
 }
 
@@ -188,6 +186,7 @@ static void __rpc_add_wait_queue(struct rpc_wait_queue *queue,
         if (RPC_IS_QUEUED(task))
                 return;
 
+        INIT_LIST_HEAD(&task->u.tk_wait.timer_list);
         if (RPC_IS_PRIORITY(queue))
                 __rpc_add_wait_queue_priority(queue, task, queue_priority);
         else if (RPC_IS_SWAPPER(task))
@@ -371,7 +370,17 @@ static void __rpc_sleep_on_priority(struct rpc_wait_queue *q,
 
         __rpc_add_wait_queue(q, task, queue_priority);
 
-        __rpc_add_timer(q, task);
+}
+
+static void __rpc_sleep_on_priority_timeout(struct rpc_wait_queue *q,
+                struct rpc_task *task, unsigned long timeout,
+                unsigned char queue_priority)
+{
+        if (time_is_after_jiffies(timeout)) {
+                __rpc_sleep_on_priority(q, task, queue_priority);
+                __rpc_add_timer(q, task, timeout);
+        } else
+                task->tk_status = -ETIMEDOUT;
 }
 
 static void rpc_set_tk_callback(struct rpc_task *task, rpc_action action)
@@ -391,14 +400,32 @@ static bool rpc_sleep_check_activated(struct rpc_task *task)
         return true;
 }
 
-void rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
-                                rpc_action action)
+void rpc_sleep_on_timeout(struct rpc_wait_queue *q, struct rpc_task *task,
+                                rpc_action action, unsigned long timeout)
 {
         if (!rpc_sleep_check_activated(task))
                 return;
 
         rpc_set_tk_callback(task, action);
 
+        /*
+         * Protect the queue operations.
+         */
+        spin_lock_bh(&q->lock);
+        __rpc_sleep_on_priority_timeout(q, task, timeout, task->tk_priority);
+        spin_unlock_bh(&q->lock);
+}
+EXPORT_SYMBOL_GPL(rpc_sleep_on_timeout);
+
+void rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
+                                rpc_action action)
+{
+        if (!rpc_sleep_check_activated(task))
+                return;
+
+        rpc_set_tk_callback(task, action);
+
+        WARN_ON_ONCE(task->tk_timeout != 0);
         /*
          * Protect the queue operations.
          */
@@ -408,12 +435,29 @@ void rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
 }
 EXPORT_SYMBOL_GPL(rpc_sleep_on);
 
+void rpc_sleep_on_priority_timeout(struct rpc_wait_queue *q,
+                struct rpc_task *task, unsigned long timeout, int priority)
+{
+        if (!rpc_sleep_check_activated(task))
+                return;
+
+        priority -= RPC_PRIORITY_LOW;
+        /*
+         * Protect the queue operations.
+         */
+        spin_lock_bh(&q->lock);
+        __rpc_sleep_on_priority_timeout(q, task, timeout, priority);
+        spin_unlock_bh(&q->lock);
+}
+EXPORT_SYMBOL_GPL(rpc_sleep_on_priority_timeout);
+
 void rpc_sleep_on_priority(struct rpc_wait_queue *q, struct rpc_task *task,
                 int priority)
 {
         if (!rpc_sleep_check_activated(task))
                 return;
 
+        WARN_ON_ONCE(task->tk_timeout != 0);
         priority -= RPC_PRIORITY_LOW;
         /*
          * Protect the queue operations.
@@ -711,7 +755,7 @@ static void __rpc_queue_timer_fn(struct timer_list *t)
         spin_lock(&queue->lock);
         expires = now = jiffies;
         list_for_each_entry_safe(task, n, &queue->timer_list.list, u.tk_wait.timer_list) {
-                timeo = task->u.tk_wait.expires;
+                timeo = task->tk_timeout;
                 if (time_after_eq(now, timeo)) {
                         dprintk("RPC: %5u timeout\n", task->tk_pid);
                         task->tk_status = -ETIMEDOUT;
@@ -737,8 +781,7 @@ static void __rpc_atrun(struct rpc_task *task)
  */
 void rpc_delay(struct rpc_task *task, unsigned long delay)
 {
-        task->tk_timeout = delay;
-        rpc_sleep_on(&delay_queue, task, __rpc_atrun);
+        rpc_sleep_on_timeout(&delay_queue, task, __rpc_atrun, jiffies + delay);
 }
 EXPORT_SYMBOL_GPL(rpc_delay);
 

net/sunrpc/xprt.c

@@ -209,9 +209,12 @@ int xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
 out_sleep:
         dprintk("RPC: %5u failed to lock transport %p\n",
                         task->tk_pid, xprt);
-        task->tk_timeout = RPC_IS_SOFT(task) ? req->rq_timeout : 0;
         task->tk_status = -EAGAIN;
-        rpc_sleep_on(&xprt->sending, task, NULL);
+        if (RPC_IS_SOFT(task))
+                rpc_sleep_on_timeout(&xprt->sending, task, NULL,
+                                jiffies + req->rq_timeout);
+        else
+                rpc_sleep_on(&xprt->sending, task, NULL);
         return 0;
 }
 EXPORT_SYMBOL_GPL(xprt_reserve_xprt);
@@ -273,9 +276,12 @@ int xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
         xprt_clear_locked(xprt);
 out_sleep:
         dprintk("RPC: %5u failed to lock transport %p\n", task->tk_pid, xprt);
-        task->tk_timeout = RPC_IS_SOFT(task) ? req->rq_timeout : 0;
         task->tk_status = -EAGAIN;
-        rpc_sleep_on(&xprt->sending, task, NULL);
+        if (RPC_IS_SOFT(task))
+                rpc_sleep_on_timeout(&xprt->sending, task, NULL,
+                                jiffies + req->rq_timeout);
+        else
+                rpc_sleep_on(&xprt->sending, task, NULL);
         return 0;
 }
 EXPORT_SYMBOL_GPL(xprt_reserve_xprt_cong);
@@ -787,9 +793,9 @@ void xprt_connect(struct rpc_task *task)
                 xprt->ops->close(xprt);
 
         if (!xprt_connected(xprt)) {
-                task->tk_timeout = task->tk_rqstp->rq_timeout;
                 task->tk_rqstp->rq_connect_cookie = xprt->connect_cookie;
-                rpc_sleep_on(&xprt->pending, task, NULL);
+                rpc_sleep_on_timeout(&xprt->pending, task, NULL,
+                                jiffies + task->tk_rqstp->rq_timeout);
 
                 if (test_bit(XPRT_CLOSING, &xprt->state))
                         return;
@@ -1080,8 +1086,8 @@ void xprt_wait_for_reply_request_def(struct rpc_task *task)
 {
         struct rpc_rqst *req = task->tk_rqstp;
 
-        task->tk_timeout = req->rq_timeout;
-        rpc_sleep_on(&req->rq_xprt->pending, task, xprt_timer);
+        rpc_sleep_on_timeout(&req->rq_xprt->pending, task, xprt_timer,
+                        jiffies + req->rq_timeout);
 }
 EXPORT_SYMBOL_GPL(xprt_wait_for_reply_request_def);
 
@@ -1099,12 +1105,14 @@ void xprt_wait_for_reply_request_rtt(struct rpc_task *task)
         struct rpc_rtt *rtt = clnt->cl_rtt;
         struct rpc_rqst *req = task->tk_rqstp;
         unsigned long max_timeout = clnt->cl_timeout->to_maxval;
+        unsigned long timeout;
 
-        task->tk_timeout = rpc_calc_rto(rtt, timer);
-        task->tk_timeout <<= rpc_ntimeo(rtt, timer) + req->rq_retries;
-        if (task->tk_timeout > max_timeout || task->tk_timeout == 0)
-                task->tk_timeout = max_timeout;
-        rpc_sleep_on(&req->rq_xprt->pending, task, xprt_timer);
+        timeout = rpc_calc_rto(rtt, timer);
+        timeout <<= rpc_ntimeo(rtt, timer) + req->rq_retries;
+        if (timeout > max_timeout || timeout == 0)
+                timeout = max_timeout;
+        rpc_sleep_on_timeout(&req->rq_xprt->pending, task, xprt_timer,
+                        jiffies + timeout);
 }
 EXPORT_SYMBOL_GPL(xprt_wait_for_reply_request_rtt);
 
@@ -1656,7 +1664,6 @@ void xprt_reserve(struct rpc_task *task)
         if (task->tk_rqstp != NULL)
                 return;
 
-        task->tk_timeout = 0;
         task->tk_status = -EAGAIN;
         if (!xprt_throttle_congested(xprt, task))
                 xprt_do_reserve(xprt, task);
@@ -1679,7 +1686,6 @@ void xprt_retry_reserve(struct rpc_task *task)
         if (task->tk_rqstp != NULL)
                 return;
 
-        task->tk_timeout = 0;
         task->tk_status = -EAGAIN;
         xprt_do_reserve(xprt, task);
 }