/*
 * (C) 2001 Clemson University and The University of Chicago
 * (C) 2011 Omnibond Systems
 *
 * Changes by Acxiom Corporation to implement generic service_operation()
 * function, Copyright Acxiom Corporation, 2005.
 *
 * See COPYING in top-level directory.
 */

/*
 * In-kernel waitqueue operations.
 */

#include "protocol.h"
#include "orangefs-kernel.h"
#include "orangefs-bufmap.h"

static int wait_for_matching_downcall(struct orangefs_kernel_op_s *, long, bool);
static void orangefs_clean_up_interrupted_operation(struct orangefs_kernel_op_s *);
/*
 * Walk the list of operations that are present in the request queue
 * and mark them as purged.
 * NOTE: This is called from device close after client-core has
 * guaranteed that no new operations can appear on the list, since
 * client-core is about to exit anyway.
 */
void purge_waiting_ops(void)
{
	struct orangefs_kernel_op_s *op;

	spin_lock(&orangefs_request_list_lock);
	list_for_each_entry(op, &orangefs_request_list, list) {
		gossip_debug(GOSSIP_WAIT_DEBUG,
			     "pvfs2-client-core: purging op tag %llu %s\n",
			     llu(op->tag),
			     get_opname_string(op));
		set_op_state_purged(op);
		gossip_debug(GOSSIP_DEV_DEBUG,
			     "%s: op:%s: op_state:%d: process:%s:\n",
			     __func__,
			     get_opname_string(op),
			     op->op_state,
			     current->comm);
	}
	spin_unlock(&orangefs_request_list_lock);
}
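
/*
 * Illustrative sketch only, not part of this translation unit: how the
 * character-device release path is expected to reach purge_waiting_ops()
 * once client-core closes the request device.  Everything here except
 * purge_waiting_ops() itself is an assumption for illustration; the real
 * release handler lives in the device-request code.
 *
 *	static int example_devreq_release(struct inode *inode,
 *					  struct file *file)
 *	{
 *		// No new upcalls can be queued once client-core has closed
 *		// the device, so mark every op still waiting as purged ...
 *		purge_waiting_ops();
 *
 *		// ... and (assumed helper) note that the daemon is gone, so
 *		// that __is_daemon_in_service() fails and future waiters use
 *		// a finite timeout instead of MAX_SCHEDULE_TIMEOUT.
 *		example_mark_daemon_stopped();
 *		return 0;
 *	}
 */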

/*
 * Submits an ORANGEFS operation and waits for it to complete.
 *
 * Note op->downcall.status will contain the status of the operation (in
 * errno format), whether provided by pvfs2-client or as a result of a
 * failure to service the operation.  If the caller wishes to distinguish,
 * op->state can be checked to see whether the op was actually serviced.
 *
 * Returns the contents of op->downcall.status for convenience.
 */
int service_operation(struct orangefs_kernel_op_s *op,
		      const char *op_name,
		      int flags)
{
	long timeout = MAX_SCHEDULE_TIMEOUT;
	int ret = 0;

	DEFINE_WAIT(wait_entry);

	op->upcall.tgid = current->tgid;
	op->upcall.pid = current->pid;

retry_servicing:
	op->downcall.status = 0;
	gossip_debug(GOSSIP_WAIT_DEBUG,
		     "%s: %s op:%p: process:%s: pid:%d:\n",
		     __func__,
		     op_name,
		     op,
		     current->comm,
		     current->pid);

	/*
	 * If ORANGEFS_OP_NO_MUTEX was set in flags, we need to avoid
	 * acquiring the request_mutex because we're servicing a
	 * high priority remount operation and the request_mutex is
	 * already taken.
	 */
	if (!(flags & ORANGEFS_OP_NO_MUTEX)) {
		if (flags & ORANGEFS_OP_INTERRUPTIBLE)
			ret = mutex_lock_interruptible(&request_mutex);
		else
			ret = mutex_lock_killable(&request_mutex);
		/*
		 * check to see if we were interrupted while waiting for
		 * the mutex
		 */
		if (ret < 0) {
			op->downcall.status = ret;
			gossip_debug(GOSSIP_WAIT_DEBUG,
				     "%s: service_operation interrupted.\n",
				     __func__);
			return ret;
		}
	}

	/* queue up the operation */
	spin_lock(&orangefs_request_list_lock);
	spin_lock(&op->lock);
	set_op_state_waiting(op);
	gossip_debug(GOSSIP_DEV_DEBUG,
		     "%s: op:%s: op_state:%d: process:%s:\n",
		     __func__,
		     get_opname_string(op),
		     op->op_state,
		     current->comm);
	/* add high priority remount op to the front of the line. */
	if (flags & ORANGEFS_OP_PRIORITY)
		list_add(&op->list, &orangefs_request_list);
	else
		list_add_tail(&op->list, &orangefs_request_list);
	spin_unlock(&op->lock);
	wake_up_interruptible(&orangefs_request_list_waitq);
	if (!__is_daemon_in_service()) {
		gossip_debug(GOSSIP_WAIT_DEBUG,
			     "%s: client core is NOT in service.\n",
			     __func__);
		timeout = op_timeout_secs * HZ;
	}
	spin_unlock(&orangefs_request_list_lock);

	if (!(flags & ORANGEFS_OP_NO_MUTEX))
		mutex_unlock(&request_mutex);

	ret = wait_for_matching_downcall(op, timeout,
					 flags & ORANGEFS_OP_INTERRUPTIBLE);

	gossip_debug(GOSSIP_WAIT_DEBUG,
		     "%s: wait_for_matching_downcall returned %d for %p\n",
		     __func__,
		     ret,
		     op);

	/* got matching downcall; make sure status is in errno format */
	if (!ret) {
		spin_unlock(&op->lock);
		op->downcall.status =
		    orangefs_normalize_to_errno(op->downcall.status);
		ret = op->downcall.status;
		goto out;
	}

	/* failed to get matching downcall */
	if (ret == -ETIMEDOUT) {
		gossip_err("%s: %s -- wait timed out; aborting attempt.\n",
			   __func__,
			   op_name);
	}

	/*
	 * remove a waiting op from the request list or
	 * remove an in-progress op from the in-progress list.
	 */
	orangefs_clean_up_interrupted_operation(op);

	op->downcall.status = ret;
	/* retry if the operation was not serviced and a retry is requested */
	if (ret == -EAGAIN) {
		op->attempts++;
		timeout = op_timeout_secs * HZ;
		gossip_debug(GOSSIP_WAIT_DEBUG,
			     "orangefs: tag %llu (%s) -- operation to be retried (attempt %d)\n",
			     llu(op->tag),
			     op_name,
			     op->attempts);

		/*
		 * io ops (ops that use the shared memory buffer) have
		 * to be returned to their caller for a retry. Other ops
		 * can just be recycled here.
		 */
		if (!op->uses_shared_memory)
			goto retry_servicing;
	}

out:
	gossip_debug(GOSSIP_WAIT_DEBUG,
		     "%s: %s returning: %d for %p.\n",
		     __func__,
		     op_name,
		     ret,
		     op);
	return ret;
}
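
/*
 * Illustrative sketch only, not part of this translation unit: the usual
 * calling pattern for service_operation().  The op-cache helpers and the
 * getattr field names below are written from memory and should be treated
 * as assumptions; real callers live in the VFS-facing files of this module.
 *
 *	static int example_getattr(struct inode *inode)
 *	{
 *		struct orangefs_kernel_op_s *new_op;
 *		int ret;
 *
 *		// assumed helper: allocate an op of the desired type
 *		new_op = op_alloc(ORANGEFS_VFS_OP_GETATTR);
 *		if (!new_op)
 *			return -ENOMEM;
 *
 *		// fill in the upcall request (field names are illustrative)
 *		new_op->upcall.req.getattr.refn = ORANGEFS_I(inode)->refn;
 *
 *		// block until client-core services the op (or we give up);
 *		// ret mirrors new_op->downcall.status in errno format
 *		ret = service_operation(new_op, __func__,
 *					get_interruptible_flag(inode));
 *		if (ret == 0) {
 *			// consume new_op->downcall.resp.getattr here
 *		}
 *
 *		// assumed helper: return the op to its cache
 *		op_release(new_op);
 *		return ret;
 *	}
 */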

/* This can get called on an I/O op if it had a bad service_operation. */
bool orangefs_cancel_op_in_progress(struct orangefs_kernel_op_s *op)
{
	u64 tag = op->tag;

	if (!op_state_in_progress(op))
		return false;

	op->slot_to_free = op->upcall.req.io.buf_index;
	memset(&op->upcall, 0, sizeof(op->upcall));
	memset(&op->downcall, 0, sizeof(op->downcall));
	op->upcall.type = ORANGEFS_VFS_OP_CANCEL;
	op->upcall.req.cancel.op_tag = tag;
	op->downcall.type = ORANGEFS_VFS_OP_INVALID;
	op->downcall.status = -1;
	orangefs_new_tag(op);

	spin_lock(&orangefs_request_list_lock);
	/* orangefs_request_list_lock is enough of a barrier here */
	if (!__is_daemon_in_service()) {
		spin_unlock(&orangefs_request_list_lock);
		return false;
	}
	spin_lock(&op->lock);
	set_op_state_waiting(op);
	gossip_debug(GOSSIP_DEV_DEBUG,
		     "%s: op:%s: op_state:%d: process:%s:\n",
		     __func__,
		     get_opname_string(op),
		     op->op_state,
		     current->comm);
	list_add(&op->list, &orangefs_request_list);
	spin_unlock(&op->lock);
	spin_unlock(&orangefs_request_list_lock);

	gossip_debug(GOSSIP_WAIT_DEBUG,
		     "Attempting ORANGEFS operation cancellation of tag %llu\n",
		     llu(tag));
	return true;
}
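
/*
 * Illustrative sketch only, not part of this translation unit: how an I/O
 * path might use orangefs_cancel_op_in_progress() after service_operation()
 * failed on an op that uses the shared memory buffer.  The surrounding
 * control flow is an assumption for illustration; the real error handling
 * lives in the file I/O code of this module.
 *
 *	// after service_operation(new_op, ...) returned an error for an
 *	// I/O op that client-core may already be working on:
 *	if (ret < 0 && op_state_in_progress(new_op)) {
 *		// reuse the op to queue a cancel upcall for the old tag;
 *		// returns false if client-core is gone, in which case
 *		// there is nothing left to cancel
 *		if (orangefs_cancel_op_in_progress(new_op))
 *			gossip_debug(GOSSIP_WAIT_DEBUG,
 *				     "cancel queued for op %p\n", new_op);
 *	}
 */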

/*
 * Change an op to the "given up" state and remove it from its list.
 */
static void
orangefs_clean_up_interrupted_operation(struct orangefs_kernel_op_s *op)
{
	/*
	 * Handle interrupted cases depending on what state we were in when
	 * the interruption was detected.
	 *
	 * Called with op->lock held.
	 */

	/*
	 * List manipulation code elsewhere will ignore ops that
	 * have been given up upon.
	 */
	op->op_state |= OP_VFS_STATE_GIVEN_UP;

	if (list_empty(&op->list)) {
		/* caught copying to/from daemon */
		BUG_ON(op_state_serviced(op));
		spin_unlock(&op->lock);
		wait_for_completion(&op->waitq);
	} else if (op_state_waiting(op)) {
		/*
		 * upcall hasn't been read; remove op from upcall request
		 * list.
		 */
		spin_unlock(&op->lock);
		spin_lock(&orangefs_request_list_lock);
		list_del_init(&op->list);
		spin_unlock(&orangefs_request_list_lock);
		gossip_debug(GOSSIP_WAIT_DEBUG,
			     "Interrupted: Removed op %p from request_list\n",
			     op);
	} else if (op_state_in_progress(op)) {
		/* op must be removed from the in-progress htable */
		spin_unlock(&op->lock);
		spin_lock(&htable_ops_in_progress_lock);
		list_del_init(&op->list);
		spin_unlock(&htable_ops_in_progress_lock);
		gossip_debug(GOSSIP_WAIT_DEBUG,
			     "Interrupted: Removed op %p from htable_ops_in_progress\n",
			     op);
	} else {
		spin_unlock(&op->lock);
		gossip_err("interrupted operation is in a weird state 0x%x\n",
			   op->op_state);
	}
	reinit_completion(&op->waitq);
}

/*
 * Sleeps on a waitqueue waiting for the matching downcall.
 * If client-core finishes servicing the op, then we are good to go.
 * Otherwise, if client-core exits, we get woken up here and retry with a
 * timeout.
 *
 * When this call returns to the caller, the specified op will no
 * longer be in either the in_progress hash table or on the request list.
 *
 * Returns 0 on success and -errno on failure.
 * Errors are:
 * EAGAIN in case we want the caller to requeue and try again.
 * EINTR/EIO/ETIMEDOUT indicating we are done trying to service this
 * operation since client-core seems to be exiting too often
 * or because we were interrupted.
 *
 * Returns with op->lock taken.
 */
static int wait_for_matching_downcall(struct orangefs_kernel_op_s *op,
				      long timeout,
				      bool interruptible)
{
	long n;

	/*
	 * There's a "schedule_timeout" inside of these wait
	 * primitives, during which the op is out of the hands of the
	 * user process that needs something done and is being
	 * manipulated by the client-core process.
	 */
	if (interruptible)
		n = wait_for_completion_interruptible_timeout(&op->waitq,
							      timeout);
	else
		n = wait_for_completion_killable_timeout(&op->waitq, timeout);

	spin_lock(&op->lock);

	if (op_state_serviced(op))
		return 0;

	if (unlikely(n < 0)) {
		gossip_debug(GOSSIP_WAIT_DEBUG,
			     "%s: operation interrupted, tag %llu, %p\n",
			     __func__,
			     llu(op->tag),
			     op);
		return -EINTR;
	}
	if (op_state_purged(op)) {
		gossip_debug(GOSSIP_WAIT_DEBUG,
			     "%s: operation purged, tag %llu, %p, %d\n",
			     __func__,
			     llu(op->tag),
			     op,
			     op->attempts);
		return (op->attempts < ORANGEFS_PURGE_RETRY_COUNT) ?
			 -EAGAIN :
			 -EIO;
	}
	/* must have timed out, then... */
	gossip_debug(GOSSIP_WAIT_DEBUG,
		     "%s: operation timed out, tag %llu, %p, %d\n",
		     __func__,
		     llu(op->tag),
		     op,
		     op->attempts);
	return -ETIMEDOUT;
}