staging/lustre/mgc: detach MGC dev on error

lustre_start_mgc() creates the MGC device; if an error happens later
in ll_fill_super(), this device is left attached, and subsequent
mounts fail, repeatedly complaining that the MGC device already
exists on the client node.

It turns out that the device is still referenced by the MGC config
llog data, which is queued for the MGC lock requeue thread to retry
taking its MGC lock; in the normal case this llog reference is only
released in mgc_blocking_ast() when the filesystem is unmounted.
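
To see why a lingering reference blocks cleanup, here is a minimal
userspace analogue (not Lustre code; the struct and function names
are invented for illustration):

/* Illustrative userspace analogue only -- not Lustre code.
 * A "device" cannot be detached while someone still holds a reference,
 * which is roughly why the MGC obd_device stays attached while the
 * config llog data keeps its reference until unmount time.
 */
#include <stdio.h>

struct fake_device {
	int refcount;			/* references held by users */
};

/* Detach succeeds only when nobody references the device anymore. */
static int fake_detach(struct fake_device *dev)
{
	if (dev->refcount > 0) {
		printf("detach refused: %d reference(s) still held\n",
		       dev->refcount);
		return -1;		/* device stays attached */
	}
	printf("device detached\n");
	return 0;
}

int main(void)
{
	struct fake_device dev = { .refcount = 0 };

	dev.refcount++;			/* config llog data takes a reference */
	fake_detach(&dev);		/* fails while the reference is held */

	dev.refcount--;			/* reference finally dropped */
	fake_detach(&dev);		/* now succeeds */
	return 0;
}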

This patch makes mgc_precleanup() wake up the requeue thread to
handle the config llog data.
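
The shape of that wake-up is the usual "set a state flag, wake the
waiter, let it do the work" pattern. A minimal userspace sketch of it,
using a pthread mutex/condvar in place of the kernel's
rq_state/rq_waitq wait queue (only the RQ_* flag names come from the
patch; everything else here is invented):

/* Userspace sketch of "set a flag, wake the worker, let it clean up".
 * Illustrative only -- the kernel code uses rq_state/rq_waitq and
 * l_wait_event() instead of a pthread mutex/condvar.
 */
#include <pthread.h>
#include <stdio.h>

#define RQ_PRECLEANUP	0x10
#define RQ_STOP		0x08

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t waitq = PTHREAD_COND_INITIALIZER;
static int state;

static void *requeue_worker(void *arg)
{
	for (;;) {
		pthread_mutex_lock(&lock);
		while (!(state & (RQ_PRECLEANUP | RQ_STOP)))
			pthread_cond_wait(&waitq, &lock);

		if (state & RQ_PRECLEANUP) {
			state &= ~RQ_PRECLEANUP;
			printf("worker: releasing config llog reference\n");
		}
		if (state & RQ_STOP) {
			pthread_mutex_unlock(&lock);
			break;
		}
		pthread_mutex_unlock(&lock);
	}
	return NULL;
}

/* What a precleanup-style caller does: set the flag, wake the worker. */
static void wake_worker(int flags)
{
	pthread_mutex_lock(&lock);
	state |= flags;
	pthread_mutex_unlock(&lock);
	pthread_cond_signal(&waitq);
}

int main(void)
{
	pthread_t tid;

	pthread_create(&tid, NULL, requeue_worker, NULL);
	wake_worker(RQ_PRECLEANUP);	/* ask the worker to clean up our state */
	wake_worker(RQ_STOP);		/* then ask it to exit */
	pthread_join(tid, NULL);
	return 0;
}

Because the flag stays set until the worker clears it, a wake-up
issued before the worker reaches its wait cannot be lost in this
sketch.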

This patch also makes mgc_setup() wait for mgc_requeue_thread() to
start before moving on.
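
That is a standard startup handshake: the new thread signals a
completion once it is actually running, and its creator blocks on it.
A hedged userspace sketch, emulating the rq_start completion with a
mutex/condvar (all other names are invented; this is not the kernel
implementation):

/* Userspace sketch of "the creator waits until the worker is running",
 * emulating a kernel completion (rq_start) with a mutex/condvar.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t started_cond = PTHREAD_COND_INITIALIZER;
static bool started;

static void *worker(void *arg)
{
	/* Signal the creator once the thread is actually running. */
	pthread_mutex_lock(&lock);
	started = true;
	pthread_cond_signal(&started_cond);
	pthread_mutex_unlock(&lock);

	/* ... the requeue-style main loop would run here ... */
	return NULL;
}

int main(void)
{
	pthread_t tid;

	pthread_create(&tid, NULL, worker, NULL);

	/* Analogue of wait_for_completion(&rq_start): do not proceed
	 * until the worker is known to be up and able to see later
	 * wake-ups.
	 */
	pthread_mutex_lock(&lock);
	while (!started)
		pthread_cond_wait(&started_cond, &lock);
	pthread_mutex_unlock(&lock);

	printf("setup: worker confirmed running\n");
	pthread_join(tid, NULL);
	return 0;
}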

Signed-off-by: Bobi Jam <bobijam.xu@intel.com>
Reviewed-on: http://review.whamcloud.com/11765
Intel-bug-id: https://jira.hpdd.intel.com/browse/LU-4943
Reviewed-by: Ryan Haasken <haasken@cray.com>
Signed-off-by: Oleg Drokin <oleg.drokin@intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Bobi Jam 2015-03-25 21:53:21 -04:00 committed by Greg Kroah-Hartman
parent 1c8aa54aaf
commit 4345abb2c3
1 changed file with 25 additions and 11 deletions


@@ -484,9 +484,11 @@ int lprocfs_mgc_rd_ir_state(struct seq_file *m, void *data)
 #define RQ_NOW 0x2
 #define RQ_LATER 0x4
 #define RQ_STOP 0x8
+#define RQ_PRECLEANUP 0x10
 static int rq_state;
 static wait_queue_head_t rq_waitq;
 static DECLARE_COMPLETION(rq_exit);
+static DECLARE_COMPLETION(rq_start);
 
 static void do_requeue(struct config_llog_data *cld)
 {
@@ -515,6 +517,8 @@ static void do_requeue(struct config_llog_data *cld)
 
 static int mgc_requeue_thread(void *data)
 {
+	bool first = true;
+
 	CDEBUG(D_MGC, "Starting requeue thread\n");
 
 	/* Keep trying failed locks periodically */
@@ -531,13 +535,19 @@ static int mgc_requeue_thread(void *data)
 		rq_state &= ~(RQ_NOW | RQ_LATER);
 		spin_unlock(&config_list_lock);
 
+		if (first) {
+			first = false;
+			complete(&rq_start);
+		}
+
 		/* Always wait a few seconds to allow the server who
 		   caused the lock revocation to finish its setup, plus some
 		   random so everyone doesn't try to reconnect at once. */
 		to = MGC_TIMEOUT_MIN_SECONDS * HZ;
 		to += rand * HZ / 100; /* rand is centi-seconds */
 		lwi = LWI_TIMEOUT(to, NULL, NULL);
-		l_wait_event(rq_waitq, rq_state & RQ_STOP, &lwi);
+		l_wait_event(rq_waitq, rq_state & (RQ_STOP | RQ_PRECLEANUP),
+			     &lwi);
 
 		/*
 		 * iterate & processing through the list. for each cld, process
@@ -550,6 +560,7 @@ static int mgc_requeue_thread(void *data)
 		cld_prev = NULL;
 
 		spin_lock(&config_list_lock);
+		rq_state &= ~RQ_PRECLEANUP;
 		list_for_each_entry(cld, &config_llog_list,
 				    cld_list_chain) {
 			if (!cld->cld_lostlock)
@@ -666,24 +677,26 @@ static atomic_t mgc_count = ATOMIC_INIT(0);
 static int mgc_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage)
 {
 	int rc = 0;
+	int temp;
 
 	switch (stage) {
 	case OBD_CLEANUP_EARLY:
 		break;
 	case OBD_CLEANUP_EXPORTS:
 		if (atomic_dec_and_test(&mgc_count)) {
-			int running;
+			LASSERT(rq_state & RQ_RUNNING);
 			/* stop requeue thread */
-			spin_lock(&config_list_lock);
-			running = rq_state & RQ_RUNNING;
-			if (running)
-				rq_state |= RQ_STOP;
-			spin_unlock(&config_list_lock);
-			if (running) {
-				wake_up(&rq_waitq);
-				wait_for_completion(&rq_exit);
-			}
+			temp = RQ_STOP;
+		} else {
+			/* wakeup requeue thread to clean our cld */
+			temp = RQ_NOW | RQ_PRECLEANUP;
 		}
+		spin_lock(&config_list_lock);
+		rq_state |= temp;
+		spin_unlock(&config_list_lock);
+		wake_up(&rq_waitq);
+		if (temp & RQ_STOP)
+			wait_for_completion(&rq_exit);
 		obd_cleanup_client_import(obd);
 		rc = mgc_llog_fini(NULL, obd);
 		if (rc != 0)
@@ -742,6 +755,7 @@ static int mgc_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
 		}
 		/* rc is the task_struct pointer of mgc_requeue_thread. */
 		rc = 0;
+		wait_for_completion(&rq_start);
 	}
 
 	return rc;