gfs2: Allow some glocks to be used during withdraw
We need to allow some glocks to be enqueued, dequeued, promoted, and demoted when we're withdrawn. For example, to maintain metadata integrity, we should disallow the use of inode and rgrp glocks when withdrawn. Other glocks, like iopen or the transaction glocks, may be safely used because none of their metadata goes through the journal. So in general, we should disallow all glocks with an address space, and allow all the others. One exception is: we need to allow our active journal to be demoted so others may recover it.

Allowing glocks after withdraw gives us the ability to take appropriate action (in a following patch) to have our journal properly replayed by another node rather than just abandoning the current transactions and pretending nothing bad happened, leaving the other nodes free to modify the blocks we had in our journal, which may result in file system corruption.

Signed-off-by: Bob Peterson <rpeterso@redhat.com>
parent 0d91061a37
commit a72d2401f5
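The classification rule described above can be sketched outside the kernel. The following is a minimal, hypothetical userspace model of the policy only; the struct, the EX_GLOF_* flags, and the journal_addr parameter are illustrative stand-ins rather than the kernel's types, and the real implementation is the glock_blocked_by_withdraw() helper added in the diff below.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative flag bits mirroring the GLOF_* values added in incore.h. */
#define EX_GLOF_ASPACE  1   /* address space attached (journaled metadata) */
#define EX_GLOF_NONDISK 8   /* not I/O related */

struct example_glock {
        unsigned long flags;     /* classification flags */
        uint64_t      number;    /* lock number (block address for disk glocks) */
};

/*
 * Model of the withdraw rule: nondisk glocks stay usable, and so does the
 * glock protecting our own journal (identified here by journal_addr), so
 * another node can recover it.  Everything else is blocked.
 */
static bool blocked_by_withdraw(const struct example_glock *gl,
                                bool withdrawn, uint64_t journal_addr)
{
        if (!withdrawn)
                return false;
        if (gl->flags & EX_GLOF_NONDISK)
                return false;
        if (gl->number == journal_addr)
                return false;
        return true;
}

int main(void)
{
        struct example_glock inode_gl   = { .flags = EX_GLOF_ASPACE,  .number = 1234 };
        struct example_glock trans_gl   = { .flags = EX_GLOF_NONDISK, .number = 0 };
        struct example_glock journal_gl = { .flags = EX_GLOF_ASPACE,  .number = 5678 };

        printf("inode glock blocked:   %d\n", blocked_by_withdraw(&inode_gl, true, 5678));
        printf("trans glock blocked:   %d\n", blocked_by_withdraw(&trans_gl, true, 5678));
        printf("journal glock blocked: %d\n", blocked_by_withdraw(&journal_gl, true, 5678));
        return 0;
}

With a withdrawn state, the inode glock is refused while the nondisk glock and the node's own journal glock remain usable, which is the behavior the patch below implements.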
fs/gfs2/glock.c

@@ -133,6 +133,33 @@ static void gfs2_glock_dealloc(struct rcu_head *rcu)
 	}
 }
 
+/**
+ * glock_blocked_by_withdraw - determine if we can still use a glock
+ * @gl: the glock
+ *
+ * We need to allow some glocks to be enqueued, dequeued, promoted, and demoted
+ * when we're withdrawn. For example, to maintain metadata integrity, we should
+ * disallow the use of inode and rgrp glocks when withdrawn. Other glocks, like
+ * iopen or the transaction glocks may be safely used because none of their
+ * metadata goes through the journal. So in general, we should disallow all
+ * glocks that are journaled, and allow all the others. One exception is:
+ * we need to allow our active journal to be promoted and demoted so others
+ * may recover it and we can reacquire it when they're done.
+ */
+static bool glock_blocked_by_withdraw(struct gfs2_glock *gl)
+{
+	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+
+	if (likely(!gfs2_withdrawn(sdp)))
+		return false;
+	if (gl->gl_ops->go_flags & GLOF_NONDISK)
+		return false;
+	if (!sdp->sd_jdesc ||
+	    gl->gl_name.ln_number == sdp->sd_jdesc->jd_no_addr)
+		return false;
+	return true;
+}
+
 void gfs2_glock_free(struct gfs2_glock *gl)
 {
 	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
@@ -549,8 +576,7 @@ __acquires(&gl->gl_lockref.lock)
 	unsigned int lck_flags = (unsigned int)(gh ? gh->gh_flags : 0);
 	int ret;
 
-	if (unlikely(gfs2_withdrawn(sdp)) &&
-	    target != LM_ST_UNLOCKED)
+	if (target != LM_ST_UNLOCKED && glock_blocked_by_withdraw(gl))
 		return;
 	lck_flags &= (LM_FLAG_TRY | LM_FLAG_TRY_1CB | LM_FLAG_NOEXP |
 		      LM_FLAG_PRIORITY);
@@ -1194,10 +1220,9 @@ __acquires(&gl->gl_lockref.lock)
 int gfs2_glock_nq(struct gfs2_holder *gh)
 {
 	struct gfs2_glock *gl = gh->gh_gl;
-	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
 	int error = 0;
 
-	if (unlikely(gfs2_withdrawn(sdp)))
+	if (glock_blocked_by_withdraw(gl))
 		return -EIO;
 
 	if (test_bit(GLF_LRU, &gl->gl_flags))
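The two call-site hunks above apply the helper asymmetrically: gfs2_glock_nq() refuses new holds on a blocked glock with -EIO, while do_xmote() still allows a transition to LM_ST_UNLOCKED, so glocks the withdrawn node already holds (notably its journal glock) can be demoted for recovery. A standalone sketch of that behavior, using hypothetical names (ex_nq, ex_xmote_allowed) rather than the kernel functions:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

enum ex_state { EX_ST_UNLOCKED, EX_ST_SHARED, EX_ST_EXCLUSIVE };

/* Enqueue model: acquiring a blocked glock fails outright. */
static int ex_nq(bool blocked_by_withdraw)
{
        return blocked_by_withdraw ? -EIO : 0;
}

/* State-change model: demotion to "unlocked" is always allowed so a
 * withdrawn node can release what it holds; other transitions are not. */
static bool ex_xmote_allowed(enum ex_state target, bool blocked_by_withdraw)
{
        return target == EX_ST_UNLOCKED || !blocked_by_withdraw;
}

int main(void)
{
        printf("nq on blocked glock: %d\n", ex_nq(true));                           /* -EIO */
        printf("demote to unlocked:  %d\n", ex_xmote_allowed(EX_ST_UNLOCKED, true)); /* 1 */
        printf("promote to shared:   %d\n", ex_xmote_allowed(EX_ST_SHARED, true));   /* 0 */
        return 0;
}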
fs/gfs2/glops.c

@@ -579,6 +579,7 @@ static void iopen_go_callback(struct gfs2_glock *gl, bool remote)
 
 const struct gfs2_glock_operations gfs2_meta_glops = {
 	.go_type = LM_TYPE_META,
+	.go_flags = GLOF_NONDISK,
 };
 
 const struct gfs2_glock_operations gfs2_inode_glops = {
@@ -605,30 +606,33 @@ const struct gfs2_glock_operations gfs2_freeze_glops = {
 	.go_xmote_bh = freeze_go_xmote_bh,
 	.go_demote_ok = freeze_go_demote_ok,
 	.go_type = LM_TYPE_NONDISK,
+	.go_flags = GLOF_NONDISK,
 };
 
 const struct gfs2_glock_operations gfs2_iopen_glops = {
 	.go_type = LM_TYPE_IOPEN,
 	.go_callback = iopen_go_callback,
-	.go_flags = GLOF_LRU,
+	.go_flags = GLOF_LRU | GLOF_NONDISK,
 };
 
 const struct gfs2_glock_operations gfs2_flock_glops = {
 	.go_type = LM_TYPE_FLOCK,
-	.go_flags = GLOF_LRU,
+	.go_flags = GLOF_LRU | GLOF_NONDISK,
 };
 
 const struct gfs2_glock_operations gfs2_nondisk_glops = {
 	.go_type = LM_TYPE_NONDISK,
+	.go_flags = GLOF_NONDISK,
 };
 
 const struct gfs2_glock_operations gfs2_quota_glops = {
 	.go_type = LM_TYPE_QUOTA,
-	.go_flags = GLOF_LVB | GLOF_LRU,
+	.go_flags = GLOF_LVB | GLOF_LRU | GLOF_NONDISK,
 };
 
 const struct gfs2_glock_operations gfs2_journal_glops = {
 	.go_type = LM_TYPE_JOURNAL,
+	.go_flags = GLOF_NONDISK,
 };
 
 const struct gfs2_glock_operations *gfs2_glops_list[] = {
fs/gfs2/incore.h

@@ -244,9 +244,10 @@ struct gfs2_glock_operations {
 	void (*go_callback)(struct gfs2_glock *gl, bool remote);
 	const int go_type;
 	const unsigned long go_flags;
-#define GLOF_ASPACE 1
-#define GLOF_LVB 2
-#define GLOF_LRU 4
+#define GLOF_ASPACE 1 /* address space attached */
+#define GLOF_LVB 2 /* Lock Value Block attached */
+#define GLOF_LRU 4 /* LRU managed */
+#define GLOF_NONDISK 8 /* not I/O related */
 };
 
 enum {
@@ -541,6 +542,7 @@ struct gfs2_jdesc {
 	struct list_head jd_revoke_list;
 	unsigned int jd_replay_tail;
 
+	u64 jd_no_addr;
 };
 
 struct gfs2_statfs_change_host {
fs/gfs2/ops_fstype.c

@@ -552,6 +552,8 @@ static int gfs2_jindex_hold(struct gfs2_sbd *sdp, struct gfs2_holder *ji_gh)
 	mutex_lock(&sdp->sd_jindex_mutex);
 
 	for (;;) {
+		struct gfs2_inode *jip;
+
 		error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, ji_gh);
 		if (error)
 			break;
@@ -591,6 +593,8 @@ static int gfs2_jindex_hold(struct gfs2_sbd *sdp, struct gfs2_holder *ji_gh)
 
 		spin_lock(&sdp->sd_jindex_spin);
 		jd->jd_jid = sdp->sd_journals++;
+		jip = GFS2_I(jd->jd_inode);
+		jd->jd_no_addr = jip->i_no_addr;
 		list_add_tail(&jd->jd_list, &sdp->sd_jindex_list);
 		spin_unlock(&sdp->sd_jindex_spin);
 	}