Merge tag 'gfs2-4.13.fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/gfs2/linux-gfs2

Pull GFS2 updates from Bob Peterson:
 "We've got eight GFS2 patches for this merge window:

  - Andreas Gruenbacher has four patches related to cleaning up the
    GFS2 inode evict process. This is about half of his patches
    designed to fix a long-standing GFS2 hang related to the inode
    shrinker: the shrinker calls gfs2 evict, evict calls DLM, DLM
    requires memory and blocks on the shrinker.

    These four patches have been well tested. His second set of patches
    is still being tested, so I plan to hold them until the next merge
    window, after we have more weeks of testing.

    The first patch eliminates the flush_delayed_work, which can block.

  - Andreas's second patch protects setting of gl_object for rgrps with
    a spin_lock to prevent proven races.

  - His third patch introduces a centralized mechanism for queueing
    glock work with better reference counting, to prevent more races.

  - His fourth patch retains a reference to inode glocks when an error
    occurs while creating an inode. This keeps the subsequent evict
    from needing to reacquire the glock, which might call into DLM and
    block in low memory conditions.

  - Arvind Yadav has a patch to add const to attribute_group
    structures.

  - I have a patch to detect directory entry inconsistencies and
    withdraw the file system if any are found. Better that than silent
    corruption.

  - I have a patch to remove a vestigial variable from glock
    structures, saving some slab space.

  - I have another patch to remove a vestigial variable from the GFS2
    in-core superblock structure"

* tag 'gfs2-4.13.fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/gfs2/linux-gfs2:
  GFS2: constify attribute_group structures.
  gfs2: gfs2_create_inode: Keep glock across iput
  gfs2: Clean up glock work enqueuing
  gfs2: Protect gl->gl_object by spin lock
  gfs2: Get rid of flush_delayed_work in gfs2_evict_inode
  GFS2: Eliminate vestigial sd_log_flush_wrapped
  GFS2: Remove gl_list from glock structure
  GFS2: Withdraw when directory entry inconsistencies are detected
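To make the gl_object race fix concrete before wading into the diff: every writer and reader of gl->gl_object now goes through small helpers that take the glock's lockref spin lock. The sketch below is condensed from the hunks that follow; it is kernel-internal code shown for orientation only, not a standalone build.

	/* Setting the object pointer now happens under gl->gl_lockref.lock. */
	static inline void glock_set_object(struct gfs2_glock *gl, void *object)
	{
		spin_lock(&gl->gl_lockref.lock);
		gl->gl_object = object;
		spin_unlock(&gl->gl_lockref.lock);
	}

	/* Readers take the same lock before dereferencing gl_object. */
	struct gfs2_rgrpd *gfs2_glock2rgrp(struct gfs2_glock *gl)
	{
		struct gfs2_rgrpd *rgd;

		spin_lock(&gl->gl_lockref.lock);
		rgd = gl->gl_object;
		spin_unlock(&gl->gl_lockref.lock);
		return rgd;
	}

Callers such as rgrp_go_inval() then use gfs2_glock2rgrp(gl) instead of dereferencing gl->gl_object directly, as the hunks below show.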
commit c96e6dabfb
@@ -970,7 +970,7 @@ static int sweep_bh_for_rgrps(struct gfs2_inode *ip, struct gfs2_holder *rd_gh,
 			continue;
 		bn = be64_to_cpu(*p);
 		if (gfs2_holder_initialized(rd_gh)) {
-			rgd = (struct gfs2_rgrpd *)rd_gh->gh_gl->gl_object;
+			rgd = gfs2_glock2rgrp(rd_gh->gh_gl);
 			gfs2_assert_withdraw(sdp,
 					     gfs2_glock_is_locked_by_me(rd_gh->gh_gl));
 		} else {
@@ -1444,7 +1444,7 @@ static int gfs2_dir_read_leaf(struct inode *inode, struct dir_context *ctx,
 				"g.offset (%u)\n",
 				(unsigned long long)bh->b_blocknr,
 				entries2, g.offset);
-
+			gfs2_consist_inode(ip);
 			error = -EIO;
 			goto out_free;
 		}
@@ -1612,6 +1612,7 @@ int gfs2_dir_read(struct inode *inode, struct dir_context *ctx,
 			  (unsigned long long)dip->i_no_addr,
 			  dip->i_entries,
 			  g.offset);
+		gfs2_consist_inode(dip);
 		error = -EIO;
 		goto out;
 	}
@@ -2031,8 +2032,8 @@ static int leaf_dealloc(struct gfs2_inode *dip, u32 index, u32 len,
 	gfs2_rlist_alloc(&rlist, LM_ST_EXCLUSIVE);
 
 	for (x = 0; x < rlist.rl_rgrps; x++) {
-		struct gfs2_rgrpd *rgd;
-		rgd = rlist.rl_ghs[x].gh_gl->gl_object;
+		struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(rlist.rl_ghs[x].gh_gl);
+
 		rg_blocks += rgd->rd_length;
 	}
 
fs/gfs2/glock.c
@@ -152,20 +152,34 @@ static void gfs2_glock_remove_from_lru(struct gfs2_glock *gl)
 	spin_unlock(&lru_lock);
 }
 
-/**
- * gfs2_glock_put() - Decrement reference count on glock
- * @gl: The glock to put
- *
+/*
+ * Enqueue the glock on the work queue.  Passes one glock reference on to the
+ * work queue.
  */
+static void __gfs2_glock_queue_work(struct gfs2_glock *gl, unsigned long delay) {
+	if (!queue_delayed_work(glock_workqueue, &gl->gl_work, delay)) {
+		/*
+		 * We are holding the lockref spinlock, and the work was still
+		 * queued above.  The queued work (glock_work_func) takes that
+		 * spinlock before dropping its glock reference(s), so it
+		 * cannot have dropped them in the meantime.
+		 */
+		GLOCK_BUG_ON(gl, gl->gl_lockref.count < 2);
+		gl->gl_lockref.count--;
+	}
+}
 
-void gfs2_glock_put(struct gfs2_glock *gl)
+static void gfs2_glock_queue_work(struct gfs2_glock *gl, unsigned long delay) {
+	spin_lock(&gl->gl_lockref.lock);
+	__gfs2_glock_queue_work(gl, delay);
+	spin_unlock(&gl->gl_lockref.lock);
+}
+
+static void __gfs2_glock_put(struct gfs2_glock *gl)
 {
 	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
 	struct address_space *mapping = gfs2_glock2aspace(gl);
 
-	if (lockref_put_or_lock(&gl->gl_lockref))
-		return;
-
 	lockref_mark_dead(&gl->gl_lockref);
 
 	gfs2_glock_remove_from_lru(gl);
@@ -177,6 +191,20 @@ void gfs2_glock_put(struct gfs2_glock *gl)
 		sdp->sd_lockstruct.ls_ops->lm_put_lock(gl);
 }
 
+/**
+ * gfs2_glock_put() - Decrement reference count on glock
+ * @gl: The glock to put
+ *
+ */
+
+void gfs2_glock_put(struct gfs2_glock *gl)
+{
+	if (lockref_put_or_lock(&gl->gl_lockref))
+		return;
+
+	__gfs2_glock_put(gl);
+}
+
 /**
  * may_grant - check if its ok to grant a new lock
  * @gl: The glock
@@ -482,8 +510,7 @@ __acquires(&gl->gl_lockref.lock)
 		    target == LM_ST_UNLOCKED &&
 		    test_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags)) {
 			finish_xmote(gl, target);
-			if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
-				gfs2_glock_put(gl);
+			gfs2_glock_queue_work(gl, 0);
 		}
 		else if (ret) {
 			pr_err("lm_lock ret %d\n", ret);
@@ -492,8 +519,7 @@ __acquires(&gl->gl_lockref.lock)
 		}
 	} else { /* lock_nolock */
 		finish_xmote(gl, target);
-		if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
-			gfs2_glock_put(gl);
+		gfs2_glock_queue_work(gl, 0);
 	}
 
 	spin_lock(&gl->gl_lockref.lock);
@@ -565,8 +591,7 @@ __acquires(&gl->gl_lockref.lock)
 	clear_bit(GLF_LOCK, &gl->gl_flags);
 	smp_mb__after_atomic();
 	gl->gl_lockref.count++;
-	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
-		gl->gl_lockref.count--;
+	__gfs2_glock_queue_work(gl, 0);
 	return;
 
 out_unlock:
@@ -601,11 +626,11 @@ static void glock_work_func(struct work_struct *work)
 {
 	unsigned long delay = 0;
 	struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work);
-	int drop_ref = 0;
+	unsigned int drop_refs = 1;
 
 	if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags)) {
 		finish_xmote(gl, gl->gl_reply);
-		drop_ref = 1;
+		drop_refs++;
 	}
 	spin_lock(&gl->gl_lockref.lock);
 	if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
@@ -623,17 +648,25 @@ static void glock_work_func(struct work_struct *work)
 		}
 	}
 	run_queue(gl, 0);
-	spin_unlock(&gl->gl_lockref.lock);
-	if (!delay)
-		gfs2_glock_put(gl);
-	else {
+	if (delay) {
+		/* Keep one glock reference for the work we requeue. */
+		drop_refs--;
 		if (gl->gl_name.ln_type != LM_TYPE_INODE)
 			delay = 0;
-		if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
-			gfs2_glock_put(gl);
+		__gfs2_glock_queue_work(gl, delay);
 	}
-	if (drop_ref)
-		gfs2_glock_put(gl);
+
+	/*
+	 * Drop the remaining glock references manually here. (Mind that
+	 * __gfs2_glock_queue_work depends on the lockref spinlock begin held
+	 * here as well.)
+	 */
+	gl->gl_lockref.count -= drop_refs;
+	if (!gl->gl_lockref.count) {
+		__gfs2_glock_put(gl);
+		return;
+	}
+	spin_unlock(&gl->gl_lockref.lock);
 }
 
 /**
@@ -986,8 +1019,7 @@ int gfs2_glock_nq(struct gfs2_holder *gh)
 		     test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))) {
 		set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
 		gl->gl_lockref.count++;
-		if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
-			gl->gl_lockref.count--;
+		__gfs2_glock_queue_work(gl, 0);
 	}
 	run_queue(gl, 1);
 	spin_unlock(&gl->gl_lockref.lock);
@@ -1047,17 +1079,15 @@ void gfs2_glock_dq(struct gfs2_holder *gh)
 		gfs2_glock_add_to_lru(gl);
 
 	trace_gfs2_glock_queue(gh, 0);
+	if (unlikely(!fast_path)) {
+		gl->gl_lockref.count++;
+		if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
+		    !test_bit(GLF_DEMOTE, &gl->gl_flags) &&
+		    gl->gl_name.ln_type == LM_TYPE_INODE)
+			delay = gl->gl_hold_time;
+		__gfs2_glock_queue_work(gl, delay);
+	}
 	spin_unlock(&gl->gl_lockref.lock);
-	if (likely(fast_path))
-		return;
-
-	gfs2_glock_hold(gl);
-	if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
-	    !test_bit(GLF_DEMOTE, &gl->gl_flags) &&
-	    gl->gl_name.ln_type == LM_TYPE_INODE)
-		delay = gl->gl_hold_time;
-	if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
-		gfs2_glock_put(gl);
 }
 
 void gfs2_glock_dq_wait(struct gfs2_holder *gh)
@@ -1233,9 +1263,8 @@ void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state)
 
 	spin_lock(&gl->gl_lockref.lock);
 	handle_callback(gl, state, delay, true);
+	__gfs2_glock_queue_work(gl, delay);
 	spin_unlock(&gl->gl_lockref.lock);
-	if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
-		gfs2_glock_put(gl);
 }
 
 /**
@@ -1294,10 +1323,8 @@ void gfs2_glock_complete(struct gfs2_glock *gl, int ret)
 
 	gl->gl_lockref.count++;
 	set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
+	__gfs2_glock_queue_work(gl, 0);
 	spin_unlock(&gl->gl_lockref.lock);
-
-	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
-		gfs2_glock_put(gl);
 }
 
 static int glock_cmp(void *priv, struct list_head *a, struct list_head *b)
@@ -1355,8 +1382,7 @@ __acquires(&lru_lock)
 		if (demote_ok(gl))
 			handle_callback(gl, LM_ST_UNLOCKED, 0, false);
 		WARN_ON(!test_and_clear_bit(GLF_LOCK, &gl->gl_flags));
-		if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
-			gl->gl_lockref.count--;
+		__gfs2_glock_queue_work(gl, 0);
 		spin_unlock(&gl->gl_lockref.lock);
 		cond_resched_lock(&lru_lock);
 	}
@@ -1462,13 +1488,12 @@ static void glock_hash_walk(glock_examiner examiner, const struct gfs2_sbd *sdp)
 
 static void thaw_glock(struct gfs2_glock *gl)
 {
-	if (!test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))
-		goto out;
-	set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
-	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0) {
-out:
+	if (!test_and_clear_bit(GLF_FROZEN, &gl->gl_flags)) {
 		gfs2_glock_put(gl);
+		return;
 	}
+	set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
+	gfs2_glock_queue_work(gl, 0);
 }
 
 /**
@@ -1484,9 +1509,8 @@ static void clear_glock(struct gfs2_glock *gl)
 	spin_lock(&gl->gl_lockref.lock);
 	if (gl->gl_state != LM_ST_UNLOCKED)
 		handle_callback(gl, LM_ST_UNLOCKED, 0, false);
+	__gfs2_glock_queue_work(gl, 0);
 	spin_unlock(&gl->gl_lockref.lock);
-	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
-		gfs2_glock_put(gl);
 }
 
 /**
@@ -257,4 +257,11 @@ static inline bool gfs2_holder_initialized(struct gfs2_holder *gh)
 	return gh->gh_gl;
 }
 
+static inline void glock_set_object(struct gfs2_glock *gl, void *object)
+{
+	spin_lock(&gl->gl_lockref.lock);
+	gl->gl_object = object;
+	spin_unlock(&gl->gl_lockref.lock);
+}
+
 #endif /* __GLOCK_DOT_H__ */
@@ -137,7 +137,7 @@ void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
  *
  * Called when demoting or unlocking an EX glock.  We must flush
  * to disk all dirty buffers/pages relating to this glock, and must not
- * not return to caller to demote/unlock the glock until I/O is complete.
+ * return to caller to demote/unlock the glock until I/O is complete.
  */
 
 static void rgrp_go_sync(struct gfs2_glock *gl)
@@ -184,7 +184,7 @@ static void rgrp_go_inval(struct gfs2_glock *gl, int flags)
 {
 	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
 	struct address_space *mapping = &sdp->sd_aspace;
-	struct gfs2_rgrpd *rgd = gl->gl_object;
+	struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
 
 	if (rgd)
 		gfs2_rgrp_brelse(rgd);
@@ -197,6 +197,38 @@ static void rgrp_go_inval(struct gfs2_glock *gl, int flags)
 		rgd->rd_flags &= ~GFS2_RDF_UPTODATE;
 }
 
+static struct gfs2_inode *gfs2_glock2inode(struct gfs2_glock *gl)
+{
+	struct gfs2_inode *ip;
+
+	spin_lock(&gl->gl_lockref.lock);
+	ip = gl->gl_object;
+	if (ip)
+		set_bit(GIF_GLOP_PENDING, &ip->i_flags);
+	spin_unlock(&gl->gl_lockref.lock);
+	return ip;
+}
+
+struct gfs2_rgrpd *gfs2_glock2rgrp(struct gfs2_glock *gl)
+{
+	struct gfs2_rgrpd *rgd;
+
+	spin_lock(&gl->gl_lockref.lock);
+	rgd = gl->gl_object;
+	spin_unlock(&gl->gl_lockref.lock);
+
+	return rgd;
+}
+
+static void gfs2_clear_glop_pending(struct gfs2_inode *ip)
+{
+	if (!ip)
+		return;
+
+	clear_bit_unlock(GIF_GLOP_PENDING, &ip->i_flags);
+	wake_up_bit(&ip->i_flags, GIF_GLOP_PENDING);
+}
+
 /**
  * inode_go_sync - Sync the dirty data and/or metadata for an inode glock
  * @gl: the glock protecting the inode
@@ -205,25 +237,24 @@ static void rgrp_go_inval(struct gfs2_glock *gl, int flags)
 
 static void inode_go_sync(struct gfs2_glock *gl)
 {
-	struct gfs2_inode *ip = gl->gl_object;
+	struct gfs2_inode *ip = gfs2_glock2inode(gl);
+	int isreg = ip && S_ISREG(ip->i_inode.i_mode);
 	struct address_space *metamapping = gfs2_glock2aspace(gl);
 	int error;
 
-	if (ip && !S_ISREG(ip->i_inode.i_mode))
-		ip = NULL;
-	if (ip) {
+	if (isreg) {
 		if (test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags))
 			unmap_shared_mapping_range(ip->i_inode.i_mapping, 0, 0);
 		inode_dio_wait(&ip->i_inode);
 	}
 	if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
-		return;
+		goto out;
 
 	GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);
 
 	gfs2_log_flush(gl->gl_name.ln_sbd, gl, NORMAL_FLUSH);
 	filemap_fdatawrite(metamapping);
-	if (ip) {
+	if (isreg) {
 		struct address_space *mapping = ip->i_inode.i_mapping;
 		filemap_fdatawrite(mapping);
 		error = filemap_fdatawait(mapping);
@@ -238,6 +269,9 @@ static void inode_go_sync(struct gfs2_glock *gl)
 	 */
 	smp_mb__before_atomic();
 	clear_bit(GLF_DIRTY, &gl->gl_flags);
+
+out:
+	gfs2_clear_glop_pending(ip);
 }
 
 /**
@@ -253,7 +287,7 @@ static void inode_go_sync(struct gfs2_glock *gl)
 
 static void inode_go_inval(struct gfs2_glock *gl, int flags)
 {
-	struct gfs2_inode *ip = gl->gl_object;
+	struct gfs2_inode *ip = gfs2_glock2inode(gl);
 
 	gfs2_assert_withdraw(gl->gl_name.ln_sbd, !atomic_read(&gl->gl_ail_count));
 
@@ -274,6 +308,8 @@ static void inode_go_inval(struct gfs2_glock *gl, int flags)
 	}
 	if (ip && S_ISREG(ip->i_inode.i_mode))
 		truncate_inode_pages(ip->i_inode.i_mapping, 0);
+
+	gfs2_clear_glop_pending(ip);
 }
 
 /**
@@ -541,7 +577,7 @@ static int freeze_go_demote_ok(const struct gfs2_glock *gl)
  */
 static void iopen_go_callback(struct gfs2_glock *gl, bool remote)
 {
-	struct gfs2_inode *ip = (struct gfs2_inode *)gl->gl_object;
+	struct gfs2_inode *ip = gl->gl_object;
 	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
 
 	if (!remote || (sdp->sd_vfs->s_flags & MS_RDONLY))
@@ -336,7 +336,6 @@ enum {
 };
 
 struct gfs2_glock {
-	struct hlist_bl_node gl_list;
 	unsigned long gl_flags;		/* GLF_... */
 	struct lm_lockname gl_name;
 
@@ -386,6 +385,7 @@ enum {
 	GIF_SW_PAGED		= 3,
 	GIF_ORDERED		= 4,
 	GIF_FREE_VFS_INODE	= 5,
+	GIF_GLOP_PENDING	= 6,
 };
 
 struct gfs2_inode {
@@ -820,7 +820,6 @@ struct gfs2_sbd {
 	wait_queue_head_t sd_reserving_log_wait;
 
 	unsigned int sd_log_flush_head;
-	u64 sd_log_flush_wrapped;
 
 	spinlock_t sd_ail_lock;
 	struct list_head sd_ail1_list;
@@ -857,5 +856,7 @@ static inline void gfs2_sbstats_inc(const struct gfs2_glock *gl, int which)
 	preempt_enable();
 }
 
+extern struct gfs2_rgrpd *gfs2_glock2rgrp(struct gfs2_glock *gl);
+
 #endif /* __INCORE_DOT_H__ */
 
@@ -144,7 +144,8 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type,
 		error = gfs2_glock_get(sdp, no_addr, &gfs2_inode_glops, CREATE, &ip->i_gl);
 		if (unlikely(error))
 			goto fail;
-		ip->i_gl->gl_object = ip;
+		flush_delayed_work(&ip->i_gl->gl_work);
+		glock_set_object(ip->i_gl, ip);
 
 		error = gfs2_glock_get(sdp, no_addr, &gfs2_iopen_glops, CREATE, &io_gl);
 		if (unlikely(error))
@@ -173,8 +174,8 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type,
 		error = gfs2_glock_nq_init(io_gl, LM_ST_SHARED, GL_EXACT, &ip->i_iopen_gh);
 		if (unlikely(error))
 			goto fail_put;
-
-		ip->i_iopen_gh.gh_gl->gl_object = ip;
+		flush_delayed_work(&ip->i_iopen_gh.gh_gl->gl_work);
+		glock_set_object(ip->i_iopen_gh.gh_gl, ip);
 		gfs2_glock_put(io_gl);
 		io_gl = NULL;
 
@@ -201,14 +202,14 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type,
 
 fail_refresh:
 	ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
-	ip->i_iopen_gh.gh_gl->gl_object = NULL;
+	glock_set_object(ip->i_iopen_gh.gh_gl, NULL);
 	gfs2_glock_dq_uninit(&ip->i_iopen_gh);
 fail_put:
 	if (io_gl)
 		gfs2_glock_put(io_gl);
 	if (gfs2_holder_initialized(&i_gh))
 		gfs2_glock_dq_uninit(&i_gh);
-	ip->i_gl->gl_object = NULL;
+	glock_set_object(ip->i_gl, NULL);
 fail:
 	iget_failed(inode);
 	return ERR_PTR(error);
@@ -607,6 +608,7 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
 	error = gfs2_glock_nq_init(dip->i_gl, LM_ST_EXCLUSIVE, 0, ghs);
 	if (error)
 		goto fail;
+	gfs2_holder_mark_uninitialized(ghs + 1);
 
 	error = create_ok(dip, name, mode);
 	if (error)
@@ -705,7 +707,7 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
 	if (error)
 		goto fail_free_inode;
 
-	ip->i_gl->gl_object = ip;
+	glock_set_object(ip->i_gl, ip);
 	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_SKIP, ghs + 1);
 	if (error)
 		goto fail_free_inode;
@@ -731,7 +733,7 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
 	if (error)
 		goto fail_gunlock2;
 
-	ip->i_iopen_gh.gh_gl->gl_object = ip;
+	glock_set_object(ip->i_iopen_gh.gh_gl, ip);
 	gfs2_glock_put(io_gl);
 	gfs2_set_iop(inode);
 	insert_inode_hash(inode);
@@ -778,7 +780,6 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
 fail_gunlock2:
 	if (io_gl)
 		clear_bit(GLF_INODE_CREATING, &io_gl->gl_flags);
-	gfs2_glock_dq_uninit(ghs + 1);
 fail_free_inode:
 	if (ip->i_gl)
 		gfs2_glock_put(ip->i_gl);
@@ -799,6 +800,8 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
 			   &GFS2_I(inode)->i_flags);
 		iput(inode);
 	}
+	if (gfs2_holder_initialized(ghs + 1))
+		gfs2_glock_dq_uninit(ghs + 1);
 fail:
 	return error;
 }
@@ -722,7 +722,6 @@ void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl,
 		clear_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);
 
 	sdp->sd_log_flush_head = sdp->sd_log_head;
-	sdp->sd_log_flush_wrapped = 0;
 	tr = sdp->sd_log_tr;
 	if (tr) {
 		sdp->sd_log_tr = NULL;
@@ -775,7 +774,6 @@ void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl,
 		}
 		atomic_dec(&sdp->sd_log_blks_free); /* Adjust for unreserved buffer */
 		trace_gfs2_log_blocks(sdp, -1);
-		sdp->sd_log_flush_wrapped = 0;
 		log_write_header(sdp, 0);
 		sdp->sd_log_head = sdp->sd_log_flush_head;
 	}
@@ -880,7 +878,6 @@ void gfs2_log_shutdown(struct gfs2_sbd *sdp)
 	gfs2_assert_withdraw(sdp, list_empty(&sdp->sd_ail1_list));
 
 	sdp->sd_log_flush_head = sdp->sd_log_head;
-	sdp->sd_log_flush_wrapped = 0;
 
 	log_write_header(sdp, GFS2_LOG_HEAD_UNMOUNT);
 
@@ -71,7 +71,7 @@ static void maybe_release_space(struct gfs2_bufdata *bd)
 {
 	struct gfs2_glock *gl = bd->bd_gl;
 	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
-	struct gfs2_rgrpd *rgd = gl->gl_object;
+	struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
 	unsigned int index = bd->bd_bh->b_blocknr - gl->gl_name.ln_number;
 	struct gfs2_bitmap *bi = rgd->rd_bits + index;
 
@@ -134,10 +134,8 @@ static void gfs2_log_incr_head(struct gfs2_sbd *sdp)
 	BUG_ON((sdp->sd_log_flush_head == sdp->sd_log_tail) &&
 	       (sdp->sd_log_flush_head != sdp->sd_log_head));
 
-	if (++sdp->sd_log_flush_head == sdp->sd_jdesc->jd_blocks) {
+	if (++sdp->sd_log_flush_head == sdp->sd_jdesc->jd_blocks)
 		sdp->sd_log_flush_head = 0;
-		sdp->sd_log_flush_wrapped = 1;
-	}
 }
 
 static u64 gfs2_log_bmap(struct gfs2_sbd *sdp)
@@ -52,7 +52,6 @@ static void gfs2_init_glock_once(void *foo)
 {
 	struct gfs2_glock *gl = foo;
 
-	INIT_HLIST_BL_NODE(&gl->gl_list);
 	spin_lock_init(&gl->gl_lockref.lock);
 	INIT_LIST_HEAD(&gl->gl_holders);
 	INIT_LIST_HEAD(&gl->gl_lru);
@@ -705,9 +705,7 @@ void gfs2_clear_rgrpd(struct gfs2_sbd *sdp)
 		rb_erase(n, &sdp->sd_rindex_tree);
 
 		if (gl) {
-			spin_lock(&gl->gl_lockref.lock);
-			gl->gl_object = NULL;
-			spin_unlock(&gl->gl_lockref.lock);
+			glock_set_object(gl, NULL);
 			gfs2_glock_add_to_lru(gl);
 			gfs2_glock_put(gl);
 		}
@@ -917,7 +915,7 @@ static int read_rindex_entry(struct gfs2_inode *ip)
 	error = rgd_insert(rgd);
 	spin_unlock(&sdp->sd_rindex_spin);
 	if (!error) {
-		rgd->rd_gl->gl_object = rgd;
+		glock_set_object(rgd->rd_gl, rgd);
 		rgd->rd_gl->gl_vm.start = (rgd->rd_addr * bsize) & PAGE_MASK;
 		rgd->rd_gl->gl_vm.end = PAGE_ALIGN((rgd->rd_addr +
 						    rgd->rd_length) * bsize) - 1;
@@ -1105,9 +1105,12 @@ static int gfs2_statfs_slow(struct gfs2_sbd *sdp, struct gfs2_statfs_change_host
 				gfs2_holder_uninit(gh);
 				error = err;
 			} else {
-				if (!error)
-					error = statfs_slow_fill(
-						gh->gh_gl->gl_object, sc);
+				if (!error) {
+					struct gfs2_rgrpd *rgd =
+						gfs2_glock2rgrp(gh->gh_gl);
+
+					error = statfs_slow_fill(rgd, sc);
+				}
 				gfs2_glock_dq_uninit(gh);
 			}
 		}
@@ -1535,6 +1538,12 @@ static void gfs2_evict_inode(struct inode *inode)
 	if (inode->i_nlink || (sb->s_flags & MS_RDONLY))
 		goto out;
 
+	if (test_bit(GIF_ALLOC_FAILED, &ip->i_flags)) {
+		BUG_ON(!gfs2_glock_is_locked_by_me(ip->i_gl));
+		gfs2_holder_mark_uninitialized(&gh);
+		goto alloc_failed;
+	}
+
 	/* Must not read inode block until block type has been verified */
 	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_SKIP, &gh);
 	if (unlikely(error)) {
@@ -1543,11 +1552,9 @@ static void gfs2_evict_inode(struct inode *inode)
 		goto out;
 	}
 
-	if (!test_bit(GIF_ALLOC_FAILED, &ip->i_flags)) {
-		error = gfs2_check_blk_type(sdp, ip->i_no_addr, GFS2_BLKST_UNLINKED);
-		if (error)
-			goto out_truncate;
-	}
+	error = gfs2_check_blk_type(sdp, ip->i_no_addr, GFS2_BLKST_UNLINKED);
+	if (error)
+		goto out_truncate;
 
 	if (test_bit(GIF_INVALID, &ip->i_flags)) {
 		error = gfs2_inode_refresh(ip);
@@ -1555,6 +1562,7 @@ static void gfs2_evict_inode(struct inode *inode)
 			goto out_truncate;
 	}
 
+alloc_failed:
 	if (gfs2_holder_initialized(&ip->i_iopen_gh) &&
 	    test_bit(HIF_HOLDER, &ip->i_iopen_gh.gh_iflags)) {
 		ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
@@ -1621,7 +1629,8 @@ static void gfs2_evict_inode(struct inode *inode)
 		}
 		gfs2_holder_uninit(&ip->i_iopen_gh);
 	}
-	gfs2_glock_dq_uninit(&gh);
+	if (gfs2_holder_initialized(&gh))
+		gfs2_glock_dq_uninit(&gh);
 	if (error && error != GLR_TRYFAILED && error != -EROFS)
 		fs_warn(sdp, "gfs2_evict_inode: %d\n", error);
 out:
@@ -1631,13 +1640,13 @@ static void gfs2_evict_inode(struct inode *inode)
 	gfs2_ordered_del_inode(ip);
 	clear_inode(inode);
 	gfs2_dir_hash_inval(ip);
-	ip->i_gl->gl_object = NULL;
-	flush_delayed_work(&ip->i_gl->gl_work);
+	glock_set_object(ip->i_gl, NULL);
+	wait_on_bit_io(&ip->i_flags, GIF_GLOP_PENDING, TASK_UNINTERRUPTIBLE);
 	gfs2_glock_add_to_lru(ip->i_gl);
 	gfs2_glock_put(ip->i_gl);
 	ip->i_gl = NULL;
 	if (gfs2_holder_initialized(&ip->i_iopen_gh)) {
-		ip->i_iopen_gh.gh_gl->gl_object = NULL;
+		glock_set_object(ip->i_iopen_gh.gh_gl, NULL);
 		ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
 		gfs2_glock_dq_uninit(&ip->i_iopen_gh);
 	}
@@ -626,12 +626,12 @@ static struct attribute *tune_attrs[] = {
 	NULL,
 };
 
-static struct attribute_group tune_group = {
+static const struct attribute_group tune_group = {
 	.name = "tune",
 	.attrs = tune_attrs,
 };
 
-static struct attribute_group lock_module_group = {
+static const struct attribute_group lock_module_group = {
 	.name = "lock_module",
 	.attrs = lock_module_attrs,
 };
@@ -1327,8 +1327,8 @@ static int ea_dealloc_indirect(struct gfs2_inode *ip)
 	gfs2_rlist_alloc(&rlist, LM_ST_EXCLUSIVE);
 
 	for (x = 0; x < rlist.rl_rgrps; x++) {
-		struct gfs2_rgrpd *rgd;
-		rgd = rlist.rl_ghs[x].gh_gl->gl_object;
+		struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(rlist.rl_ghs[x].gh_gl);
+
 		rg_blocks += rgd->rd_length;
 	}
 
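Stepping back from the diff: the work-enqueuing cleanup above centres on one pair of helpers, repeated here in condensed form for orientation (kernel-internal code, not a standalone build). The caller bumps the lockref count for the reference the queued work will own, and if the work turns out to be queued already, the extra reference is handed straight back under the same spin lock.

	static void __gfs2_glock_queue_work(struct gfs2_glock *gl, unsigned long delay)
	{
		/* Caller holds gl->gl_lockref.lock; the queued work owns one reference. */
		if (!queue_delayed_work(glock_workqueue, &gl->gl_work, delay)) {
			/* Work was already queued: drop the extra reference immediately. */
			GLOCK_BUG_ON(gl, gl->gl_lockref.count < 2);
			gl->gl_lockref.count--;
		}
	}

	static void gfs2_glock_queue_work(struct gfs2_glock *gl, unsigned long delay)
	{
		spin_lock(&gl->gl_lockref.lock);
		__gfs2_glock_queue_work(gl, delay);
		spin_unlock(&gl->gl_lockref.lock);
	}

Each open-coded "if (queue_delayed_work(...) == 0) gfs2_glock_put(gl);" sequence in glock.c is replaced by one of these two calls, which is what closes the reference-count races mentioned in the pull message.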