ceph: handle race between vmtruncate and queuing cap snap
It's possible that we create a cap snap while there is a pending vmtruncate (i.e. the truncate has not yet been processed by the worker thread). In that case we should truncate dirty pages beyond capsnap->size. Signed-off-by: "Yan, Zheng" <zyan@redhat.com> Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
This commit is contained in:
parent
fa0aa3b839
commit
c8fd0d37f8
|
@ -1833,9 +1833,20 @@ void __ceph_do_pending_vmtruncate(struct inode *inode)
|
|||
* possibly truncate them.. so write AND block!
|
||||
*/
|
||||
if (ci->i_wrbuffer_ref_head < ci->i_wrbuffer_ref) {
|
||||
struct ceph_cap_snap *capsnap;
|
||||
to = ci->i_truncate_size;
|
||||
list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
|
||||
// MDS should have revoked Frw caps
|
||||
WARN_ON_ONCE(capsnap->writing);
|
||||
if (capsnap->dirty_pages && capsnap->size > to)
|
||||
to = capsnap->size;
|
||||
}
|
||||
spin_unlock(&ci->i_ceph_lock);
|
||||
dout("__do_pending_vmtruncate %p flushing snaps first\n",
|
||||
inode);
|
||||
spin_unlock(&ci->i_ceph_lock);
|
||||
|
||||
truncate_pagecache(inode, to);
|
||||
|
||||
filemap_write_and_wait_range(&inode->i_data, 0,
|
||||
inode->i_sb->s_maxbytes);
|
||||
goto retry;
|
||||
|
|
Loading…
Reference in New Issue