ceph: cleanup async writeback, truncation, invalidate helpers

Grab inode ref in helper.  Make work functions static, with consistent
naming.

Signed-off-by: Sage Weil <sage@newdream.net>
commit 3c6f6b79a6
parent 6a026589ba
Author: Sage Weil <sage@newdream.net>
Date:   2010-02-09 15:24:44 -08:00

4 changed files with 65 additions and 43 deletions
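The change repeats one pattern across all three helpers: the ceph_queue_*() helper takes the inode reference (igrab) only when queue_work() reports that the work item was actually queued, and the matching work function drops that reference (iput) when it finishes; a second queue attempt while the work is still pending takes no extra reference. Below is a minimal, self-contained user-space sketch of that pattern, not code from this patch; the fake_* names and the plain refcount are made-up stand-ins for the kernel's workqueue, igrab() and iput().

/* Illustrative sketch only -- not part of the patch.  fake_inode,
 * fake_queue_work, etc. are invented stand-ins for the kernel objects. */
#include <stdbool.h>
#include <stdio.h>

struct fake_inode {
	int refcount;		/* stands in for the inode reference count */
	bool work_pending;	/* stands in for "work item already queued" */
};

/* pretend queue_work(): returns false if the item is already queued */
static bool fake_queue_work(struct fake_inode *inode)
{
	if (inode->work_pending)
		return false;
	inode->work_pending = true;
	return true;
}

/* the queue helper: take the reference only when queueing succeeded */
static void queue_writeback(struct fake_inode *inode)
{
	if (fake_queue_work(inode)) {
		inode->refcount++;	/* kernel code does igrab(inode) */
		printf("queued, ref=%d\n", inode->refcount);
	} else {
		printf("already queued, no extra ref\n");
	}
}

/* the work function: do the work, then drop the helper's reference */
static void writeback_work(struct fake_inode *inode)
{
	inode->work_pending = false;
	/* ... write back dirty pages here ... */
	inode->refcount--;		/* kernel code does iput(inode) */
	printf("work done, ref=%d\n", inode->refcount);
}

int main(void)
{
	struct fake_inode inode = { .refcount = 1, .work_pending = false };

	queue_writeback(&inode);	/* takes a reference */
	queue_writeback(&inode);	/* no-op: already queued, no ref taken */
	writeback_work(&inode);		/* drops the single reference */
	return 0;
}

Because the reference is taken exactly when the work is queued, the single iput() in the work function always balances it, which is why the open-coded "if (ceph_queue_writeback(inode)) igrab(inode);" call sites below can simply be dropped.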

fs/ceph/addr.c

@@ -947,8 +947,7 @@ static int ceph_update_writeable_page(struct file *file,
 	 */
 	snapc = ceph_get_snap_context((void *)page->private);
 	unlock_page(page);
-	if (ceph_queue_writeback(inode))
-		igrab(inode);
+	ceph_queue_writeback(inode);
 	wait_event_interruptible(ci->i_cap_wq,
 	       context_is_writeable_or_written(inode, snapc));
 	ceph_put_snap_context(snapc);

fs/ceph/caps.c

@@ -1602,8 +1602,7 @@ void ceph_check_caps(struct ceph_inode_info *ci, int flags,
 	spin_unlock(&inode->i_lock);
 
 	if (queue_invalidate)
-		if (ceph_queue_page_invalidation(inode))
-			igrab(inode);
+		ceph_queue_invalidate(inode);
 
 	if (session && drop_session_lock)
 		mutex_unlock(&session->s_mutex);
@@ -2178,7 +2177,7 @@ static int handle_cap_grant(struct inode *inode, struct ceph_mds_caps *grant,
 	int wake = 0;
 	int writeback = 0;
 	int revoked_rdcache = 0;
-	int invalidate_async = 0;
+	int queue_invalidate = 0;
 	int tried_invalidate = 0;
 	int ret;
@@ -2205,7 +2204,7 @@ static int handle_cap_grant(struct inode *inode, struct ceph_mds_caps *grant,
 			/* there were locked pages.. invalidate later
			   in a separate thread. */
			if (ci->i_rdcache_revoking != ci->i_rdcache_gen) {
-				invalidate_async = 1;
+				queue_invalidate = 1;
 				ci->i_rdcache_revoking = ci->i_rdcache_gen;
 			}
 		} else {
@@ -2319,21 +2318,15 @@ static int handle_cap_grant(struct inode *inode, struct ceph_mds_caps *grant,
 	}
 	spin_unlock(&inode->i_lock);
 
-	if (writeback) {
+	if (writeback)
 		/*
		 * queue inode for writeback: we can't actually call
		 * filemap_write_and_wait, etc. from message handler
		 * context.
		 */
-		dout("queueing %p for writeback\n", inode);
-		if (ceph_queue_writeback(inode))
-			igrab(inode);
-	}
-	if (invalidate_async) {
-		dout("queueing %p for page invalidation\n", inode);
-		if (ceph_queue_page_invalidation(inode))
-			igrab(inode);
-	}
+		ceph_queue_writeback(inode);
+	if (queue_invalidate)
+		ceph_queue_invalidate(inode);
 	if (wake)
 		wake_up(&ci->i_cap_wq);
 	return reply;
@@ -2479,9 +2472,7 @@ static void handle_cap_trunc(struct inode *inode,
 	spin_unlock(&inode->i_lock);
 	if (queue_trunc)
-		if (queue_work(ceph_client(inode->i_sb)->trunc_wq,
-			       &ci->i_vmtruncate_work))
-			igrab(inode);
+		ceph_queue_vmtruncate(inode);
 }
 
 /*

fs/ceph/inode.c

@@ -28,7 +28,9 @@
 static const struct inode_operations ceph_symlink_iops;
 
-static void ceph_inode_invalidate_pages(struct work_struct *work);
+static void ceph_invalidate_work(struct work_struct *work);
+static void ceph_writeback_work(struct work_struct *work);
+static void ceph_vmtruncate_work(struct work_struct *work);
 
 /*
  * find or create an inode, given the ceph ino number
@@ -357,8 +359,8 @@ struct inode *ceph_alloc_inode(struct super_block *sb)
 	INIT_LIST_HEAD(&ci->i_snap_realm_item);
 	INIT_LIST_HEAD(&ci->i_snap_flush_item);
-	INIT_WORK(&ci->i_wb_work, ceph_inode_writeback);
-	INIT_WORK(&ci->i_pg_inv_work, ceph_inode_invalidate_pages);
+	INIT_WORK(&ci->i_wb_work, ceph_writeback_work);
+	INIT_WORK(&ci->i_pg_inv_work, ceph_invalidate_work);
 	INIT_WORK(&ci->i_vmtruncate_work, ceph_vmtruncate_work);
@@ -675,9 +677,7 @@ static int fill_inode(struct inode *inode,
 	/* queue truncate if we saw i_size decrease */
 	if (queue_trunc)
-		if (queue_work(ceph_client(inode->i_sb)->trunc_wq,
-			       &ci->i_vmtruncate_work))
-			igrab(inode);
+		ceph_queue_vmtruncate(inode);
 
 	/* populate frag tree */
 	/* FIXME: move me up, if/when version reflects fragtree changes */
@@ -1243,7 +1243,18 @@ int ceph_inode_set_size(struct inode *inode, loff_t size)
  * Write back inode data in a worker thread. (This can't be done
  * in the message handler context.)
  */
-void ceph_inode_writeback(struct work_struct *work)
+void ceph_queue_writeback(struct inode *inode)
+{
+	if (queue_work(ceph_inode_to_client(inode)->wb_wq,
+		       &ceph_inode(inode)->i_wb_work)) {
+		dout("ceph_queue_writeback %p\n", inode);
+		igrab(inode);
+	} else {
+		dout("ceph_queue_writeback %p failed\n", inode);
+	}
+}
+
+static void ceph_writeback_work(struct work_struct *work)
 {
 	struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
 						  i_wb_work);
@@ -1254,11 +1265,25 @@ void ceph_inode_writeback(struct work_struct *work)
 	iput(inode);
 }
 
+/*
+ * queue an async invalidation
+ */
+void ceph_queue_invalidate(struct inode *inode)
+{
+	if (queue_work(ceph_inode_to_client(inode)->pg_inv_wq,
+		       &ceph_inode(inode)->i_pg_inv_work)) {
+		dout("ceph_queue_invalidate %p\n", inode);
+		igrab(inode);
+	} else {
+		dout("ceph_queue_invalidate %p failed\n", inode);
+	}
+}
+
 /*
  * Invalidate inode pages in a worker thread. (This can't be done
  * in the message handler context.)
  */
-static void ceph_inode_invalidate_pages(struct work_struct *work)
+static void ceph_invalidate_work(struct work_struct *work)
 {
 	struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
 						  i_pg_inv_work);
@@ -1307,7 +1332,7 @@ static void ceph_inode_invalidate_pages(struct work_struct *work)
  *
  * We also truncate in a separate thread as well.
  */
-void ceph_vmtruncate_work(struct work_struct *work)
+static void ceph_vmtruncate_work(struct work_struct *work)
 {
 	struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
 						  i_vmtruncate_work);
@@ -1320,6 +1345,24 @@ void ceph_vmtruncate_work(struct work_struct *work)
 	iput(inode);
 }
 
+/*
+ * Queue an async vmtruncate. If we fail to queue work, we will handle
+ * the truncation the next time we call __ceph_do_pending_vmtruncate.
+ */
+void ceph_queue_vmtruncate(struct inode *inode)
+{
+	struct ceph_inode_info *ci = ceph_inode(inode);
+
+	if (queue_work(ceph_client(inode->i_sb)->trunc_wq,
+		       &ci->i_vmtruncate_work)) {
+		dout("ceph_queue_vmtruncate %p\n", inode);
+		igrab(inode);
+	} else {
+		dout("ceph_queue_vmtruncate %p failed, pending=%d\n",
+		     inode, ci->i_truncate_pending);
+	}
+}
+
 /*
  * called with i_mutex held.
  *

fs/ceph/super.h

@@ -573,18 +573,6 @@ static inline struct ceph_client *ceph_sb_to_client(struct super_block *sb)
 	return (struct ceph_client *)sb->s_fs_info;
 }
 
-static inline int ceph_queue_writeback(struct inode *inode)
-{
-	return queue_work(ceph_inode_to_client(inode)->wb_wq,
-			  &ceph_inode(inode)->i_wb_work);
-}
-
-static inline int ceph_queue_page_invalidation(struct inode *inode)
-{
-	return queue_work(ceph_inode_to_client(inode)->pg_inv_wq,
-			  &ceph_inode(inode)->i_pg_inv_work);
-}
-
 
 /*
  * we keep buffered readdir results attached to file->private_data
@@ -772,10 +760,11 @@ extern int ceph_readdir_prepopulate(struct ceph_mds_request *req,
 extern int ceph_inode_holds_cap(struct inode *inode, int mask);
 
 extern int ceph_inode_set_size(struct inode *inode, loff_t size);
-extern void ceph_inode_writeback(struct work_struct *work);
-extern void ceph_vmtruncate_work(struct work_struct *work);
 extern void __ceph_do_pending_vmtruncate(struct inode *inode);
-extern void __ceph_queue_vmtruncate(struct inode *inode);
+extern void ceph_queue_vmtruncate(struct inode *inode);
+extern void ceph_queue_invalidate(struct inode *inode);
+extern void ceph_queue_writeback(struct inode *inode);
 
 extern int ceph_do_getattr(struct inode *inode, int mask);
 extern int ceph_permission(struct inode *inode, int mask);