fuse: Introduce fi->lock to protect write related fields

To minimize contention on fc->lock, this patch introduces a new spinlock
protecting the following fuse_inode metadata:

fuse_inode:
	writectr
	writepages
	write_files
	queued_writes
	attr_version

inode:
	i_size
	i_nlink
	i_mtime
	i_ctime

Also, it protects the fields changed in fuse_change_attributes_common()
(too many to list).

Signed-off-by: Kirill Tkhai <ktkhai@virtuozzo.com>
Signed-off-by: Miklos Szeredi <mszeredi@redhat.com>
commit f15ecfef05
parent 4510d86fbb
Kirill Tkhai, 2018-11-09 13:33:22 +03:00 (committed by Miklos Szeredi)
4 changed files with 70 additions and 62 deletions
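Every hunk below follows the same pattern: an update that used to be bracketed by spin_lock(&fc->lock)/spin_unlock(&fc->lock) is now bracketed by the per-inode fi->lock, so writers touching different inodes stop serializing on the shared connection lock (fc->attr_version is already an atomic64 in this series, so bumping it needs no lock of its own). As a rough illustration only, here is a minimal userspace sketch of that idea, not kernel code: the toy_conn/toy_inode structures, the toy_write_update_size() helper and the pthread spinlocks are made-up stand-ins for struct fuse_conn, struct fuse_inode and the kernel spinlock API, loosely modeled on the fuse_write_update_size() hunk in file.c.

/* Userspace analogue of the fc->lock -> fi->lock split (illustrative only;
 * toy_* names are not kernel identifiers). Build with: cc -std=c11 -pthread */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct toy_conn {
	pthread_spinlock_t lock;           /* still guards connection-wide state */
	atomic_uint_fast64_t attr_version; /* atomic counter, no lock needed to bump */
};

struct toy_inode {
	struct toy_conn *conn;
	pthread_spinlock_t lock; /* new per-inode lock: guards i_size, write state, ... */
	uint64_t attr_version;   /* per-inode snapshot of conn->attr_version */
	int64_t i_size;
};

/* Grow the cached file size after a write, taking only the per-inode lock
 * instead of the connection-wide one. */
static bool toy_write_update_size(struct toy_inode *fi, int64_t pos)
{
	bool grew = false;

	pthread_spin_lock(&fi->lock);
	/* fetch_add returns the old value; +1 mimics atomic64_inc_return() */
	fi->attr_version = atomic_fetch_add(&fi->conn->attr_version, 1) + 1;
	if (pos > fi->i_size) {
		fi->i_size = pos;
		grew = true;
	}
	pthread_spin_unlock(&fi->lock);

	return grew;
}

int main(void)
{
	struct toy_conn fc = { .attr_version = 1 };
	struct toy_inode fi = { .conn = &fc };

	pthread_spin_init(&fc.lock, PTHREAD_PROCESS_PRIVATE);
	pthread_spin_init(&fi.lock, PTHREAD_PROCESS_PRIVATE);

	printf("grew=%d new size=%lld\n",
	       toy_write_update_size(&fi, 4096), (long long)fi.i_size);

	pthread_spin_destroy(&fi.lock);
	pthread_spin_destroy(&fc.lock);
	return 0;
}

With the per-inode lock held, the only traffic on shared state left in this path is the atomic increment, which is what the conversion below achieves for the real fuse_write_update_size() and friends.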

fs/fuse/dir.c

@@ -658,7 +658,7 @@ static int fuse_unlink(struct inode *dir, struct dentry *entry)
struct inode *inode = d_inode(entry);
struct fuse_inode *fi = get_fuse_inode(inode);
spin_lock(&fc->lock);
spin_lock(&fi->lock);
fi->attr_version = atomic64_inc_return(&fc->attr_version);
/*
* If i_nlink == 0 then unlink doesn't make sense, yet this can
@@ -668,7 +668,7 @@ static int fuse_unlink(struct inode *dir, struct dentry *entry)
*/
if (inode->i_nlink > 0)
drop_nlink(inode);
spin_unlock(&fc->lock);
spin_unlock(&fi->lock);
fuse_invalidate_attr(inode);
fuse_dir_changed(dir);
fuse_invalidate_entry_cache(entry);
@@ -812,10 +812,10 @@ static int fuse_link(struct dentry *entry, struct inode *newdir,
if (!err) {
struct fuse_inode *fi = get_fuse_inode(inode);
spin_lock(&fc->lock);
spin_lock(&fi->lock);
fi->attr_version = atomic64_inc_return(&fc->attr_version);
inc_nlink(inode);
spin_unlock(&fc->lock);
spin_unlock(&fi->lock);
fuse_invalidate_attr(inode);
fuse_update_ctime(inode);
} else if (err == -EINTR) {
@@ -1343,15 +1343,14 @@ static void iattr_to_fattr(struct fuse_conn *fc, struct iattr *iattr,
*/
void fuse_set_nowrite(struct inode *inode)
{
struct fuse_conn *fc = get_fuse_conn(inode);
struct fuse_inode *fi = get_fuse_inode(inode);
BUG_ON(!inode_is_locked(inode));
spin_lock(&fc->lock);
spin_lock(&fi->lock);
BUG_ON(fi->writectr < 0);
fi->writectr += FUSE_NOWRITE;
spin_unlock(&fc->lock);
spin_unlock(&fi->lock);
wait_event(fi->page_waitq, fi->writectr == FUSE_NOWRITE);
}
@@ -1372,11 +1371,11 @@ static void __fuse_release_nowrite(struct inode *inode)
void fuse_release_nowrite(struct inode *inode)
{
struct fuse_conn *fc = get_fuse_conn(inode);
struct fuse_inode *fi = get_fuse_inode(inode);
spin_lock(&fc->lock);
spin_lock(&fi->lock);
__fuse_release_nowrite(inode);
spin_unlock(&fc->lock);
spin_unlock(&fi->lock);
}
static void fuse_setattr_fill(struct fuse_conn *fc, struct fuse_args *args,
@@ -1511,7 +1510,7 @@ int fuse_do_setattr(struct dentry *dentry, struct iattr *attr,
goto error;
}
spin_lock(&fc->lock);
spin_lock(&fi->lock);
/* the kernel maintains i_mtime locally */
if (trust_local_cmtime) {
if (attr->ia_valid & ATTR_MTIME)
@@ -1529,10 +1528,10 @@ int fuse_do_setattr(struct dentry *dentry, struct iattr *attr,
i_size_write(inode, outarg.attr.size);
if (is_truncate) {
/* NOTE: this may release/reacquire fc->lock */
/* NOTE: this may release/reacquire fi->lock */
__fuse_release_nowrite(inode);
}
spin_unlock(&fc->lock);
spin_unlock(&fi->lock);
/*
* Only call invalidate_inode_pages2() after removing

fs/fuse/file.c

@@ -159,17 +159,16 @@ EXPORT_SYMBOL_GPL(fuse_do_open);
static void fuse_link_write_file(struct file *file)
{
struct inode *inode = file_inode(file);
struct fuse_conn *fc = get_fuse_conn(inode);
struct fuse_inode *fi = get_fuse_inode(inode);
struct fuse_file *ff = file->private_data;
/*
* file may be written through mmap, so chain it onto the
* inode's write_files list
*/
spin_lock(&fc->lock);
spin_lock(&fi->lock);
if (list_empty(&ff->write_entry))
list_add(&ff->write_entry, &fi->write_files);
spin_unlock(&fc->lock);
spin_unlock(&fi->lock);
}
void fuse_finish_open(struct inode *inode, struct file *file)
@@ -186,10 +185,10 @@ void fuse_finish_open(struct inode *inode, struct file *file)
if (fc->atomic_o_trunc && (file->f_flags & O_TRUNC)) {
struct fuse_inode *fi = get_fuse_inode(inode);
spin_lock(&fc->lock);
spin_lock(&fi->lock);
fi->attr_version = atomic64_inc_return(&fc->attr_version);
i_size_write(inode, 0);
spin_unlock(&fc->lock);
spin_unlock(&fi->lock);
fuse_invalidate_attr(inode);
if (fc->writeback_cache)
file_update_time(file);
@@ -231,8 +230,13 @@ static void fuse_prepare_release(struct fuse_inode *fi, struct fuse_file *ff,
struct fuse_req *req = ff->reserved_req;
struct fuse_release_in *inarg = &req->misc.release.in;
/* Inode is NULL on error path of fuse_create_open() */
if (likely(fi)) {
spin_lock(&fi->lock);
list_del(&ff->write_entry);
spin_unlock(&fi->lock);
}
spin_lock(&fc->lock);
list_del(&ff->write_entry);
if (!RB_EMPTY_NODE(&ff->polled_node))
rb_erase(&ff->polled_node, &fc->polled_files);
spin_unlock(&fc->lock);
@@ -358,13 +362,12 @@ static struct fuse_req *fuse_find_writeback(struct fuse_inode *fi,
static bool fuse_range_is_writeback(struct inode *inode, pgoff_t idx_from,
pgoff_t idx_to)
{
struct fuse_conn *fc = get_fuse_conn(inode);
struct fuse_inode *fi = get_fuse_inode(inode);
bool found;
spin_lock(&fc->lock);
spin_lock(&fi->lock);
found = fuse_find_writeback(fi, idx_from, idx_to);
spin_unlock(&fc->lock);
spin_unlock(&fi->lock);
return found;
}
@@ -607,9 +610,9 @@ static void fuse_aio_complete(struct fuse_io_priv *io, int err, ssize_t pos)
struct fuse_conn *fc = get_fuse_conn(inode);
struct fuse_inode *fi = get_fuse_inode(inode);
spin_lock(&fc->lock);
spin_lock(&fi->lock);
fi->attr_version = atomic64_inc_return(&fc->attr_version);
spin_unlock(&fc->lock);
spin_unlock(&fi->lock);
}
io->iocb->ki_complete(io->iocb, res, 0);
@@ -684,13 +687,13 @@ static void fuse_read_update_size(struct inode *inode, loff_t size,
struct fuse_conn *fc = get_fuse_conn(inode);
struct fuse_inode *fi = get_fuse_inode(inode);
spin_lock(&fc->lock);
spin_lock(&fi->lock);
if (attr_ver == fi->attr_version && size < inode->i_size &&
!test_bit(FUSE_I_SIZE_UNSTABLE, &fi->state)) {
fi->attr_version = atomic64_inc_return(&fc->attr_version);
i_size_write(inode, size);
}
spin_unlock(&fc->lock);
spin_unlock(&fi->lock);
}
static void fuse_short_read(struct fuse_req *req, struct inode *inode,
@@ -1005,13 +1008,13 @@ bool fuse_write_update_size(struct inode *inode, loff_t pos)
struct fuse_inode *fi = get_fuse_inode(inode);
bool ret = false;
spin_lock(&fc->lock);
spin_lock(&fi->lock);
fi->attr_version = atomic64_inc_return(&fc->attr_version);
if (pos > inode->i_size) {
i_size_write(inode, pos);
ret = true;
}
spin_unlock(&fc->lock);
spin_unlock(&fi->lock);
return ret;
}
@@ -1490,11 +1493,11 @@ static void fuse_writepage_finish(struct fuse_conn *fc, struct fuse_req *req)
wake_up(&fi->page_waitq);
}
/* Called under fc->lock, may release and reacquire it */
/* Called under fi->lock, may release and reacquire it */
static void fuse_send_writepage(struct fuse_conn *fc, struct fuse_req *req,
loff_t size)
__releases(fc->lock)
__acquires(fc->lock)
__releases(fi->lock)
__acquires(fi->lock)
{
struct fuse_req *aux, *next;
struct fuse_inode *fi = get_fuse_inode(req->inode);
@@ -1502,9 +1505,6 @@ __acquires(fc->lock)
__u64 data_size = req->num_pages * PAGE_SIZE;
bool queued;
if (!fc->connected)
goto out_free;
if (inarg->offset + data_size <= size) {
inarg->size = data_size;
} else if (inarg->offset < size) {
@@ -1515,14 +1515,17 @@ __acquires(fc->lock)
}
req->in.args[1].size = inarg->size;
fi->writectr++;
queued = fuse_request_queue_background(fc, req);
WARN_ON(!queued);
/* Fails on broken connection only */
if (unlikely(!queued))
goto out_free;
fi->writectr++;
return;
out_free:
fuse_writepage_finish(fc, req);
spin_unlock(&fc->lock);
spin_unlock(&fi->lock);
/* After fuse_writepage_finish() aux request list is private */
for (aux = req->misc.write.next; aux; aux = next) {
@@ -1534,18 +1537,18 @@ __acquires(fc->lock)
fuse_writepage_free(fc, req);
fuse_put_request(fc, req);
spin_lock(&fc->lock);
spin_lock(&fi->lock);
}
/*
* If fi->writectr is positive (no truncate or fsync going on) send
* all queued writepage requests.
*
* Called with fc->lock
* Called with fi->lock
*/
void fuse_flush_writepages(struct inode *inode)
__releases(fc->lock)
__acquires(fc->lock)
__releases(fi->lock)
__acquires(fi->lock)
{
struct fuse_conn *fc = get_fuse_conn(inode);
struct fuse_inode *fi = get_fuse_inode(inode);
@@ -1565,7 +1568,7 @@ static void fuse_writepage_end(struct fuse_conn *fc, struct fuse_req *req)
struct fuse_inode *fi = get_fuse_inode(inode);
mapping_set_error(inode->i_mapping, req->out.h.error);
spin_lock(&fc->lock);
spin_lock(&fi->lock);
while (req->misc.write.next) {
struct fuse_conn *fc = get_fuse_conn(inode);
struct fuse_write_in *inarg = &req->misc.write.in;
@@ -1602,7 +1605,7 @@ static void fuse_writepage_end(struct fuse_conn *fc, struct fuse_req *req)
}
fi->writectr--;
fuse_writepage_finish(fc, req);
spin_unlock(&fc->lock);
spin_unlock(&fi->lock);
fuse_writepage_free(fc, req);
}
@@ -1611,13 +1614,13 @@ static struct fuse_file *__fuse_write_file_get(struct fuse_conn *fc,
{
struct fuse_file *ff = NULL;
spin_lock(&fc->lock);
spin_lock(&fi->lock);
if (!list_empty(&fi->write_files)) {
ff = list_entry(fi->write_files.next, struct fuse_file,
write_entry);
fuse_file_get(ff);
}
spin_unlock(&fc->lock);
spin_unlock(&fi->lock);
return ff;
}
@@ -1688,11 +1691,11 @@ static int fuse_writepage_locked(struct page *page)
inc_wb_stat(&inode_to_bdi(inode)->wb, WB_WRITEBACK);
inc_node_page_state(tmp_page, NR_WRITEBACK_TEMP);
spin_lock(&fc->lock);
spin_lock(&fi->lock);
list_add(&req->writepages_entry, &fi->writepages);
list_add_tail(&req->list, &fi->queued_writes);
fuse_flush_writepages(inode);
spin_unlock(&fc->lock);
spin_unlock(&fi->lock);
end_page_writeback(page);
@@ -1741,16 +1744,15 @@ static void fuse_writepages_send(struct fuse_fill_wb_data *data)
{
struct fuse_req *req = data->req;
struct inode *inode = data->inode;
struct fuse_conn *fc = get_fuse_conn(inode);
struct fuse_inode *fi = get_fuse_inode(inode);
int num_pages = req->num_pages;
int i;
req->ff = fuse_file_get(data->ff);
spin_lock(&fc->lock);
spin_lock(&fi->lock);
list_add_tail(&req->list, &fi->queued_writes);
fuse_flush_writepages(inode);
spin_unlock(&fc->lock);
spin_unlock(&fi->lock);
for (i = 0; i < num_pages; i++)
end_page_writeback(data->orig_pages[i]);
@@ -1773,12 +1775,12 @@ static bool fuse_writepage_in_flight(struct fuse_req *new_req,
WARN_ON(new_req->num_pages != 0);
spin_lock(&fc->lock);
spin_lock(&fi->lock);
list_del(&new_req->writepages_entry);
old_req = fuse_find_writeback(fi, page->index, page->index);
if (!old_req) {
list_add(&new_req->writepages_entry, &fi->writepages);
spin_unlock(&fc->lock);
spin_unlock(&fi->lock);
return false;
}
@@ -1801,7 +1803,7 @@ static bool fuse_writepage_in_flight(struct fuse_req *new_req,
old_req->misc.write.next = new_req;
}
spin_unlock(&fc->lock);
spin_unlock(&fi->lock);
if (tmp) {
struct backing_dev_info *bdi = inode_to_bdi(new_req->inode);
@@ -1822,6 +1824,7 @@ static int fuse_writepages_fill(struct page *page,
struct fuse_fill_wb_data *data = _data;
struct fuse_req *req = data->req;
struct inode *inode = data->inode;
struct fuse_inode *fi = get_fuse_inode(inode);
struct fuse_conn *fc = get_fuse_conn(inode);
struct page *tmp_page;
bool is_writeback;
@@ -1892,9 +1895,9 @@ static int fuse_writepages_fill(struct page *page,
req->end = fuse_writepage_end;
req->inode = inode;
spin_lock(&fc->lock);
spin_lock(&fi->lock);
list_add(&req->writepages_entry, &fi->writepages);
spin_unlock(&fc->lock);
spin_unlock(&fi->lock);
data->req = req;
}
@@ -1917,12 +1920,12 @@ static int fuse_writepages_fill(struct page *page,
data->orig_pages[req->num_pages] = page;
/*
* Protected by fc->lock against concurrent access by
* Protected by fi->lock against concurrent access by
* fuse_page_is_writeback().
*/
spin_lock(&fc->lock);
spin_lock(&fi->lock);
req->num_pages++;
spin_unlock(&fc->lock);
spin_unlock(&fi->lock);
out_unlock:
unlock_page(page);

fs/fuse/fuse_i.h

@@ -96,7 +96,7 @@ struct fuse_inode {
union {
/* Write related fields (regular file only) */
struct {
/* Files usable in writepage. Protected by fc->lock */
/* Files usable in writepage. Protected by fi->lock */
struct list_head write_files;
/* Writepages pending on truncate or fsync */
@@ -144,6 +144,9 @@ struct fuse_inode {
/** Lock for serializing lookup and readdir for back compatibility*/
struct mutex mutex;
/** Lock to protect write related fields */
spinlock_t lock;
};
/** FUSE inode state bits */

fs/fuse/inode.c

@@ -97,6 +97,7 @@ static struct inode *fuse_alloc_inode(struct super_block *sb)
fi->orig_ino = 0;
fi->state = 0;
mutex_init(&fi->mutex);
spin_lock_init(&fi->lock);
fi->forget = fuse_alloc_forget();
if (!fi->forget) {
kmem_cache_free(fuse_inode_cachep, inode);
@@ -163,6 +164,8 @@ void fuse_change_attributes_common(struct inode *inode, struct fuse_attr *attr,
struct fuse_conn *fc = get_fuse_conn(inode);
struct fuse_inode *fi = get_fuse_inode(inode);
lockdep_assert_held(&fi->lock);
fi->attr_version = atomic64_inc_return(&fc->attr_version);
fi->i_time = attr_valid;
WRITE_ONCE(fi->inval_mask, 0);
@@ -209,10 +212,10 @@ void fuse_change_attributes(struct inode *inode, struct fuse_attr *attr,
loff_t oldsize;
struct timespec64 old_mtime;
spin_lock(&fc->lock);
spin_lock(&fi->lock);
if ((attr_version != 0 && fi->attr_version > attr_version) ||
test_bit(FUSE_I_SIZE_UNSTABLE, &fi->state)) {
spin_unlock(&fc->lock);
spin_unlock(&fi->lock);
return;
}
@@ -227,7 +230,7 @@ void fuse_change_attributes(struct inode *inode, struct fuse_attr *attr,
*/
if (!is_wb || !S_ISREG(inode->i_mode))
i_size_write(inode, attr->size);
spin_unlock(&fc->lock);
spin_unlock(&fi->lock);
if (!is_wb && S_ISREG(inode->i_mode)) {
bool inval = false;