fuse: introduce fc->bg_lock
To reduce contention on fc->lock, this patch introduces bg_lock to protect the fields related to the background queue: max_background, congestion_threshold, num_background, active_background, bg_queue, and blocked. This allows the next patch to make async reads stop requiring fc->lock, so async reads and writes executed in parallel will perform better.

Signed-off-by: Kirill Tkhai <ktkhai@virtuozzo.com>
Signed-off-by: Miklos Szeredi <mszeredi@redhat.com>
This commit is contained in:
parent
2b30a53314
commit
ae2dffa394
|
@ -125,12 +125,12 @@ static ssize_t fuse_conn_max_background_write(struct file *file,
|
|||
if (ret > 0) {
|
||||
struct fuse_conn *fc = fuse_ctl_file_conn_get(file);
|
||||
if (fc) {
|
||||
spin_lock(&fc->lock);
|
||||
spin_lock(&fc->bg_lock);
|
||||
fc->max_background = val;
|
||||
fc->blocked = fc->num_background >= fc->max_background;
|
||||
if (!fc->blocked)
|
||||
wake_up(&fc->blocked_waitq);
|
||||
spin_unlock(&fc->lock);
|
||||
spin_unlock(&fc->bg_lock);
|
||||
fuse_conn_put(fc);
|
||||
}
|
||||
}
|
||||
|
@ -171,7 +171,7 @@ static ssize_t fuse_conn_congestion_threshold_write(struct file *file,
|
|||
if (!fc)
|
||||
goto out;
|
||||
|
||||
spin_lock(&fc->lock);
|
||||
spin_lock(&fc->bg_lock);
|
||||
fc->congestion_threshold = val;
|
||||
if (fc->sb) {
|
||||
if (fc->num_background < fc->congestion_threshold) {
|
||||
|
@ -182,7 +182,7 @@ static ssize_t fuse_conn_congestion_threshold_write(struct file *file,
|
|||
set_bdi_congested(fc->sb->s_bdi, BLK_RW_ASYNC);
|
||||
}
|
||||
}
|
||||
spin_unlock(&fc->lock);
|
||||
spin_unlock(&fc->bg_lock);
|
||||
fuse_conn_put(fc);
|
||||
out:
|
||||
return ret;
|
||||
|
|
|
@ -287,10 +287,10 @@ void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
|
|||
* We get here in the unlikely case that a background
|
||||
* request was allocated but not sent
|
||||
*/
|
||||
spin_lock(&fc->lock);
|
||||
spin_lock(&fc->bg_lock);
|
||||
if (!fc->blocked)
|
||||
wake_up(&fc->blocked_waitq);
|
||||
spin_unlock(&fc->lock);
|
||||
spin_unlock(&fc->bg_lock);
|
||||
}
|
||||
|
||||
if (test_bit(FR_WAITING, &req->flags)) {
|
||||
|
@ -390,7 +390,7 @@ static void request_end(struct fuse_conn *fc, struct fuse_req *req)
|
|||
WARN_ON(test_bit(FR_PENDING, &req->flags));
|
||||
WARN_ON(test_bit(FR_SENT, &req->flags));
|
||||
if (test_bit(FR_BACKGROUND, &req->flags)) {
|
||||
spin_lock(&fc->lock);
|
||||
spin_lock(&fc->bg_lock);
|
||||
clear_bit(FR_BACKGROUND, &req->flags);
|
||||
if (fc->num_background == fc->max_background) {
|
||||
fc->blocked = 0;
|
||||
|
@ -413,7 +413,7 @@ static void request_end(struct fuse_conn *fc, struct fuse_req *req)
|
|||
fc->num_background--;
|
||||
fc->active_background--;
|
||||
flush_bg_queue(fc);
|
||||
spin_unlock(&fc->lock);
|
||||
spin_unlock(&fc->bg_lock);
|
||||
}
|
||||
wake_up(&req->waitq);
|
||||
if (req->end)
|
||||
|
@ -586,8 +586,8 @@ ssize_t fuse_simple_request(struct fuse_conn *fc, struct fuse_args *args)
|
|||
*
|
||||
* fc->connected must have been checked previously
|
||||
*/
|
||||
void fuse_request_send_background_locked(struct fuse_conn *fc,
|
||||
struct fuse_req *req)
|
||||
void fuse_request_send_background_nocheck(struct fuse_conn *fc,
|
||||
struct fuse_req *req)
|
||||
{
|
||||
BUG_ON(!test_bit(FR_BACKGROUND, &req->flags));
|
||||
if (!test_bit(FR_WAITING, &req->flags)) {
|
||||
|
@ -595,6 +595,7 @@ void fuse_request_send_background_locked(struct fuse_conn *fc,
|
|||
atomic_inc(&fc->num_waiting);
|
||||
}
|
||||
__set_bit(FR_ISREPLY, &req->flags);
|
||||
spin_lock(&fc->bg_lock);
|
||||
fc->num_background++;
|
||||
if (fc->num_background == fc->max_background)
|
||||
fc->blocked = 1;
|
||||
|
@ -604,6 +605,7 @@ void fuse_request_send_background_locked(struct fuse_conn *fc,
|
|||
}
|
||||
list_add_tail(&req->list, &fc->bg_queue);
|
||||
flush_bg_queue(fc);
|
||||
spin_unlock(&fc->bg_lock);
|
||||
}
|
||||
|
||||
void fuse_request_send_background(struct fuse_conn *fc, struct fuse_req *req)
|
||||
|
@ -611,7 +613,7 @@ void fuse_request_send_background(struct fuse_conn *fc, struct fuse_req *req)
|
|||
BUG_ON(!req->end);
|
||||
spin_lock(&fc->lock);
|
||||
if (fc->connected) {
|
||||
fuse_request_send_background_locked(fc, req);
|
||||
fuse_request_send_background_nocheck(fc, req);
|
||||
spin_unlock(&fc->lock);
|
||||
} else {
|
||||
spin_unlock(&fc->lock);
|
||||
|
@ -2118,7 +2120,6 @@ void fuse_abort_conn(struct fuse_conn *fc, bool is_abort)
|
|||
LIST_HEAD(to_end);
|
||||
|
||||
fc->connected = 0;
|
||||
fc->blocked = 0;
|
||||
fc->aborted = is_abort;
|
||||
fuse_set_initialized(fc);
|
||||
list_for_each_entry(fud, &fc->devices, entry) {
|
||||
|
@ -2140,8 +2141,11 @@ void fuse_abort_conn(struct fuse_conn *fc, bool is_abort)
|
|||
list_splice_tail_init(&fpq->processing, &to_end);
|
||||
spin_unlock(&fpq->lock);
|
||||
}
|
||||
spin_lock(&fc->bg_lock);
|
||||
fc->blocked = 0;
|
||||
fc->max_background = UINT_MAX;
|
||||
flush_bg_queue(fc);
|
||||
spin_unlock(&fc->bg_lock);
|
||||
|
||||
spin_lock(&fiq->waitq.lock);
|
||||
fiq->connected = 0;
|
||||
|
|
|
@ -1502,7 +1502,7 @@ __acquires(fc->lock)
|
|||
|
||||
req->in.args[1].size = inarg->size;
|
||||
fi->writectr++;
|
||||
fuse_request_send_background_locked(fc, req);
|
||||
fuse_request_send_background_nocheck(fc, req);
|
||||
return;
|
||||
|
||||
out_free:
|
||||
|
|
|
@ -500,6 +500,10 @@ struct fuse_conn {
|
|||
/** The list of background requests set aside for later queuing */
|
||||
struct list_head bg_queue;
|
||||
|
||||
/** Protects: max_background, congestion_threshold, num_background,
|
||||
* active_background, bg_queue, blocked */
|
||||
spinlock_t bg_lock;
|
||||
|
||||
/** Flag indicating that INIT reply has been received. Allocating
|
||||
* any fuse request will be suspended until the flag is set */
|
||||
int initialized;
|
||||
|
@ -860,8 +864,8 @@ ssize_t fuse_simple_request(struct fuse_conn *fc, struct fuse_args *args);
|
|||
*/
|
||||
void fuse_request_send_background(struct fuse_conn *fc, struct fuse_req *req);
|
||||
|
||||
void fuse_request_send_background_locked(struct fuse_conn *fc,
|
||||
struct fuse_req *req);
|
||||
void fuse_request_send_background_nocheck(struct fuse_conn *fc,
|
||||
struct fuse_req *req);
|
||||
|
||||
/* Abort all requests */
|
||||
void fuse_abort_conn(struct fuse_conn *fc, bool is_abort);
|
||||
|
|
|
@ -605,6 +605,7 @@ void fuse_conn_init(struct fuse_conn *fc, struct user_namespace *user_ns)
|
|||
{
|
||||
memset(fc, 0, sizeof(*fc));
|
||||
spin_lock_init(&fc->lock);
|
||||
spin_lock_init(&fc->bg_lock);
|
||||
init_rwsem(&fc->killsb);
|
||||
refcount_set(&fc->count, 1);
|
||||
atomic_set(&fc->dev_count, 1);
|
||||
|
@ -852,6 +853,7 @@ static void process_init_limits(struct fuse_conn *fc, struct fuse_init_out *arg)
|
|||
sanitize_global_limit(&max_user_bgreq);
|
||||
sanitize_global_limit(&max_user_congthresh);
|
||||
|
||||
spin_lock(&fc->bg_lock);
|
||||
if (arg->max_background) {
|
||||
fc->max_background = arg->max_background;
|
||||
|
||||
|
@ -865,6 +867,7 @@ static void process_init_limits(struct fuse_conn *fc, struct fuse_init_out *arg)
|
|||
fc->congestion_threshold > max_user_congthresh)
|
||||
fc->congestion_threshold = max_user_congthresh;
|
||||
}
|
||||
spin_unlock(&fc->bg_lock);
|
||||
}
|
||||
|
||||
static void process_init_reply(struct fuse_conn *fc, struct fuse_req *req)
|
||||
|
|
Loading…
Reference in New Issue