ceph: use complete_all and wake_up_all

This fixes an issue triggered by running concurrent syncs. One of the syncs
would go through while the other would just hang indefinitely. In any case, we
never actually want to wake a single waiter, so the *_all functions should
be used.

Signed-off-by: Yehuda Sadeh <yehuda@hq.newdream.net>
Signed-off-by: Sage Weil <sage@newdream.net>
Author: Yehuda Sadeh, 2010-07-27 13:11:08 -07:00 (committed by Sage Weil)
commit 03066f2345
parent 25848b3ec6
6 changed files with 20 additions and 20 deletions
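Background on the distinction: complete() lets exactly one wait_for_completion() caller proceed, and wake_up() stops after waking a single exclusive waiter, whereas the *_all variants release every task blocked on the completion or wait queue. A minimal, illustrative kernel-style sketch of the hang pattern (sync_done, waiter_thread and friends are hypothetical names, not code from this commit):

	#include <linux/completion.h>
	#include <linux/kthread.h>

	/* One completion that several tasks wait on, as in concurrent syncs. */
	static DECLARE_COMPLETION(sync_done);

	static int waiter_thread(void *arg)
	{
		/* Both concurrent syncs end up sleeping here. */
		wait_for_completion(&sync_done);
		return 0;
	}

	static void start_waiters(void)
	{
		kthread_run(waiter_thread, NULL, "sync-waiter-1");
		kthread_run(waiter_thread, NULL, "sync-waiter-2");
	}

	static void finish_sync(void)
	{
		/*
		 * complete(&sync_done) would let exactly one waiter through,
		 * leaving the second sync hanging indefinitely.
		 */
		complete_all(&sync_done);	/* releases every waiter */
	}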

--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c

@@ -627,7 +627,7 @@ int ceph_add_cap(struct inode *inode,
 	if (fmode >= 0)
 		__ceph_get_fmode(ci, fmode);
 	spin_unlock(&inode->i_lock);
-	wake_up(&ci->i_cap_wq);
+	wake_up_all(&ci->i_cap_wq);
 	return 0;
 }
@@ -1181,7 +1181,7 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
 	}
 	if (wake)
-		wake_up(&ci->i_cap_wq);
+		wake_up_all(&ci->i_cap_wq);
 	return delayed;
 }
@@ -2153,7 +2153,7 @@ void ceph_put_cap_refs(struct ceph_inode_info *ci, int had)
 	else if (flushsnaps)
 		ceph_flush_snaps(ci);
 	if (wake)
-		wake_up(&ci->i_cap_wq);
+		wake_up_all(&ci->i_cap_wq);
 	if (put)
 		iput(inode);
 }
@@ -2229,7 +2229,7 @@ void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
 		iput(inode);
 	} else if (complete_capsnap) {
 		ceph_flush_snaps(ci);
-		wake_up(&ci->i_cap_wq);
+		wake_up_all(&ci->i_cap_wq);
 	}
 	if (drop_capsnap)
 		iput(inode);
@@ -2405,7 +2405,7 @@ static void handle_cap_grant(struct inode *inode, struct ceph_mds_caps *grant,
 	if (queue_invalidate)
 		ceph_queue_invalidate(inode);
 	if (wake)
-		wake_up(&ci->i_cap_wq);
+		wake_up_all(&ci->i_cap_wq);
 	if (check_caps == 1)
 		ceph_check_caps(ci, CHECK_CAPS_NODELAY|CHECK_CAPS_AUTHONLY,
@@ -2460,7 +2460,7 @@ static void handle_cap_flush_ack(struct inode *inode, u64 flush_tid,
 					 struct ceph_inode_info,
 					 i_flushing_item)->vfs_inode);
 		mdsc->num_cap_flushing--;
-		wake_up(&mdsc->cap_flushing_wq);
+		wake_up_all(&mdsc->cap_flushing_wq);
 		dout(" inode %p now !flushing\n", inode);
 		if (ci->i_dirty_caps == 0) {
@@ -2472,7 +2472,7 @@ static void handle_cap_flush_ack(struct inode *inode, u64 flush_tid,
 		}
 	}
 	spin_unlock(&mdsc->cap_dirty_lock);
-	wake_up(&ci->i_cap_wq);
+	wake_up_all(&ci->i_cap_wq);
 out:
 	spin_unlock(&inode->i_lock);

--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c

@@ -265,7 +265,7 @@ int ceph_release(struct inode *inode, struct file *file)
 	kmem_cache_free(ceph_file_cachep, cf);
 	/* wake up anyone waiting for caps on this inode */
-	wake_up(&ci->i_cap_wq);
+	wake_up_all(&ci->i_cap_wq);
 	return 0;
 }

--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c

@@ -1501,7 +1501,7 @@ void __ceph_do_pending_vmtruncate(struct inode *inode)
 	if (wrbuffer_refs == 0)
 		ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
 	if (wake)
-		wake_up(&ci->i_cap_wq);
+		wake_up_all(&ci->i_cap_wq);
 }

--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c

@@ -868,7 +868,7 @@ static int wake_up_session_cb(struct inode *inode, struct ceph_cap *cap,
 {
 	struct ceph_inode_info *ci = ceph_inode(inode);
-	wake_up(&ci->i_cap_wq);
+	wake_up_all(&ci->i_cap_wq);
 	if (arg) {
 		spin_lock(&inode->i_lock);
 		ci->i_wanted_max_size = 0;
@@ -1564,7 +1564,7 @@ static void complete_request(struct ceph_mds_client *mdsc,
 	if (req->r_callback)
 		req->r_callback(mdsc, req);
 	else
-		complete(&req->r_completion);
+		complete_all(&req->r_completion);
 }
 /*
@@ -1932,7 +1932,7 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
 	if (head->safe) {
 		req->r_got_safe = true;
 		__unregister_request(mdsc, req);
-		complete(&req->r_safe_completion);
+		complete_all(&req->r_safe_completion);
 		if (req->r_got_unsafe) {
 			/*
@@ -1947,7 +1947,7 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
 			/* last unsafe request during umount? */
 			if (mdsc->stopping && !__get_oldest_req(mdsc))
-				complete(&mdsc->safe_umount_waiters);
+				complete_all(&mdsc->safe_umount_waiters);
 			mutex_unlock(&mdsc->mutex);
 			goto out;
 		}
@@ -2126,7 +2126,7 @@ static void handle_session(struct ceph_mds_session *session,
 		pr_info("mds%d reconnect denied\n", session->s_mds);
 		remove_session_caps(session);
 		wake = 1; /* for good measure */
-		complete(&mdsc->session_close_waiters);
+		complete_all(&mdsc->session_close_waiters);
 		kick_requests(mdsc, mds);
 		break;

--- a/fs/ceph/mon_client.c
+++ b/fs/ceph/mon_client.c

@@ -345,7 +345,7 @@ static void ceph_monc_handle_map(struct ceph_mon_client *monc,
 out:
 	mutex_unlock(&monc->mutex);
-	wake_up(&client->auth_wq);
+	wake_up_all(&client->auth_wq);
 }
 /*
@@ -462,7 +462,7 @@ static void handle_statfs_reply(struct ceph_mon_client *monc,
 	}
 	mutex_unlock(&monc->mutex);
 	if (req) {
-		complete(&req->completion);
+		complete_all(&req->completion);
 		put_generic_request(req);
 	}
 	return;
@@ -718,7 +718,7 @@ static void handle_auth_reply(struct ceph_mon_client *monc,
 			     monc->m_auth->front_max);
 	if (ret < 0) {
 		monc->client->auth_err = ret;
-		wake_up(&monc->client->auth_wq);
+		wake_up_all(&monc->client->auth_wq);
 	} else if (ret > 0) {
 		__send_prepared_auth_request(monc, ret);
 	} else if (!was_auth && monc->auth->ops->is_authenticated(monc->auth)) {

--- a/fs/ceph/osd_client.c
+++ b/fs/ceph/osd_client.c

@@ -862,12 +862,12 @@ static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg,
 	if (req->r_callback)
 		req->r_callback(req, msg);
 	else
-		complete(&req->r_completion);
+		complete_all(&req->r_completion);
 	if (flags & CEPH_OSD_FLAG_ONDISK) {
 		if (req->r_safe_callback)
 			req->r_safe_callback(req, msg);
-		complete(&req->r_safe_completion);  /* fsync waiter */
+		complete_all(&req->r_safe_completion);  /* fsync waiter */
 	}
 done:
@@ -1083,7 +1083,7 @@ void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg)
 	if (newmap)
 		kick_requests(osdc, NULL);
 	up_read(&osdc->map_sem);
-	wake_up(&osdc->client->auth_wq);
+	wake_up_all(&osdc->client->auth_wq);
 	return;
bad: