mirror of https://gitee.com/openkylin/linux.git
drbd: Replaced md_io_mutex by an atomic: md_io_in_use
The new function drbd_md_get_buffer() aborts waiting for the buffer in case the disk fails in the meantime.

Signed-off-by: Philipp Reisner <philipp.reisner@linbit.com>
Signed-off-by: Lars Ellenberg <lars.ellenberg@linbit.com>
parent cc94c65015
commit e17117310b
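Read as a locking change, drbd_md_get_buffer()/drbd_md_put_buffer() replace mutex_lock()/mutex_unlock() with a trylock loop that can also be terminated by disk failure. The sketch below is a minimal user-space analogue for illustration only, not DRBD code: C11 atomic_compare_exchange_strong() stands in for the kernel's atomic_cmpxchg(), a pthread condition variable stands in for wait_event()/wake_up(&mdev->misc_wait), and a plain disk_failed flag plays the part of mdev->state.disk <= D_FAILED; every identifier in it is invented.

/* Minimal user-space analogue of drbd_md_get_buffer()/drbd_md_put_buffer().
 * Not DRBD code: stdatomic and a condition variable stand in for the
 * kernel's atomic_t, wait_event() and wake_up(). */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

static atomic_int md_io_in_use;     /* 0 = buffer free, >0 = in use         */
static bool disk_failed;            /* plays the part of disk <= D_FAILED   */
static char md_io_page[4096];       /* plays the part of mdev->md_io_page   */
static pthread_mutex_t wait_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t misc_wait = PTHREAD_COND_INITIALIZER;

/* Become the exclusive user of the meta-data buffer, or give up (NULL)
 * once the disk has failed instead of blocking forever. */
static void *md_get_buffer(void)
{
        int expected;

        pthread_mutex_lock(&wait_lock);
        for (;;) {
                expected = 0;
                if (atomic_compare_exchange_strong(&md_io_in_use, &expected, 1)) {
                        pthread_mutex_unlock(&wait_lock);
                        return md_io_page;      /* we own the buffer now */
                }
                if (disk_failed) {
                        pthread_mutex_unlock(&wait_lock);
                        return NULL;            /* abort the wait, as on D_FAILED */
                }
                pthread_cond_wait(&misc_wait, &wait_lock);
        }
}

/* Drop one reference; the last one to drop wakes any waiter. */
static void md_put_buffer(void)
{
        if (atomic_fetch_sub(&md_io_in_use, 1) == 1) {
                pthread_mutex_lock(&wait_lock);
                pthread_cond_broadcast(&misc_wait);
                pthread_mutex_unlock(&wait_lock);
        }
}

int main(void)
{
        void *buffer = md_get_buffer();

        if (buffer) {
                /* ... use the meta-data buffer ... */
                md_put_buffer();
        }
        return 0;
}

The behaviour that matters for the hunks below is the NULL return: callers such as w_al_write_transaction(), drbd_al_read_log(), drbd_md_sync() and drbd_md_read() now check for it and bail out instead of continuing to wait for a buffer they can no longer use.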
@@ -65,6 +65,23 @@ struct drbd_atodb_wait {
 
 int w_al_write_transaction(struct drbd_conf *, struct drbd_work *, int);
 
+void *drbd_md_get_buffer(struct drbd_conf *mdev)
+{
+        int r;
+
+        wait_event(mdev->misc_wait,
+                   (r = atomic_cmpxchg(&mdev->md_io_in_use, 0, 1)) == 0 ||
+                   mdev->state.disk <= D_FAILED);
+
+        return r ? NULL : page_address(mdev->md_io_page);
+}
+
+void drbd_md_put_buffer(struct drbd_conf *mdev)
+{
+        if (atomic_dec_and_test(&mdev->md_io_in_use))
+                wake_up(&mdev->misc_wait);
+}
+
 static int _drbd_md_sync_page_io(struct drbd_conf *mdev,
                                  struct drbd_backing_dev *bdev,
                                  struct page *page, sector_t sector,
@@ -90,6 +107,7 @@ static int _drbd_md_sync_page_io(struct drbd_conf *mdev,
         bio->bi_end_io = drbd_md_io_complete;
         bio->bi_rw = rw;
 
+        atomic_inc(&mdev->md_io_in_use); /* drbd_md_put_buffer() is in the completion handler */
         if (drbd_insert_fault(mdev, (rw & WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD))
                 bio_endio(bio, -EIO);
         else
@@ -109,7 +127,7 @@ int drbd_md_sync_page_io(struct drbd_conf *mdev, struct drbd_backing_dev *bdev,
         int offset = 0;
         struct page *iop = mdev->md_io_page;
 
-        D_ASSERT(mutex_is_locked(&mdev->md_io_mutex));
+        D_ASSERT(atomic_read(&mdev->md_io_in_use) == 1);
 
         BUG_ON(!bdev->md_bdev);
 
@@ -326,8 +344,13 @@ w_al_write_transaction(struct drbd_conf *mdev, struct drbd_work *w, int unused)
                 return 1;
         }
 
-        mutex_lock(&mdev->md_io_mutex); /* protects md_io_buffer, al_tr_cycle, ... */
-        buffer = (struct al_transaction *)page_address(mdev->md_io_page);
+        buffer = drbd_md_get_buffer(mdev); /* protects md_io_buffer, al_tr_cycle, ... */
+        if (!buffer) {
+                dev_err(DEV, "disk failed while waiting for md_io buffer\n");
+                complete(&((struct update_al_work *)w)->event);
+                put_ldev(mdev);
+                return 1;
+        }
 
         buffer->magic = __constant_cpu_to_be32(DRBD_MAGIC);
         buffer->tr_number = cpu_to_be32(mdev->al_tr_number);
@@ -372,7 +395,7 @@ w_al_write_transaction(struct drbd_conf *mdev, struct drbd_work *w, int unused)
         D_ASSERT(mdev->al_tr_pos < MD_AL_MAX_SIZE);
         mdev->al_tr_number++;
 
-        mutex_unlock(&mdev->md_io_mutex);
+        drbd_md_put_buffer(mdev);
 
         complete(&((struct update_al_work *)w)->event);
         put_ldev(mdev);
@@ -441,8 +464,9 @@ int drbd_al_read_log(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
         /* lock out all other meta data io for now,
          * and make sure the page is mapped.
          */
-        mutex_lock(&mdev->md_io_mutex);
-        buffer = page_address(mdev->md_io_page);
+        buffer = drbd_md_get_buffer(mdev);
+        if (!buffer)
+                return 0;
 
         /* Find the valid transaction in the log */
         for (i = 0; i <= mx; i++) {
@@ -450,7 +474,7 @@ int drbd_al_read_log(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
                 if (rv == 0)
                         continue;
                 if (rv == -1) {
-                        mutex_unlock(&mdev->md_io_mutex);
+                        drbd_md_put_buffer(mdev);
                         return 0;
                 }
                 cnr = be32_to_cpu(buffer->tr_number);
@@ -476,7 +500,7 @@ int drbd_al_read_log(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
 
         if (!found_valid) {
                 dev_warn(DEV, "No usable activity log found.\n");
-                mutex_unlock(&mdev->md_io_mutex);
+                drbd_md_put_buffer(mdev);
                 return 1;
         }
 
@@ -491,7 +515,7 @@ int drbd_al_read_log(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
                 rv = drbd_al_read_tr(mdev, bdev, buffer, i);
                 ERR_IF(rv == 0) goto cancel;
                 if (rv == -1) {
-                        mutex_unlock(&mdev->md_io_mutex);
+                        drbd_md_put_buffer(mdev);
                         return 0;
                 }
 
@@ -532,7 +556,7 @@ int drbd_al_read_log(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
                 mdev->al_tr_pos = 0;
 
         /* ok, we are done with it */
-        mutex_unlock(&mdev->md_io_mutex);
+        drbd_md_put_buffer(mdev);
 
         dev_info(DEV, "Found %d transactions (%d active extents) in activity log.\n",
              transactions, active_extents);
@@ -1095,7 +1095,7 @@ struct drbd_conf {
         struct page *md_io_page;        /* one page buffer for md_io */
         struct page *md_io_tmpp;        /* for logical_block_size != 512 */
         struct drbd_md_io md_io;
-        struct mutex md_io_mutex;       /* protects the md_io, md_io_page and md_io_tmpp */
+        atomic_t md_io_in_use;          /* protects the md_io, md_io_page and md_io_tmpp */
         spinlock_t al_lock;
         wait_queue_head_t al_wait;
         struct lru_cache *act_log;      /* activity log */
@@ -1537,8 +1537,10 @@ extern void resume_next_sg(struct drbd_conf *mdev);
 extern void suspend_other_sg(struct drbd_conf *mdev);
 extern int drbd_resync_finished(struct drbd_conf *mdev);
 /* maybe rather drbd_main.c ? */
+extern void *drbd_md_get_buffer(struct drbd_conf *mdev);
+extern void drbd_md_put_buffer(struct drbd_conf *mdev);
 extern int drbd_md_sync_page_io(struct drbd_conf *mdev,
-                struct drbd_backing_dev *bdev, sector_t sector, int rw);
+                struct drbd_backing_dev *bdev, sector_t sector, int rw);
 extern void drbd_ov_oos_found(struct drbd_conf*, sector_t, int);
 extern void drbd_rs_controller_reset(struct drbd_conf *mdev);
 
@@ -3043,8 +3043,8 @@ void drbd_init_set_defaults(struct drbd_conf *mdev)
         atomic_set(&mdev->rs_sect_in, 0);
         atomic_set(&mdev->rs_sect_ev, 0);
         atomic_set(&mdev->ap_in_flight, 0);
+        atomic_set(&mdev->md_io_in_use, 0);
 
-        mutex_init(&mdev->md_io_mutex);
         mutex_init(&mdev->data.mutex);
         mutex_init(&mdev->meta.mutex);
         sema_init(&mdev->data.work.s, 0);
@@ -3722,8 +3722,10 @@ void drbd_md_sync(struct drbd_conf *mdev)
         if (!get_ldev_if_state(mdev, D_FAILED))
                 return;
 
-        mutex_lock(&mdev->md_io_mutex);
-        buffer = (struct meta_data_on_disk *)page_address(mdev->md_io_page);
+        buffer = drbd_md_get_buffer(mdev);
+        if (!buffer)
+                goto out;
+
         memset(buffer, 0, 512);
 
         buffer->la_size = cpu_to_be64(drbd_get_capacity(mdev->this_bdev));
@@ -3754,7 +3756,8 @@ void drbd_md_sync(struct drbd_conf *mdev)
          * since we updated it on metadata. */
         mdev->ldev->md.la_size_sect = drbd_get_capacity(mdev->this_bdev);
 
-        mutex_unlock(&mdev->md_io_mutex);
+        drbd_md_put_buffer(mdev);
+ out:
         put_ldev(mdev);
 }
 
@@ -3774,8 +3777,9 @@ int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
         if (!get_ldev_if_state(mdev, D_ATTACHING))
                 return ERR_IO_MD_DISK;
 
-        mutex_lock(&mdev->md_io_mutex);
-        buffer = (struct meta_data_on_disk *)page_address(mdev->md_io_page);
+        buffer = drbd_md_get_buffer(mdev);
+        if (!buffer)
+                goto out;
 
         if (!drbd_md_sync_page_io(mdev, bdev, bdev->md.md_offset, READ)) {
                 /* NOTE: can't do normal error processing here as this is
@@ -3836,7 +3840,8 @@ int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
                 mdev->sync_conf.al_extents = 127;
 
  err:
-        mutex_unlock(&mdev->md_io_mutex);
+        drbd_md_put_buffer(mdev);
+ out:
         put_ldev(mdev);
 
         return rv;
@@ -78,6 +78,7 @@ void drbd_md_io_complete(struct bio *bio, int error)
         md_io->error = error;
 
         complete(&md_io->event);
+        drbd_md_put_buffer(mdev);
 }
 
 /* reads on behalf of the partner,
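The hunks in _drbd_md_sync_page_io() and drbd_md_io_complete() show the second property a mutex cannot provide: while a meta-data bio is in flight, md_io_in_use acts as a reference count. The submitter already owns the buffer (count 1), atomic_inc() raises it to 2 before the bio is submitted, and the completion handler and the submitter each drop one reference, so waiters on misc_wait only proceed once the I/O has really finished. A self-contained user-space sketch of that hand-off, again with invented names and only for illustration:

/* User-space sketch of the md_io_in_use hand-off around an asynchronous
 * meta-data bio.  Not DRBD code; a plain thread plays the completion handler. */
#include <assert.h>
#include <pthread.h>
#include <stdatomic.h>
#include <stddef.h>

static atomic_int in_use = 1;        /* buffer already acquired by the submitter */

static void put_buffer(void)
{
        if (atomic_fetch_sub(&in_use, 1) == 1) {
                /* last reference dropped: here the kernel would wake_up(&misc_wait) */
        }
}

static void *completion_handler(void *arg)   /* plays drbd_md_io_complete() */
{
        (void)arg;
        put_buffer();                        /* 2 -> 1: submitter still holds one ref */
        return NULL;
}

int main(void)
{
        pthread_t io;

        atomic_fetch_add(&in_use, 1);        /* 1 -> 2, like atomic_inc() before submitting */
        pthread_create(&io, NULL, completion_handler, NULL);
        pthread_join(io, NULL);              /* stands in for waiting on md_io.event */

        put_buffer();                        /* 1 -> 0: the buffer is free again */
        assert(atomic_load(&in_use) == 0);
        return 0;
}

Dropping a reference from the completion handler, as drbd_md_io_complete() now does, is something the old scheme could not offer, since a kernel mutex has to be released by the task that acquired it.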