Merge tag 'md/4.9-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/shli/md
Pull MD fixes from Shaohua Li:
 "There are several bug fixes queued:

   - fix raid5-cache recovery bugs

   - fix discard IO error handling for raid1/10

   - fix array sync writes bogus position to superblock

   - fix IO error handling for raid array with external metadata"

* tag 'md/4.9-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/shli/md:
  md: be careful not lot leak internal curr_resync value into metadata. -- (all)
  raid1: handle read error also in readonly mode
  raid5-cache: correct condition for empty metadata write
  md: report 'write_pending' state when array in sync
  md/raid5: write an empty meta-block when creating log super-block
  md/raid5: initialize next_checkpoint field before use
  RAID10: ignore discard error
  RAID1: ignore discard error
commit 6c286e812d
drivers/md/md.c

@@ -3887,10 +3887,10 @@ array_state_show(struct mddev *mddev, char *page)
 		st = read_auto;
 		break;
 	case 0:
-		if (mddev->in_sync)
-			st = clean;
-		else if (test_bit(MD_CHANGE_PENDING, &mddev->flags))
+		if (test_bit(MD_CHANGE_PENDING, &mddev->flags))
 			st = write_pending;
+		else if (mddev->in_sync)
+			st = clean;
 		else if (mddev->safemode)
 			st = active_idle;
 		else
@@ -8144,14 +8144,14 @@ void md_do_sync(struct md_thread *thread)
 
 	if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
 	    !test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
-	    mddev->curr_resync > 2) {
+	    mddev->curr_resync > 3) {
 		mddev->curr_resync_completed = mddev->curr_resync;
 		sysfs_notify(&mddev->kobj, NULL, "sync_completed");
 	}
 	mddev->pers->sync_request(mddev, max_sectors, &skipped);
 
 	if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) &&
-	    mddev->curr_resync > 2) {
+	    mddev->curr_resync > 3) {
 		if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
 			if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
 				if (mddev->curr_resync >= mddev->recovery_cp) {
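Note on the array_state change above: with MD_CHANGE_PENDING checked first, the array_state sysfs attribute reports "write_pending" whenever a superblock update is outstanding, even if the array is otherwise in_sync, which is the state external-metadata managers wait on. A minimal user-space sketch that reads the attribute (the device name md0 is an assumption, not part of the patch):

/* Minimal user-space sketch: read the md array_state sysfs attribute.
 * The device name md0 is an assumption; substitute your array. */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char state[64];
	FILE *f = fopen("/sys/block/md0/md/array_state", "r");

	if (!f) {
		perror("array_state");
		return 1;
	}
	if (fgets(state, sizeof(state), f)) {
		state[strcspn(state, "\n")] = '\0';
		/* After this fix, "write_pending" is reported whenever a
		 * superblock update is pending, even if the array is in_sync. */
		printf("array_state: %s\n", state);
	}
	fclose(f);
	return 0;
}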
drivers/md/raid1.c

@@ -403,11 +403,14 @@ static void raid1_end_write_request(struct bio *bio)
 	struct bio *to_put = NULL;
 	int mirror = find_bio_disk(r1_bio, bio);
 	struct md_rdev *rdev = conf->mirrors[mirror].rdev;
+	bool discard_error;
+
+	discard_error = bio->bi_error && bio_op(bio) == REQ_OP_DISCARD;
 
 	/*
 	 * 'one mirror IO has finished' event handler:
 	 */
-	if (bio->bi_error) {
+	if (bio->bi_error && !discard_error) {
 		set_bit(WriteErrorSeen, &rdev->flags);
 		if (!test_and_set_bit(WantReplacement, &rdev->flags))
 			set_bit(MD_RECOVERY_NEEDED, &
@@ -444,7 +447,7 @@ static void raid1_end_write_request(struct bio *bio)
 
 		/* Maybe we can clear some bad blocks. */
 		if (is_badblock(rdev, r1_bio->sector, r1_bio->sectors,
-				&first_bad, &bad_sectors)) {
+				&first_bad, &bad_sectors) && !discard_error) {
 			r1_bio->bios[mirror] = IO_MADE_GOOD;
 			set_bit(R1BIO_MadeGood, &r1_bio->state);
 		}
@@ -2294,17 +2297,23 @@ static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio)
 	 * This is all done synchronously while the array is
 	 * frozen
 	 */
+
+	bio = r1_bio->bios[r1_bio->read_disk];
+	bdevname(bio->bi_bdev, b);
+	bio_put(bio);
+	r1_bio->bios[r1_bio->read_disk] = NULL;
+
 	if (mddev->ro == 0) {
 		freeze_array(conf, 1);
 		fix_read_error(conf, r1_bio->read_disk,
 			       r1_bio->sector, r1_bio->sectors);
 		unfreeze_array(conf);
-	} else
-		md_error(mddev, conf->mirrors[r1_bio->read_disk].rdev);
+	} else {
+		r1_bio->bios[r1_bio->read_disk] = IO_BLOCKED;
+	}
+
 	rdev_dec_pending(conf->mirrors[r1_bio->read_disk].rdev, conf->mddev);
 
-	bio = r1_bio->bios[r1_bio->read_disk];
-	bdevname(bio->bi_bdev, b);
 read_more:
 	disk = read_balance(conf, r1_bio, &max_sectors);
 	if (disk == -1) {
@@ -2315,11 +2324,6 @@ static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio)
 	} else {
 		const unsigned long do_sync
 			= r1_bio->master_bio->bi_opf & REQ_SYNC;
-		if (bio) {
-			r1_bio->bios[r1_bio->read_disk] =
-				mddev->ro ? IO_BLOCKED : NULL;
-			bio_put(bio);
-		}
 		r1_bio->read_disk = disk;
 		bio = bio_clone_mddev(r1_bio->master_bio, GFP_NOIO, mddev);
 		bio_trim(bio, r1_bio->sector - bio->bi_iter.bi_sector,
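The RAID1 hunks above (and the RAID10 hunks below) share one idea: a failed REQ_OP_DISCARD completion is not treated as a write error, so WriteErrorSeen is not set and no bad blocks are recorded for it. A stand-alone sketch of that classification, using simplified stand-in types rather than the kernel's:

/* Stand-alone model of the error classification used above; the types
 * and names are simplified stand-ins, not the kernel's. */
#include <stdbool.h>
#include <stdio.h>

enum op { OP_WRITE, OP_DISCARD };

struct completed_io {
	enum op op;
	int error;	/* 0 on success, negative errno on failure */
};

/* Only a failed non-discard request counts as a real write error. */
static bool is_write_error(const struct completed_io *io)
{
	bool discard_error = io->error && io->op == OP_DISCARD;

	return io->error && !discard_error;
}

int main(void)
{
	struct completed_io failed_discard = { OP_DISCARD, -5 /* -EIO */ };
	struct completed_io failed_write   = { OP_WRITE,   -5 /* -EIO */ };

	printf("failed discard -> write error? %d\n", is_write_error(&failed_discard));
	printf("failed write   -> write error? %d\n", is_write_error(&failed_write));
	return 0;
}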
drivers/md/raid10.c

@@ -447,6 +447,9 @@ static void raid10_end_write_request(struct bio *bio)
 	struct r10conf *conf = r10_bio->mddev->private;
 	int slot, repl;
 	struct md_rdev *rdev = NULL;
+	bool discard_error;
+
+	discard_error = bio->bi_error && bio_op(bio) == REQ_OP_DISCARD;
 
 	dev = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
 
@@ -460,7 +463,7 @@ static void raid10_end_write_request(struct bio *bio)
 	/*
 	 * this branch is our 'one mirror IO has finished' event handler:
 	 */
-	if (bio->bi_error) {
+	if (bio->bi_error && !discard_error) {
 		if (repl)
 			/* Never record new bad blocks to replacement,
 			 * just fail it.
@@ -503,7 +506,7 @@ static void raid10_end_write_request(struct bio *bio)
 		if (is_badblock(rdev,
 				r10_bio->devs[slot].addr,
 				r10_bio->sectors,
-				&first_bad, &bad_sectors)) {
+				&first_bad, &bad_sectors) && !discard_error) {
 			bio_put(bio);
 			if (repl)
 				r10_bio->devs[slot].repl_bio = IO_MADE_GOOD;
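For context, the discard path these RAID1/RAID10 fixes harden can be exercised from user space with the BLKDISCARD ioctl (what blkdiscard(8) uses); previously an error returned by a member device for such a request could feed the regular write-error handling and end up recording bad blocks or failing the member. A hedged sketch, with the device path and range chosen only for illustration:

/* Sketch: issue a discard to an md array with the BLKDISCARD ioctl.
 * Device path and range are assumptions for illustration only. */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>

int main(void)
{
	uint64_t range[2] = { 0, 1 << 20 };	/* offset, length: first 1 MiB */
	int fd = open("/dev/md0", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* With these fixes, a member rejecting this discard no longer
	 * triggers the write-error/bad-block handling. */
	if (ioctl(fd, BLKDISCARD, &range) < 0)
		perror("BLKDISCARD");
	close(fd);
	return 0;
}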
drivers/md/raid5-cache.c

@@ -1087,7 +1087,7 @@ static int r5l_recovery_log(struct r5l_log *log)
 	 * 1's seq + 10 and let superblock points to meta2. The same recovery will
 	 * not think meta 3 is a valid meta, because its seq doesn't match
 	 */
-	if (ctx.seq > log->last_cp_seq + 1) {
+	if (ctx.seq > log->last_cp_seq) {
 		int ret;
 
 		ret = r5l_log_write_empty_meta_block(log, ctx.pos, ctx.seq + 10);
@@ -1096,6 +1096,8 @@ static int r5l_recovery_log(struct r5l_log *log)
 		log->seq = ctx.seq + 11;
 		log->log_start = r5l_ring_add(log, ctx.pos, BLOCK_SECTORS);
 		r5l_write_super(log, ctx.pos);
+		log->last_checkpoint = ctx.pos;
+		log->next_checkpoint = ctx.pos;
 	} else {
 		log->log_start = ctx.pos;
 		log->seq = ctx.seq;
@@ -1154,6 +1156,7 @@ static int r5l_load_log(struct r5l_log *log)
 	if (create_super) {
 		log->last_cp_seq = prandom_u32();
 		cp = 0;
+		r5l_log_write_empty_meta_block(log, cp, log->last_cp_seq);
 		/*
 		 * Make sure super points to correct address. Log might have
 		 * data very soon. If super hasn't correct log tail address,
@@ -1168,6 +1171,7 @@ static int r5l_load_log(struct r5l_log *log)
 	if (log->max_free_space > RECLAIM_MAX_FREE_SPACE)
 		log->max_free_space = RECLAIM_MAX_FREE_SPACE;
 	log->last_checkpoint = cp;
+	log->next_checkpoint = cp;
 
 	__free_page(page);
 
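The raid5-cache recovery comment above explains why an empty meta block is written with the recovered seq + 10: a meta block is only accepted when its sequence number matches the value recovery expects next, so the jump guarantees that stale blocks left in the log are rejected. A small stand-alone model of that check (illustrative only, not the kernel's r5l_* code):

/* Stand-alone model of the seq check described in the comment above:
 * a meta block is only accepted if its seq matches the expected value,
 * so bumping seq by 10 after recovery invalidates stale blocks.
 * Illustrative only; not the kernel's r5l_* code. */
#include <stdbool.h>
#include <stdio.h>

struct meta_block {
	unsigned long long seq;
};

static bool meta_is_valid(const struct meta_block *mb,
			  unsigned long long expected_seq)
{
	return mb->seq == expected_seq;
}

int main(void)
{
	unsigned long long recovered_seq = 100;	/* seq of the last good block */
	struct meta_block stale = { .seq = recovered_seq + 1 };	/* left over from before */

	/* After recovery, an empty block is written with recovered_seq + 10,
	 * so the next expected seq is recovered_seq + 11 ... */
	unsigned long long next_expected = recovered_seq + 11;

	/* ... and the stale block no longer matches, so it is ignored. */
	printf("stale block accepted? %d\n", meta_is_valid(&stale, next_expected));
	return 0;
}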