mirror of https://gitee.com/openkylin/linux.git
md updates for 3.7
"discard" support, some dm-raid improvements and other assorted bits and pieces. -----BEGIN PGP SIGNATURE----- Version: GnuPG v2.0.18 (GNU/Linux) iQIVAwUAUHk6Rjnsnt1WYoG5AQKovQ//Ym0ROo5a6uekb2USLyFSdQH3TC7z0v0+ +kujrgoc4nHZU/vj5yfMvPVomEUsAhHEwTkvvCiXFFHn6cxPzC8ezm8d40xEeISX qp6i2bPlvGURhsW1tYeD+THtY82/oyzQ4Wa/vaE1sjVLQ+caa2q7kVVgAL9Bj/Kz aESIZjAuPxQNE1674/KR0EmMFcbpd0z1WDV+ydKlRV5jHCHGYf8OmxOenJFf+V/b /f9p2u+NUq5BN5WLhThcysO8lPX1Y7GG8IYay3DlSt/crU24R2a2j0qh/BDoK8+t /DceoHipbIiGxXLVjM7y+1RwPpCh75HJSZQHltPype2Z3iwtwEth9uTkEE3M2h/W tOQEbOZku0kcgsrys7JBmpkBwkR9oZqq1kDd4YBzqW4PiGVP6z0JRH8QpjjB+mjN 47ODYIZcaEYZ+0Jj8kcVxo3gv4Xj4DWH+auSNZihTVmjQPVqrcy3CAt3CkuDzTkY 34fZVuCDiCetLGCGQKrwfMDnySVy5xOmtC6iWsEY5rExAeb0E+BCzcBvbAXzt+ef MPDsrxWbo/ZkvpuwXOwLFTccBuRtAsFi7CM4jcow53W6XMnPpdubphNw5nylaEm1 DEzfID58mv8VHWRuW15vr7SbtROjYJkEFCIaEK3oprrRUYftZntIABcknqvcIYR+ /ULNzkRU1w4= =XRmL -----END PGP SIGNATURE----- Merge tag 'md-3.7' of git://neil.brown.name/md Pull md updates from NeilBrown: - "discard" support, some dm-raid improvements and other assorted bits and pieces. * tag 'md-3.7' of git://neil.brown.name/md: (29 commits) md: refine reporting of resync/reshape delays. md/raid5: be careful not to resize_stripes too big. md: make sure manual changes to recovery checkpoint are saved. md/raid10: use correct limit variable md: writing to sync_action should clear the read-auto state. Subject: [PATCH] md:change resync_mismatches to atomic64_t to avoid races md/raid5: make sure to_read and to_write never go negative. md: When RAID5 is dirty, force reconstruct-write instead of read-modify-write. md/raid5: protect debug message against NULL derefernce. md/raid5: add some missing locking in handle_failed_stripe. MD: raid5 avoid unnecessary zero page for trim MD: raid5 trim support md/bitmap:Don't use IS_ERR to judge alloc_page(). md/raid1: Don't release reference to device while handling read error. raid: replace list_for_each_continue_rcu with new interface add further __init annotations to crypto/xor.c DM RAID: Fix for "sync" directive ineffectiveness DM RAID: Fix comparison of index and quantity for "rebuild" parameter DM RAID: Add rebuild capability for RAID10 DM RAID: Move 'rebuild' checking code to its own function ...
commit 9db908806b
Documentation/device-mapper/dm-raid.txt

@@ -132,3 +132,12 @@ Here we can see the RAID type is raid4, there are 5 devices - all of
 which are 'A'live, and the array is 2/490221568 complete with recovery.
+Faulty or missing devices are marked 'D'.  Devices that are out-of-sync
+are marked 'a'.
 
 
+Version History
+---------------
+1.0.0	Initial version.  Support for RAID 4/5/6
+1.1.0	Added support for RAID 1
+1.2.0	Handle creation of arrays that contain failed devices.
+1.3.0	Added support for RAID 10
+1.3.1	Allow device replacement/rebuild for RAID 10
crypto/xor.c

@@ -56,11 +56,11 @@ xor_blocks(unsigned int src_count, unsigned int bytes, void *dest, void **srcs)
 EXPORT_SYMBOL(xor_blocks);
 
 /* Set of all registered templates. */
-static struct xor_block_template *template_list;
+static struct xor_block_template *__initdata template_list;
 
 #define BENCH_SIZE (PAGE_SIZE)
 
-static void
+static void __init
 do_xor_speed(struct xor_block_template *tmpl, void *b1, void *b2)
 {
 	int speed;
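Editorial note: the point of these annotations is that the xor benchmarking runs once at boot, so marking the code __init and the data __initdata lets the kernel discard them afterwards. A hedged, minimal sketch of the same pattern (the names and values here are illustrative, not from this diff):

#include <linux/init.h>
#include <linux/module.h>

static int __initdata boot_benchmark_result;	/* freed after boot */

static int __init my_driver_init(void)
{
	boot_benchmark_result = 42;	/* pretend we benchmarked something */
	pr_info("benchmark: %d\n", boot_benchmark_result);
	return 0;
}
module_init(my_driver_init);
MODULE_LICENSE("GPL");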
drivers/md/bitmap.c

@@ -163,20 +163,17 @@ static struct md_rdev *next_active_rdev(struct md_rdev *rdev, struct mddev *mdde
 	 * As devices are only added or removed when raid_disk is < 0 and
 	 * nr_pending is 0 and In_sync is clear, the entries we return will
 	 * still be in the same position on the list when we re-enter
-	 * list_for_each_continue_rcu.
+	 * list_for_each_entry_continue_rcu.
 	 */
-	struct list_head *pos;
 	rcu_read_lock();
 	if (rdev == NULL)
 		/* start at the beginning */
-		pos = &mddev->disks;
+		rdev = list_entry_rcu(&mddev->disks, struct md_rdev, same_set);
 	else {
 		/* release the previous rdev and start from there. */
 		rdev_dec_pending(rdev, mddev);
-		pos = &rdev->same_set;
 	}
-	list_for_each_continue_rcu(pos, &mddev->disks) {
-		rdev = list_entry(pos, struct md_rdev, same_set);
+	list_for_each_entry_continue_rcu(rdev, &mddev->disks, same_set) {
 		if (rdev->raid_disk >= 0 &&
 		    !test_bit(Faulty, &rdev->flags)) {
 			/* this is a usable devices */
@@ -473,14 +470,10 @@ static int bitmap_new_disk_sb(struct bitmap *bitmap)
 {
 	bitmap_super_t *sb;
 	unsigned long chunksize, daemon_sleep, write_behind;
-	int err = -EINVAL;
 
 	bitmap->storage.sb_page = alloc_page(GFP_KERNEL);
-	if (IS_ERR(bitmap->storage.sb_page)) {
-		err = PTR_ERR(bitmap->storage.sb_page);
-		bitmap->storage.sb_page = NULL;
-		return err;
-	}
+	if (bitmap->storage.sb_page == NULL)
+		return -ENOMEM;
 	bitmap->storage.sb_page->index = 0;
 
 	sb = kmap_atomic(bitmap->storage.sb_page);
drivers/md/dm-raid.c

@@ -337,6 +337,84 @@ static int validate_region_size(struct raid_set *rs, unsigned long region_size)
 	return 0;
 }
 
+/*
+ * validate_rebuild_devices
+ * @rs
+ *
+ * Determine if the devices specified for rebuild can result in a valid
+ * usable array that is capable of rebuilding the given devices.
+ *
+ * Returns: 0 on success, -EINVAL on failure.
+ */
+static int validate_rebuild_devices(struct raid_set *rs)
+{
+	unsigned i, rebuild_cnt = 0;
+	unsigned rebuilds_per_group, copies, d;
+
+	if (!(rs->print_flags & DMPF_REBUILD))
+		return 0;
+
+	for (i = 0; i < rs->md.raid_disks; i++)
+		if (!test_bit(In_sync, &rs->dev[i].rdev.flags))
+			rebuild_cnt++;
+
+	switch (rs->raid_type->level) {
+	case 1:
+		if (rebuild_cnt >= rs->md.raid_disks)
+			goto too_many;
+		break;
+	case 4:
+	case 5:
+	case 6:
+		if (rebuild_cnt > rs->raid_type->parity_devs)
+			goto too_many;
+		break;
+	case 10:
+		copies = raid10_md_layout_to_copies(rs->md.layout);
+		if (rebuild_cnt < copies)
+			break;
+
+		/*
+		 * It is possible to have a higher rebuild count for RAID10,
+		 * as long as the failed devices occur in different mirror
+		 * groups (i.e. different stripes).
+		 *
+		 * Right now, we only allow for "near" copies.  When other
+		 * formats are added, we will have to check those too.
+		 *
+		 * When checking "near" format, make sure no adjacent devices
+		 * have failed beyond what can be handled.  In addition to the
+		 * simple case where the number of devices is a multiple of the
+		 * number of copies, we must also handle cases where the number
+		 * of devices is not a multiple of the number of copies.
+		 * E.g.    dev1 dev2 dev3 dev4 dev5
+		 *          A    A    B    B    C
+		 *          C    D    D    E    E
+		 */
+		rebuilds_per_group = 0;
+		for (i = 0; i < rs->md.raid_disks * copies; i++) {
+			d = i % rs->md.raid_disks;
+			if (!test_bit(In_sync, &rs->dev[d].rdev.flags) &&
+			    (++rebuilds_per_group >= copies))
+				goto too_many;
+			if (!((i + 1) % copies))
+				rebuilds_per_group = 0;
+		}
+		break;
+	default:
+		DMERR("The rebuild parameter is not supported for %s",
+		      rs->raid_type->name);
+		rs->ti->error = "Rebuild not supported for this RAID type";
+		return -EINVAL;
+	}
+
+	return 0;
+
+too_many:
+	rs->ti->error = "Too many rebuild devices specified";
+	return -EINVAL;
+}
+
 /*
  * Possible arguments are...
  *	<chunk_size> [optional_args]
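Editorial note: the RAID10 "near" check above walks every data slot and resets the per-group failure count at each group boundary. A hedged userspace sketch of the same logic, assuming the 5-device/2-copy layout from the comment (A A B B C / C D D E E); failed[] marks devices needing rebuild:

#include <stdio.h>
#include <stdbool.h>

static bool rebuild_ok(const bool *failed, int raid_disks, int copies)
{
	int rebuilds_per_group = 0;

	/* Slot i lives on device i % raid_disks; every 'copies'
	 * consecutive slots hold the same data.  A group whose
	 * copies have all failed means data loss. */
	for (int i = 0; i < raid_disks * copies; i++) {
		int d = i % raid_disks;
		if (failed[d] && ++rebuilds_per_group >= copies)
			return false;
		if (!((i + 1) % copies))
			rebuilds_per_group = 0;
	}
	return true;
}

int main(void)
{
	bool ok[5]  = { true, false, true, false, false };	/* dev1+dev3 */
	bool bad[5] = { true, true, false, false, false };	/* dev1+dev2 */

	printf("dev1+dev3 failed: %s\n", rebuild_ok(ok, 5, 2) ? "rebuildable" : "lost");
	printf("dev1+dev2 failed: %s\n", rebuild_ok(bad, 5, 2) ? "rebuildable" : "lost");
	return 0;
}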
@@ -365,7 +443,7 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
 {
 	char *raid10_format = "near";
 	unsigned raid10_copies = 2;
-	unsigned i, rebuild_cnt = 0;
+	unsigned i;
 	unsigned long value, region_size = 0;
 	sector_t sectors_per_dev = rs->ti->len;
 	sector_t max_io_len;
@@ -461,31 +539,7 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
 
 		/* Parameters that take a numeric value are checked here */
 		if (!strcasecmp(key, "rebuild")) {
-			rebuild_cnt++;
-
-			switch (rs->raid_type->level) {
-			case 1:
-				if (rebuild_cnt >= rs->md.raid_disks) {
-					rs->ti->error = "Too many rebuild devices specified";
-					return -EINVAL;
-				}
-				break;
-			case 4:
-			case 5:
-			case 6:
-				if (rebuild_cnt > rs->raid_type->parity_devs) {
-					rs->ti->error = "Too many rebuild devices specified for given RAID type";
-					return -EINVAL;
-				}
-				break;
-			case 10:
-			default:
-				DMERR("The rebuild parameter is not supported for %s", rs->raid_type->name);
-				rs->ti->error = "Rebuild not supported for this RAID type";
-				return -EINVAL;
-			}
-
-			if (value > rs->md.raid_disks) {
+			if (value >= rs->md.raid_disks) {
 				rs->ti->error = "Invalid rebuild index given";
 				return -EINVAL;
 			}
@@ -608,6 +662,9 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
 	}
 	rs->md.dev_sectors = sectors_per_dev;
 
+	if (validate_rebuild_devices(rs))
+		return -EINVAL;
+
 	/* Assume there are no metadata devices until the drives are parsed */
 	rs->md.persistent = 0;
 	rs->md.external = 1;
@@ -960,6 +1017,19 @@ static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs)
 
 	freshest = NULL;
 	rdev_for_each_safe(rdev, tmp, mddev) {
+		/*
+		 * Skipping super_load due to DMPF_SYNC will cause
+		 * the array to undergo initialization again as
+		 * though it were new.  This is the intended effect
+		 * of the "sync" directive.
+		 *
+		 * When reshaping capability is added, we must ensure
+		 * that the "sync" directive is disallowed during the
+		 * reshape.
+		 */
+		if (rs->print_flags & DMPF_SYNC)
+			continue;
+
 		if (!rdev->meta_bdev)
 			continue;
 
@@ -1360,7 +1430,7 @@ static void raid_resume(struct dm_target *ti)
 
 static struct target_type raid_target = {
 	.name = "raid",
-	.version = {1, 3, 0},
+	.version = {1, 3, 1},
 	.module = THIS_MODULE,
 	.ctr = raid_ctr,
 	.dtr = raid_dtr,
drivers/md/linear.c

@@ -138,6 +138,7 @@ static struct linear_conf *linear_conf(struct mddev *mddev, int raid_disks)
 	struct linear_conf *conf;
 	struct md_rdev *rdev;
 	int i, cnt;
+	bool discard_supported = false;
 
 	conf = kzalloc (sizeof (*conf) + raid_disks*sizeof(struct dev_info),
			GFP_KERNEL);
@@ -171,6 +172,8 @@ static struct linear_conf *linear_conf(struct mddev *mddev, int raid_disks)
 		conf->array_sectors += rdev->sectors;
 		cnt++;
 
+		if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
+			discard_supported = true;
 	}
 	if (cnt != raid_disks) {
 		printk(KERN_ERR "md/linear:%s: not enough drives present. Aborting!\n",
@@ -178,6 +181,11 @@ static struct linear_conf *linear_conf(struct mddev *mddev, int raid_disks)
 		goto out;
 	}
 
+	if (!discard_supported)
+		queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
+	else
+		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
+
 	/*
	 * Here we calculate the device offsets.
	 */
@@ -244,7 +252,9 @@ static int linear_add(struct mddev *mddev, struct md_rdev *rdev)
 	if (!newconf)
 		return -ENOMEM;
 
-	oldconf = rcu_dereference(mddev->private);
+	oldconf = rcu_dereference_protected(mddev->private,
+					    lockdep_is_held(
+						    &mddev->reconfig_mutex));
 	mddev->raid_disks++;
 	rcu_assign_pointer(mddev->private, newconf);
 	md_set_array_sectors(mddev, linear_size(mddev, 0, 0));
@@ -256,7 +266,10 @@ static int linear_add(struct mddev *mddev, struct md_rdev *rdev)
 
 static int linear_stop (struct mddev *mddev)
 {
-	struct linear_conf *conf = mddev->private;
+	struct linear_conf *conf =
+		rcu_dereference_protected(mddev->private,
+					  lockdep_is_held(
+						  &mddev->reconfig_mutex));
 
 	/*
	 * We do not require rcu protection here since
@@ -326,6 +339,14 @@ static void linear_make_request(struct mddev *mddev, struct bio *bio)
 	bio->bi_sector = bio->bi_sector - start_sector
 		+ tmp_dev->rdev->data_offset;
 	rcu_read_unlock();
 
+	if (unlikely((bio->bi_rw & REQ_DISCARD) &&
+		     !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) {
+		/* Just ignore it */
+		bio_endio(bio, 0);
+		return;
+	}
+
 	generic_make_request(bio);
 }
 
drivers/md/md.c
@@ -674,7 +674,18 @@ static struct md_rdev * find_rdev_nr(struct mddev *mddev, int nr)
 	return NULL;
 }
 
-static struct md_rdev * find_rdev(struct mddev * mddev, dev_t dev)
+static struct md_rdev *find_rdev_nr_rcu(struct mddev *mddev, int nr)
+{
+	struct md_rdev *rdev;
+
+	rdev_for_each_rcu(rdev, mddev)
+		if (rdev->desc_nr == nr)
+			return rdev;
+
+	return NULL;
+}
+
+static struct md_rdev *find_rdev(struct mddev *mddev, dev_t dev)
 {
 	struct md_rdev *rdev;
 
@@ -685,6 +696,17 @@ static struct md_rdev * find_rdev(struct mddev * mddev, dev_t dev)
 	return NULL;
 }
 
+static struct md_rdev *find_rdev_rcu(struct mddev *mddev, dev_t dev)
+{
+	struct md_rdev *rdev;
+
+	rdev_for_each_rcu(rdev, mddev)
+		if (rdev->bdev->bd_dev == dev)
+			return rdev;
+
+	return NULL;
+}
+
 static struct md_personality *find_pers(int level, char *clevel)
 {
 	struct md_personality *pers;
@@ -2022,8 +2044,14 @@ EXPORT_SYMBOL(md_integrity_register);
 /* Disable data integrity if non-capable/non-matching disk is being added */
 void md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev)
 {
-	struct blk_integrity *bi_rdev = bdev_get_integrity(rdev->bdev);
-	struct blk_integrity *bi_mddev = blk_get_integrity(mddev->gendisk);
+	struct blk_integrity *bi_rdev;
+	struct blk_integrity *bi_mddev;
+
+	if (!mddev->gendisk)
+		return;
+
+	bi_rdev = bdev_get_integrity(rdev->bdev);
+	bi_mddev = blk_get_integrity(mddev->gendisk);
 
 	if (!bi_mddev) /* nothing to do */
 		return;
@@ -3754,6 +3782,8 @@ resync_start_store(struct mddev *mddev, const char *buf, size_t len)
 		return -EINVAL;
 
 	mddev->recovery_cp = n;
+	if (mddev->pers)
+		set_bit(MD_CHANGE_CLEAN, &mddev->flags);
 	return len;
 }
 static struct md_sysfs_entry md_resync_start =
@@ -4231,6 +4261,13 @@ action_store(struct mddev *mddev, const char *page, size_t len)
 			set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
 		set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
 	}
+	if (mddev->ro == 2) {
+		/* A write to sync_action is enough to justify
+		 * canceling read-auto mode
+		 */
+		mddev->ro = 0;
+		md_wakeup_thread(mddev->sync_thread);
+	}
 	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
 	md_wakeup_thread(mddev->thread);
 	sysfs_notify_dirent_safe(mddev->sysfs_action);
@@ -4241,7 +4278,8 @@ static ssize_t
 mismatch_cnt_show(struct mddev *mddev, char *page)
 {
 	return sprintf(page, "%llu\n",
-		       (unsigned long long) mddev->resync_mismatches);
+		       (unsigned long long)
+		       atomic64_read(&mddev->resync_mismatches));
 }
 
 static struct md_sysfs_entry md_scan_mode =
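Editorial note: resync_mismatches was a plain sector_t updated from the raid personalities while sysfs readers sampled it; converting it to atomic64_t closes lost-update races without a lock. A hedged kernel-style sketch of the idiom this merge adopts:

#include <linux/atomic.h>

static atomic64_t resync_mismatches = ATOMIC64_INIT(0);

static void account_mismatch(long sectors)
{
	/* one indivisible read-modify-write; concurrent callers
	 * (e.g. raid1's process_checks() and raid10's
	 * sync_request_write(), both converted below) cannot
	 * clobber each other's updates */
	atomic64_add(sectors, &resync_mismatches);
}

static long long read_mismatches(void)
{
	return atomic64_read(&resync_mismatches);
}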
@@ -4362,6 +4400,10 @@ sync_completed_show(struct mddev *mddev, char *page)
 	if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
 		return sprintf(page, "none\n");
 
+	if (mddev->curr_resync == 1 ||
+	    mddev->curr_resync == 2)
+		return sprintf(page, "delayed\n");
+
 	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
 	    test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
 		max_sectors = mddev->resync_max_sectors;
@@ -5207,7 +5249,7 @@ static void md_clean(struct mddev *mddev)
 	mddev->new_layout = 0;
 	mddev->new_chunk_sectors = 0;
 	mddev->curr_resync = 0;
-	mddev->resync_mismatches = 0;
+	atomic64_set(&mddev->resync_mismatches, 0);
 	mddev->suspend_lo = mddev->suspend_hi = 0;
 	mddev->sync_speed_min = mddev->sync_speed_max = 0;
 	mddev->recovery = 0;
@@ -5509,8 +5551,9 @@ static int get_array_info(struct mddev * mddev, void __user * arg)
 	int nr,working,insync,failed,spare;
 	struct md_rdev *rdev;
 
-	nr=working=insync=failed=spare=0;
-	rdev_for_each(rdev, mddev) {
+	nr = working = insync = failed = spare = 0;
+	rcu_read_lock();
+	rdev_for_each_rcu(rdev, mddev) {
 		nr++;
 		if (test_bit(Faulty, &rdev->flags))
 			failed++;
@@ -5522,6 +5565,7 @@ static int get_array_info(struct mddev * mddev, void __user * arg)
 			spare++;
 	}
 	}
+	rcu_read_unlock();
 
 	info.major_version = mddev->major_version;
 	info.minor_version = mddev->minor_version;
@@ -5605,7 +5649,8 @@ static int get_disk_info(struct mddev * mddev, void __user * arg)
 	if (copy_from_user(&info, arg, sizeof(info)))
 		return -EFAULT;
 
-	rdev = find_rdev_nr(mddev, info.number);
+	rcu_read_lock();
+	rdev = find_rdev_nr_rcu(mddev, info.number);
 	if (rdev) {
 		info.major = MAJOR(rdev->bdev->bd_dev);
 		info.minor = MINOR(rdev->bdev->bd_dev);
@@ -5624,6 +5669,7 @@ static int get_disk_info(struct mddev * mddev, void __user * arg)
 		info.raid_disk = -1;
 		info.state = (1<<MD_DISK_REMOVED);
 	}
+	rcu_read_unlock();
 
 	if (copy_to_user(arg, &info, sizeof(info)))
 		return -EFAULT;
@@ -6232,18 +6278,22 @@ static int update_array_info(struct mddev *mddev, mdu_array_info_t *info)
 static int set_disk_faulty(struct mddev *mddev, dev_t dev)
 {
 	struct md_rdev *rdev;
+	int err = 0;
 
 	if (mddev->pers == NULL)
 		return -ENODEV;
 
-	rdev = find_rdev(mddev, dev);
+	rcu_read_lock();
+	rdev = find_rdev_rcu(mddev, dev);
 	if (!rdev)
-		return -ENODEV;
-
-	md_error(mddev, rdev);
-	if (!test_bit(Faulty, &rdev->flags))
-		return -EBUSY;
-	return 0;
+		err = -ENODEV;
+	else {
+		md_error(mddev, rdev);
+		if (!test_bit(Faulty, &rdev->flags))
+			err = -EBUSY;
+	}
+	rcu_read_unlock();
+	return err;
 }
 
 /*
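Editorial note: set_disk_faulty() now follows the classic RCU lookup discipline — walk the list under rcu_read_lock() and only use the result inside the read-side critical section — so the ioctl paths below can skip mddev_lock(). A hedged, generic sketch of the pattern (my_dev/poke_dev are made-up names):

#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>

struct my_dev {
	int id;
	struct list_head same_set;
};

static int poke_dev(struct list_head *disks, int id)
{
	struct my_dev *d;
	int err = -ENODEV;

	rcu_read_lock();
	list_for_each_entry_rcu(d, disks, same_set)
		if (d->id == id) {
			/* 'd' is only guaranteed valid until
			 * rcu_read_unlock(), so act on it here */
			err = 0;
			break;
		}
	rcu_read_unlock();
	return err;
}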
@@ -6315,6 +6365,27 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
 		goto abort;
 	}
 
+	/* Some actions do not require the mutex */
+	switch (cmd) {
+	case GET_ARRAY_INFO:
+		if (!mddev->raid_disks && !mddev->external)
+			err = -ENODEV;
+		else
+			err = get_array_info(mddev, argp);
+		goto abort;
+
+	case GET_DISK_INFO:
+		if (!mddev->raid_disks && !mddev->external)
+			err = -ENODEV;
+		else
+			err = get_disk_info(mddev, argp);
+		goto abort;
+
+	case SET_DISK_FAULTY:
+		err = set_disk_faulty(mddev, new_decode_dev(arg));
+		goto abort;
+	}
+
 	err = mddev_lock(mddev);
 	if (err) {
 		printk(KERN_INFO
@@ -6387,18 +6458,10 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
	 */
	switch (cmd)
	{
-		case GET_ARRAY_INFO:
-			err = get_array_info(mddev, argp);
-			goto done_unlock;
-
		case GET_BITMAP_FILE:
			err = get_bitmap_file(mddev, argp);
			goto done_unlock;
 
-		case GET_DISK_INFO:
-			err = get_disk_info(mddev, argp);
-			goto done_unlock;
-
		case RESTART_ARRAY_RW:
			err = restart_array(mddev);
			goto done_unlock;
@@ -6480,10 +6543,6 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
			err = hot_add_disk(mddev, new_decode_dev(arg));
			goto done_unlock;
 
-		case SET_DISK_FAULTY:
-			err = set_disk_faulty(mddev, new_decode_dev(arg));
-			goto done_unlock;
-
		case RUN_ARRAY:
			err = do_md_run(mddev);
			goto done_unlock;
@@ -6641,7 +6700,7 @@ static int md_thread(void * arg)
 
 		clear_bit(THREAD_WAKEUP, &thread->flags);
 		if (!kthread_should_stop())
-			thread->run(thread->mddev);
+			thread->run(thread);
 	}
 
 	return 0;
@@ -6656,8 +6715,8 @@ void md_wakeup_thread(struct md_thread *thread)
 	}
 }
 
-struct md_thread *md_register_thread(void (*run) (struct mddev *), struct mddev *mddev,
-				 const char *name)
+struct md_thread *md_register_thread(void (*run) (struct md_thread *),
+				     struct mddev *mddev, const char *name)
 {
 	struct md_thread *thread;
 
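Editorial note: the thread callback now receives the md_thread itself rather than the mddev, which is what allows the new ->private field (added to struct md_thread below) to carry per-thread state. A hedged sketch of the new convention — "mypersd"/"mypers" are made-up names, mirroring how raid1d, raid10d, raid5d and md_do_sync are converted in this merge:

static void mypersd(struct md_thread *thread)
{
	struct mddev *mddev = thread->mddev;	/* array being serviced */

	/* ... drain this array's queued work, or use thread->private
	 * for state that is per-thread rather than per-array ... */
}

/* registration passes the md_thread-based callback: */
/* mddev->thread = md_register_thread(mypersd, mddev, "mypers"); */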
@@ -6752,7 +6811,11 @@ static void status_resync(struct seq_file *seq, struct mddev * mddev)
 	int scale;
 	unsigned int per_milli;
 
-	resync = mddev->curr_resync - atomic_read(&mddev->recovery_active);
+	if (mddev->curr_resync <= 3)
+		resync = 0;
+	else
+		resync = mddev->curr_resync
+			- atomic_read(&mddev->recovery_active);
 
 	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
 	    test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
@@ -6978,7 +7041,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
			if (mddev->curr_resync > 2) {
				status_resync(seq, mddev);
				seq_printf(seq, "\n      ");
-			} else if (mddev->curr_resync == 1 || mddev->curr_resync == 2)
+			} else if (mddev->curr_resync >= 1)
				seq_printf(seq, "\tresync=DELAYED\n      ");
			else if (mddev->recovery_cp < MaxSector)
				seq_printf(seq, "\tresync=PENDING\n      ");
@@ -7206,8 +7269,9 @@ EXPORT_SYMBOL_GPL(md_allow_write);
 
 #define SYNC_MARKS	10
 #define	SYNC_MARK_STEP	(3*HZ)
-void md_do_sync(struct mddev *mddev)
+void md_do_sync(struct md_thread *thread)
 {
+	struct mddev *mddev = thread->mddev;
 	struct mddev *mddev2;
 	unsigned int currspeed = 0,
		 window;
@@ -7311,7 +7375,7 @@ void md_do_sync(struct mddev *mddev)
	 * which defaults to physical size, but can be virtual size
	 */
		max_sectors = mddev->resync_max_sectors;
-		mddev->resync_mismatches = 0;
+		atomic64_set(&mddev->resync_mismatches, 0);
		/* we don't use the checkpoint if there's a bitmap */
		if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
			j = mddev->resync_min;
@@ -7367,8 +7431,11 @@ void md_do_sync(struct mddev *mddev)
				       "md: resuming %s of %s from checkpoint.\n",
				       desc, mdname(mddev));
			mddev->curr_resync = j;
		}
-	}
+	} else
+		mddev->curr_resync = 3; /* no longer delayed */
	mddev->curr_resync_completed = j;
+	sysfs_notify(&mddev->kobj, NULL, "sync_completed");
+	md_new_event(mddev);
 
	blk_start_plug(&plug);
	while (j < max_sectors) {
@@ -7421,7 +7488,8 @@ void md_do_sync(struct mddev *mddev)
			break;
 
		j += sectors;
-		if (j>1) mddev->curr_resync = j;
+		if (j > 2)
+			mddev->curr_resync = j;
		mddev->curr_mark_cnt = io_sectors;
		if (last_check == 0)
			/* this is the earliest that rebuild will be
@@ -7543,8 +7611,6 @@ static int remove_and_add_spares(struct mddev *mddev)
	int spares = 0;
	int removed = 0;
 
-	mddev->curr_resync_completed = 0;
-
	rdev_for_each(rdev, mddev)
		if (rdev->raid_disk >= 0 &&
		    !test_bit(Blocked, &rdev->flags) &&
@@ -7739,6 +7805,7 @@ void md_check_recovery(struct mddev *mddev)
		/* Set RUNNING before clearing NEEDED to avoid
		 * any transients in the value of "sync_action".
		 */
+		mddev->curr_resync_completed = 0;
		set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
		/* Clear some bits that don't mean anything, but
		 * might be left set
@@ -7752,7 +7819,7 @@ void md_check_recovery(struct mddev *mddev)
			/* no recovery is running.
			 * remove any failed drives, then
			 * add spares if possible.
-			 * Spare are also removed and re-added, to allow
+			 * Spares are also removed and re-added, to allow
			 * the personality to fail the re-add.
			 */
drivers/md/md.h

@@ -282,7 +282,7 @@ struct mddev {
 
	sector_t			resync_max_sectors; /* may be set by personality */
 
-	sector_t			resync_mismatches; /* count of sectors where
+	atomic64_t			resync_mismatches; /* count of sectors where
							    * parity/replica mismatch found
							    */
 
@@ -540,12 +540,13 @@ static inline void sysfs_unlink_rdev(struct mddev *mddev, struct md_rdev *rdev)
	list_for_each_entry_rcu(rdev, &((mddev)->disks), same_set)
 
 struct md_thread {
-	void			(*run) (struct mddev *mddev);
+	void			(*run) (struct md_thread *thread);
	struct mddev		*mddev;
	wait_queue_head_t	wqueue;
	unsigned long		flags;
	struct task_struct	*tsk;
	unsigned long		timeout;
+	void			*private;
 };
 
 #define THREAD_WAKEUP 0
@@ -584,7 +585,7 @@ static inline void safe_put_page(struct page *p)
 extern int register_md_personality(struct md_personality *p);
 extern int unregister_md_personality(struct md_personality *p);
 extern struct md_thread *md_register_thread(
-	void (*run)(struct mddev *mddev),
+	void (*run)(struct md_thread *thread),
	struct mddev *mddev,
	const char *name);
 extern void md_unregister_thread(struct md_thread **threadp);
@@ -603,7 +604,7 @@ extern void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
 extern void md_super_wait(struct mddev *mddev);
 extern int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
			struct page *page, int rw, bool metadata_op);
-extern void md_do_sync(struct mddev *mddev);
+extern void md_do_sync(struct md_thread *thread);
 extern void md_new_event(struct mddev *mddev);
 extern int md_allow_write(struct mddev *mddev);
 extern void md_wait_for_blocked_rdev(struct md_rdev *rdev, struct mddev *mddev);
drivers/md/multipath.c

@@ -335,8 +335,9 @@ static int multipath_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
 *	3.	Performs writes following reads for array synchronising.
 */
 
-static void multipathd (struct mddev *mddev)
+static void multipathd(struct md_thread *thread)
 {
+	struct mddev *mddev = thread->mddev;
	struct multipath_bh *mp_bh;
	struct bio *bio;
	unsigned long flags;
drivers/md/raid0.c

@@ -88,6 +88,7 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
	char b[BDEVNAME_SIZE];
	char b2[BDEVNAME_SIZE];
	struct r0conf *conf = kzalloc(sizeof(*conf), GFP_KERNEL);
+	bool discard_supported = false;
 
	if (!conf)
		return -ENOMEM;
@@ -195,6 +196,9 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
		if (!smallest || (rdev1->sectors < smallest->sectors))
			smallest = rdev1;
		cnt++;
+
+		if (blk_queue_discard(bdev_get_queue(rdev1->bdev)))
+			discard_supported = true;
	}
	if (cnt != mddev->raid_disks) {
		printk(KERN_ERR "md/raid0:%s: too few disks (%d of %d) - "
@@ -272,6 +276,11 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
	blk_queue_io_opt(mddev->queue,
			 (mddev->chunk_sectors << 9) * mddev->raid_disks);
 
+	if (!discard_supported)
+		queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
+	else
+		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
+
	pr_debug("md/raid0:%s: done.\n", mdname(mddev));
	*private_conf = conf;
 
@@ -423,6 +432,7 @@ static int raid0_run(struct mddev *mddev)
		return -EINVAL;
	blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
	blk_queue_max_write_same_sectors(mddev->queue, mddev->chunk_sectors);
+	blk_queue_max_discard_sectors(mddev->queue, mddev->chunk_sectors);
 
	/* if private is not null, we are here after takeover */
	if (mddev->private == NULL) {
@@ -510,7 +520,7 @@ static void raid0_make_request(struct mddev *mddev, struct bio *bio)
		sector_t sector = bio->bi_sector;
		struct bio_pair *bp;
		/* Sanity check -- queue functions should prevent this happening */
-		if (bio->bi_vcnt != 1 ||
+		if ((bio->bi_vcnt != 1 && bio->bi_vcnt != 0) ||
		    bio->bi_idx != 0)
			goto bad_map;
		/* This is a one page bio that upper layers
@@ -536,6 +546,13 @@ static void raid0_make_request(struct mddev *mddev, struct bio *bio)
	bio->bi_sector = sector_offset + zone->dev_start +
		tmp_dev->data_offset;
 
+	if (unlikely((bio->bi_rw & REQ_DISCARD) &&
+		     !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) {
+		/* Just ignore it */
+		bio_endio(bio, 0);
+		return;
+	}
+
	generic_make_request(bio);
	return;
 
|
@ -333,9 +333,10 @@ static void raid1_end_read_request(struct bio *bio, int error)
|
|||
spin_unlock_irqrestore(&conf->device_lock, flags);
|
||||
}
|
||||
|
||||
if (uptodate)
|
||||
if (uptodate) {
|
||||
raid_end_bio_io(r1_bio);
|
||||
else {
|
||||
rdev_dec_pending(conf->mirrors[mirror].rdev, conf->mddev);
|
||||
} else {
|
||||
/*
|
||||
* oops, read error:
|
||||
*/
|
||||
|
@@ -349,9 +350,8 @@ static void raid1_end_read_request(struct bio *bio, int error)
			(unsigned long long)r1_bio->sector);
		set_bit(R1BIO_ReadError, &r1_bio->state);
		reschedule_retry(r1_bio);
+		/* don't drop the reference on read_disk yet */
	}
-
-	rdev_dec_pending(conf->mirrors[mirror].rdev, conf->mddev);
 }
 
 static void close_write(struct r1bio *r1_bio)
@@ -781,7 +781,12 @@ static void flush_pending_writes(struct r1conf *conf)
		while (bio) { /* submit pending writes */
			struct bio *next = bio->bi_next;
			bio->bi_next = NULL;
-			generic_make_request(bio);
+			if (unlikely((bio->bi_rw & REQ_DISCARD) &&
+			    !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
+				/* Just ignore it */
+				bio_endio(bio, 0);
+			else
+				generic_make_request(bio);
			bio = next;
		}
	} else
@@ -994,6 +999,8 @@ static void make_request(struct mddev *mddev, struct bio * bio)
	const int rw = bio_data_dir(bio);
	const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
	const unsigned long do_flush_fua = (bio->bi_rw & (REQ_FLUSH | REQ_FUA));
+	const unsigned long do_discard = (bio->bi_rw
+					  & (REQ_DISCARD | REQ_SECURE));
	struct md_rdev *blocked_rdev;
	struct blk_plug_cb *cb;
	struct raid1_plug_cb *plug = NULL;
@@ -1295,7 +1302,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
			   conf->mirrors[i].rdev->data_offset);
		mbio->bi_bdev = conf->mirrors[i].rdev->bdev;
		mbio->bi_end_io	= raid1_end_write_request;
-		mbio->bi_rw = WRITE | do_flush_fua | do_sync;
+		mbio->bi_rw = WRITE | do_flush_fua | do_sync | do_discard;
		mbio->bi_private = r1_bio;
 
		atomic_inc(&r1_bio->remaining);
@@ -1549,6 +1556,8 @@ static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev)
		clear_bit(Unmerged, &rdev->flags);
	}
	md_integrity_add_rdev(rdev, mddev);
+	if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
+		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
	print_conf(conf);
	return err;
 }
@@ -1867,7 +1876,7 @@ static int process_checks(struct r1bio *r1_bio)
		} else
			j = 0;
		if (j >= 0)
-			mddev->resync_mismatches += r1_bio->sectors;
+			atomic64_add(r1_bio->sectors, &mddev->resync_mismatches);
		if (j < 0 || (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)
			      && test_bit(BIO_UPTODATE, &sbio->bi_flags))) {
			/* No need to write to this device. */
@@ -2220,6 +2229,7 @@ static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio)
		unfreeze_array(conf);
	} else
		md_error(mddev, conf->mirrors[r1_bio->read_disk].rdev);
+	rdev_dec_pending(conf->mirrors[r1_bio->read_disk].rdev, conf->mddev);
 
	bio = r1_bio->bios[r1_bio->read_disk];
	bdevname(bio->bi_bdev, b);
@@ -2285,8 +2295,9 @@ static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio)
	}
 }
 
-static void raid1d(struct mddev *mddev)
+static void raid1d(struct md_thread *thread)
 {
+	struct mddev *mddev = thread->mddev;
	struct r1bio *r1_bio;
	unsigned long flags;
	struct r1conf *conf = mddev->private;
@@ -2783,6 +2794,7 @@ static int run(struct mddev *mddev)
	int i;
	struct md_rdev *rdev;
	int ret;
+	bool discard_supported = false;
 
	if (mddev->level != 1) {
		printk(KERN_ERR "md/raid1:%s: raid level not set to mirroring (%d)\n",
@@ -2812,6 +2824,8 @@ static int run(struct mddev *mddev)
			continue;
		disk_stack_limits(mddev->gendisk, rdev->bdev,
				  rdev->data_offset << 9);
+		if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
+			discard_supported = true;
	}
 
	mddev->degraded = 0;
@@ -2846,6 +2860,13 @@ static int run(struct mddev *mddev)
		mddev->queue->backing_dev_info.congested_fn = raid1_congested;
		mddev->queue->backing_dev_info.congested_data = mddev;
		blk_queue_merge_bvec(mddev->queue, raid1_mergeable_bvec);
+
+		if (discard_supported)
+			queue_flag_set_unlocked(QUEUE_FLAG_DISCARD,
+						mddev->queue);
+		else
+			queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD,
+						  mddev->queue);
	}
 
	ret = md_integrity_register(mddev);
drivers/md/raid10.c

@@ -911,7 +911,12 @@ static void flush_pending_writes(struct r10conf *conf)
		while (bio) { /* submit pending writes */
			struct bio *next = bio->bi_next;
			bio->bi_next = NULL;
-			generic_make_request(bio);
+			if (unlikely((bio->bi_rw & REQ_DISCARD) &&
+			    !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
+				/* Just ignore it */
+				bio_endio(bio, 0);
+			else
+				generic_make_request(bio);
			bio = next;
		}
	} else
@@ -1050,6 +1055,44 @@ static sector_t choose_data_offset(struct r10bio *r10_bio,
		return rdev->new_data_offset;
 }
 
+struct raid10_plug_cb {
+	struct blk_plug_cb	cb;
+	struct bio_list		pending;
+	int			pending_cnt;
+};
+
+static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
+{
+	struct raid10_plug_cb *plug = container_of(cb, struct raid10_plug_cb,
+						   cb);
+	struct mddev *mddev = plug->cb.data;
+	struct r10conf *conf = mddev->private;
+	struct bio *bio;
+
+	if (from_schedule) {
+		spin_lock_irq(&conf->device_lock);
+		bio_list_merge(&conf->pending_bio_list, &plug->pending);
+		conf->pending_count += plug->pending_cnt;
+		spin_unlock_irq(&conf->device_lock);
+		md_wakeup_thread(mddev->thread);
+		kfree(plug);
+		return;
+	}
+
+	/* we aren't scheduling, so we can do the write-out directly. */
+	bio = bio_list_get(&plug->pending);
+	bitmap_unplug(mddev->bitmap);
+	wake_up(&conf->wait_barrier);
+
+	while (bio) { /* submit pending writes */
+		struct bio *next = bio->bi_next;
+		bio->bi_next = NULL;
+		generic_make_request(bio);
+		bio = next;
+	}
+	kfree(plug);
+}
+
 static void make_request(struct mddev *mddev, struct bio * bio)
 {
	struct r10conf *conf = mddev->private;
@@ -1061,8 +1104,12 @@ static void make_request(struct mddev *mddev, struct bio * bio)
	const int rw = bio_data_dir(bio);
	const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
	const unsigned long do_fua = (bio->bi_rw & REQ_FUA);
+	const unsigned long do_discard = (bio->bi_rw
+					  & (REQ_DISCARD | REQ_SECURE));
	unsigned long flags;
	struct md_rdev *blocked_rdev;
+	struct blk_plug_cb *cb;
+	struct raid10_plug_cb *plug = NULL;
	int sectors_handled;
	int max_sectors;
	int sectors;
@@ -1081,7 +1128,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
		     || conf->prev.near_copies < conf->prev.raid_disks))) {
		struct bio_pair *bp;
		/* Sanity check -- queue functions should prevent this happening */
-		if (bio->bi_vcnt != 1 ||
+		if ((bio->bi_vcnt != 1 && bio->bi_vcnt != 0) ||
		    bio->bi_idx != 0)
			goto bad_map;
		/* This is a one page bio that upper layers
@@ -1410,15 +1457,26 @@ static void make_request(struct mddev *mddev, struct bio * bio)
							      conf->mirrors[d].rdev));
			mbio->bi_bdev = conf->mirrors[d].rdev->bdev;
			mbio->bi_end_io	= raid10_end_write_request;
-			mbio->bi_rw = WRITE | do_sync | do_fua;
+			mbio->bi_rw = WRITE | do_sync | do_fua | do_discard;
			mbio->bi_private = r10_bio;
 
			atomic_inc(&r10_bio->remaining);
+
+			cb = blk_check_plugged(raid10_unplug, mddev, sizeof(*plug));
+			if (cb)
+				plug = container_of(cb, struct raid10_plug_cb, cb);
+			else
+				plug = NULL;
			spin_lock_irqsave(&conf->device_lock, flags);
-			bio_list_add(&conf->pending_bio_list, mbio);
-			conf->pending_count++;
+			if (plug) {
+				bio_list_add(&plug->pending, mbio);
+				plug->pending_cnt++;
+			} else {
+				bio_list_add(&conf->pending_bio_list, mbio);
+				conf->pending_count++;
+			}
			spin_unlock_irqrestore(&conf->device_lock, flags);
-			if (!mddev_check_plugged(mddev))
+			if (!plug)
				md_wakeup_thread(mddev->thread);
 
			if (!r10_bio->devs[i].repl_bio)
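Editorial note: with this change, writes issued while a blk_plug is active are queued on a per-plug list and flushed in one batch by raid10_unplug() above, rather than always bouncing through the md thread. A hedged sketch of the blk_check_plugged() idiom (my_plug_cb/my_unplug are made-up names; the from_schedule path and allocation-failure fallback are elided here):

struct my_plug_cb {
	struct blk_plug_cb cb;		/* embedded; recovered via container_of */
	struct bio_list pending;	/* bios queued during this plug */
};

static void my_unplug(struct blk_plug_cb *cb, bool from_schedule)
{
	struct my_plug_cb *plug = container_of(cb, struct my_plug_cb, cb);
	struct bio *bio = bio_list_get(&plug->pending);

	while (bio) {			/* flush the whole batch at once */
		struct bio *next = bio->bi_next;
		bio->bi_next = NULL;
		generic_make_request(bio);
		bio = next;
	}
	kfree(plug);
}

/* in the submission path, the first call inside a plugged section
 * allocates the callback, sized for the embedding structure:
 *	cb = blk_check_plugged(my_unplug, data, sizeof(struct my_plug_cb));
 */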
@@ -1439,7 +1497,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
							      conf->mirrors[d].replacement));
			mbio->bi_bdev = conf->mirrors[d].replacement->bdev;
			mbio->bi_end_io	= raid10_end_write_request;
-			mbio->bi_rw = WRITE | do_sync | do_fua;
+			mbio->bi_rw = WRITE | do_sync | do_fua | do_discard;
			mbio->bi_private = r10_bio;
 
			atomic_inc(&r10_bio->remaining);
@@ -1638,7 +1696,7 @@ static int raid10_spare_active(struct mddev *mddev)
			   && !test_bit(Faulty, &tmp->rdev->flags)
			   && !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
			count++;
-			sysfs_notify_dirent(tmp->rdev->sysfs_state);
+			sysfs_notify_dirent_safe(tmp->rdev->sysfs_state);
		}
	}
	spin_lock_irqsave(&conf->device_lock, flags);
@@ -1725,6 +1783,9 @@ static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
		clear_bit(Unmerged, &rdev->flags);
	}
	md_integrity_add_rdev(rdev, mddev);
+	if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
+		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
+
	print_conf(conf);
	return err;
 }
@@ -1952,7 +2013,7 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
				break;
		if (j == vcnt)
			continue;
-		mddev->resync_mismatches += r10_bio->sectors;
+		atomic64_add(r10_bio->sectors, &mddev->resync_mismatches);
		if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
			/* Don't fix anything. */
			continue;
@@ -2673,8 +2734,9 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
	}
 }
 
-static void raid10d(struct mddev *mddev)
+static void raid10d(struct md_thread *thread)
 {
+	struct mddev *mddev = thread->mddev;
	struct r10bio *r10_bio;
	unsigned long flags;
	struct r10conf *conf = mddev->private;
@@ -3158,7 +3220,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
				else {
					bad_sectors -= (sector - first_bad);
					if (max_sync > bad_sectors)
-						max_sync = max_sync;
+						max_sync = bad_sectors;
					continue;
				}
			}
@@ -3482,6 +3544,7 @@ static int run(struct mddev *mddev)
	sector_t size;
	sector_t min_offset_diff = 0;
	int first = 1;
+	bool discard_supported = false;
 
	if (mddev->private == NULL) {
		conf = setup_conf(mddev);
@@ -3498,6 +3561,8 @@ static int run(struct mddev *mddev)
 
	chunk_size = mddev->chunk_sectors << 9;
	if (mddev->queue) {
+		blk_queue_max_discard_sectors(mddev->queue,
+					      mddev->chunk_sectors);
		blk_queue_io_min(mddev->queue, chunk_size);
		if (conf->geo.raid_disks % conf->geo.near_copies)
			blk_queue_io_opt(mddev->queue, chunk_size * conf->geo.raid_disks);
@@ -3543,8 +3608,16 @@ static int run(struct mddev *mddev)
				  rdev->data_offset << 9);
 
		disk->head_position = 0;
+
+		if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
+			discard_supported = true;
	}
 
+	if (discard_supported)
+		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
+	else
+		queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
+
	/* need to check that every block has at least one working mirror */
	if (!enough(conf, -1)) {
		printk(KERN_ERR "md/raid10:%s: not enough operational mirrors.\n",
drivers/md/raid5.c

@@ -551,6 +551,8 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
				rw = WRITE_FUA;
			else
				rw = WRITE;
+			if (test_bit(R5_Discard, &sh->dev[i].flags))
+				rw |= REQ_DISCARD;
		} else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags))
			rw = READ;
		else if (test_and_clear_bit(R5_WantReplace,
@@ -1174,8 +1176,11 @@ ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
				set_bit(R5_WantFUA, &dev->flags);
			if (wbi->bi_rw & REQ_SYNC)
				set_bit(R5_SyncIO, &dev->flags);
-			tx = async_copy_data(1, wbi, dev->page,
-					     dev->sector, tx);
+			if (wbi->bi_rw & REQ_DISCARD)
+				set_bit(R5_Discard, &dev->flags);
+			else
+				tx = async_copy_data(1, wbi, dev->page,
+						     dev->sector, tx);
			wbi = r5_next_bio(wbi, dev->sector);
		}
	}
@@ -1191,7 +1196,7 @@ static void ops_complete_reconstruct(void *stripe_head_ref)
	int pd_idx = sh->pd_idx;
	int qd_idx = sh->qd_idx;
	int i;
-	bool fua = false, sync = false;
+	bool fua = false, sync = false, discard = false;
 
	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);
@@ -1199,13 +1204,15 @@ static void ops_complete_reconstruct(void *stripe_head_ref)
	for (i = disks; i--; ) {
		fua |= test_bit(R5_WantFUA, &sh->dev[i].flags);
		sync |= test_bit(R5_SyncIO, &sh->dev[i].flags);
+		discard |= test_bit(R5_Discard, &sh->dev[i].flags);
	}
 
	for (i = disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];
 
		if (dev->written || i == pd_idx || i == qd_idx) {
-			set_bit(R5_UPTODATE, &dev->flags);
+			if (!discard)
+				set_bit(R5_UPTODATE, &dev->flags);
			if (fua)
				set_bit(R5_WantFUA, &dev->flags);
			if (sync)
@@ -1241,6 +1248,18 @@ ops_run_reconstruct5(struct stripe_head *sh, struct raid5_percpu *percpu,
	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);
 
+	for (i = 0; i < sh->disks; i++) {
+		if (pd_idx == i)
+			continue;
+		if (!test_bit(R5_Discard, &sh->dev[i].flags))
+			break;
+	}
+	if (i >= sh->disks) {
+		atomic_inc(&sh->count);
+		set_bit(R5_Discard, &sh->dev[pd_idx].flags);
+		ops_complete_reconstruct(sh);
+		return;
+	}
	/* check if prexor is active which means only process blocks
	 * that are part of a read-modify-write (written)
	 */
@@ -1285,10 +1304,24 @@ ops_run_reconstruct6(struct stripe_head *sh, struct raid5_percpu *percpu,
 {
	struct async_submit_ctl submit;
	struct page **blocks = percpu->scribble;
-	int count;
+	int count, i;
 
	pr_debug("%s: stripe %llu\n", __func__, (unsigned long long)sh->sector);
 
+	for (i = 0; i < sh->disks; i++) {
+		if (sh->pd_idx == i || sh->qd_idx == i)
+			continue;
+		if (!test_bit(R5_Discard, &sh->dev[i].flags))
+			break;
+	}
+	if (i >= sh->disks) {
+		atomic_inc(&sh->count);
+		set_bit(R5_Discard, &sh->dev[sh->pd_idx].flags);
+		set_bit(R5_Discard, &sh->dev[sh->qd_idx].flags);
+		ops_complete_reconstruct(sh);
+		return;
+	}
+
	count = set_syndrome_sources(blocks, sh);
 
	atomic_inc(&sh->count);
@@ -2408,11 +2441,11 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
		if (sector >= sh->dev[dd_idx].sector + STRIPE_SECTORS)
			set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags);
	}
-	spin_unlock_irq(&sh->stripe_lock);
 
	pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n",
		(unsigned long long)(*bip)->bi_sector,
		(unsigned long long)sh->sector, dd_idx);
+	spin_unlock_irq(&sh->stripe_lock);
 
	if (conf->mddev->bitmap && firstwrite) {
		bitmap_startwrite(conf->mddev->bitmap, sh->sector,
@@ -2479,10 +2512,8 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
		bi = sh->dev[i].towrite;
		sh->dev[i].towrite = NULL;
		spin_unlock_irq(&sh->stripe_lock);
-		if (bi) {
-			s->to_write--;
+		if (bi)
			bitmap_end = 1;
-		}
 
		if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
			wake_up(&conf->wait_for_overlap);
@@ -2524,11 +2555,12 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
		if (!test_bit(R5_Wantfill, &sh->dev[i].flags) &&
		    (!test_bit(R5_Insync, &sh->dev[i].flags) ||
		      test_bit(R5_ReadError, &sh->dev[i].flags))) {
+			spin_lock_irq(&sh->stripe_lock);
			bi = sh->dev[i].toread;
			sh->dev[i].toread = NULL;
+			spin_unlock_irq(&sh->stripe_lock);
			if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
				wake_up(&conf->wait_for_overlap);
-			if (bi) s->to_read--;
			while (bi && bi->bi_sector <
			       sh->dev[i].sector + STRIPE_SECTORS) {
				struct bio *nextbi =
@@ -2741,7 +2773,8 @@ static void handle_stripe_clean_event(struct r5conf *conf,
		if (sh->dev[i].written) {
			dev = &sh->dev[i];
			if (!test_bit(R5_LOCKED, &dev->flags) &&
-			    test_bit(R5_UPTODATE, &dev->flags)) {
+			    (test_bit(R5_UPTODATE, &dev->flags) ||
+			     test_and_clear_bit(R5_Discard, &dev->flags))) {
				/* We can return any write requests */
				struct bio *wbi, *wbi2;
				pr_debug("Return write for disc %d\n", i);
@@ -2775,12 +2808,25 @@ static void handle_stripe_dirtying(struct r5conf *conf,
			   int disks)
 {
	int rmw = 0, rcw = 0, i;
-	if (conf->max_degraded == 2) {
-		/* RAID6 requires 'rcw' in current implementation
-		 * Calculate the real rcw later - for now fake it
+	sector_t recovery_cp = conf->mddev->recovery_cp;
+
+	/* RAID6 requires 'rcw' in current implementation.
+	 * Otherwise, check whether resync is now happening or should start.
+	 * If yes, then the array is dirty (after unclean shutdown or
+	 * initial creation), so parity in some stripes might be inconsistent.
+	 * In this case, we need to always do reconstruct-write, to ensure
+	 * that in case of drive failure or read-error correction, we
+	 * generate correct data from the parity.
+	 */
+	if (conf->max_degraded == 2 ||
+	    (recovery_cp < MaxSector && sh->sector >= recovery_cp)) {
+		/* Calculate the real rcw later - for now make it
		 * look like rcw is cheaper
		 */
		rcw = 1; rmw = 2;
+		pr_debug("force RCW max_degraded=%u, recovery_cp=%llu sh->sector=%llu\n",
+			 conf->max_degraded, (unsigned long long)recovery_cp,
+			 (unsigned long long)sh->sector);
	} else for (i = disks; i--; ) {
		/* would I have to read this buffer for read_modify_write */
		struct r5dev *dev = &sh->dev[i];
@@ -2932,7 +2978,7 @@ static void handle_parity_checks5(struct r5conf *conf, struct stripe_head *sh,
			 */
			set_bit(STRIPE_INSYNC, &sh->state);
		else {
-			conf->mddev->resync_mismatches += STRIPE_SECTORS;
+			atomic64_add(STRIPE_SECTORS, &conf->mddev->resync_mismatches);
			if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery))
				/* don't try to repair!! */
				set_bit(STRIPE_INSYNC, &sh->state);
@@ -3084,7 +3130,7 @@ static void handle_parity_checks6(struct r5conf *conf, struct stripe_head *sh,
				 */
			}
		} else {
-			conf->mddev->resync_mismatches += STRIPE_SECTORS;
+			atomic64_add(STRIPE_SECTORS, &conf->mddev->resync_mismatches);
			if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery))
				/* don't try to repair!! */
				set_bit(STRIPE_INSYNC, &sh->state);
@@ -3459,10 +3505,12 @@ static void handle_stripe(struct stripe_head *sh)
	if (s.written &&
	    (s.p_failed || ((test_bit(R5_Insync, &pdev->flags)
			     && !test_bit(R5_LOCKED, &pdev->flags)
-			     && test_bit(R5_UPTODATE, &pdev->flags)))) &&
+			     && (test_bit(R5_UPTODATE, &pdev->flags) ||
+				 test_bit(R5_Discard, &pdev->flags))))) &&
	    (s.q_failed || ((test_bit(R5_Insync, &qdev->flags)
			     && !test_bit(R5_LOCKED, &qdev->flags)
-			     && test_bit(R5_UPTODATE, &qdev->flags)))))
+			     && (test_bit(R5_UPTODATE, &qdev->flags) ||
+				 test_bit(R5_Discard, &qdev->flags))))))
		handle_stripe_clean_event(conf, sh, disks, &s.return_bi);
 
	/* Now we might consider reading some blocks, either to check/generate
@@ -3489,9 +3537,11 @@ static void handle_stripe(struct stripe_head *sh)
		/* All the 'written' buffers and the parity block are ready to
		 * be written back to disk
		 */
-		BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags));
+		BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags) &&
+		       !test_bit(R5_Discard, &sh->dev[sh->pd_idx].flags));
		BUG_ON(sh->qd_idx >= 0 &&
-		       !test_bit(R5_UPTODATE, &sh->dev[sh->qd_idx].flags));
+		       !test_bit(R5_UPTODATE, &sh->dev[sh->qd_idx].flags) &&
+		       !test_bit(R5_Discard, &sh->dev[sh->qd_idx].flags));
		for (i = disks; i--; ) {
			struct r5dev *dev = &sh->dev[i];
			if (test_bit(R5_LOCKED, &dev->flags) &&
@@ -4072,6 +4122,88 @@ static void release_stripe_plug(struct mddev *mddev,
		release_stripe(sh);
 }
 
+static void make_discard_request(struct mddev *mddev, struct bio *bi)
+{
+	struct r5conf *conf = mddev->private;
+	sector_t logical_sector, last_sector;
+	struct stripe_head *sh;
+	int remaining;
+	int stripe_sectors;
+
+	if (mddev->reshape_position != MaxSector)
+		/* Skip discard while reshape is happening */
+		return;
+
+	logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
+	last_sector = bi->bi_sector + (bi->bi_size>>9);
+
+	bi->bi_next = NULL;
+	bi->bi_phys_segments = 1; /* over-loaded to count active stripes */
+
+	stripe_sectors = conf->chunk_sectors *
+		(conf->raid_disks - conf->max_degraded);
+	logical_sector = DIV_ROUND_UP_SECTOR_T(logical_sector,
+					       stripe_sectors);
+	sector_div(last_sector, stripe_sectors);
+
+	logical_sector *= conf->chunk_sectors;
+	last_sector *= conf->chunk_sectors;
+
+	for (; logical_sector < last_sector;
+	     logical_sector += STRIPE_SECTORS) {
+		DEFINE_WAIT(w);
+		int d;
+	again:
+		sh = get_active_stripe(conf, logical_sector, 0, 0, 0);
+		prepare_to_wait(&conf->wait_for_overlap, &w,
+				TASK_UNINTERRUPTIBLE);
+		spin_lock_irq(&sh->stripe_lock);
+		for (d = 0; d < conf->raid_disks; d++) {
+			if (d == sh->pd_idx || d == sh->qd_idx)
+				continue;
+			if (sh->dev[d].towrite || sh->dev[d].toread) {
+				set_bit(R5_Overlap, &sh->dev[d].flags);
+				spin_unlock_irq(&sh->stripe_lock);
+				release_stripe(sh);
+				schedule();
+				goto again;
+			}
+		}
+		finish_wait(&conf->wait_for_overlap, &w);
+		for (d = 0; d < conf->raid_disks; d++) {
+			if (d == sh->pd_idx || d == sh->qd_idx)
+				continue;
+			sh->dev[d].towrite = bi;
+			set_bit(R5_OVERWRITE, &sh->dev[d].flags);
+			raid5_inc_bi_active_stripes(bi);
+		}
+		spin_unlock_irq(&sh->stripe_lock);
+		if (conf->mddev->bitmap) {
+			for (d = 0;
+			     d < conf->raid_disks - conf->max_degraded;
+			     d++)
+				bitmap_startwrite(mddev->bitmap,
+						  sh->sector,
+						  STRIPE_SECTORS,
+						  0);
+			sh->bm_seq = conf->seq_flush + 1;
+			set_bit(STRIPE_BIT_DELAY, &sh->state);
+		}
+
+		set_bit(STRIPE_HANDLE, &sh->state);
+		clear_bit(STRIPE_DELAYED, &sh->state);
+		if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
+			atomic_inc(&conf->preread_active_stripes);
+		release_stripe_plug(mddev, sh);
+	}
+
+	remaining = raid5_dec_bi_active_stripes(bi);
+	if (remaining == 0) {
+		md_write_end(mddev);
+		bio_endio(bi, 0);
+	}
+}
+
 static void make_request(struct mddev *mddev, struct bio * bi)
 {
	struct r5conf *conf = mddev->private;
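Editorial note: a raid5/6 discard only makes sense over full stripes, so make_discard_request() above rounds the requested range inward to multiples of stripe_sectors = chunk_sectors * (raid_disks - max_degraded) before queuing work. A hedged userspace sketch of just that arithmetic, assuming chunk_sectors = 8, raid_disks = 4, max_degraded = 1 (3 data disks, so one full stripe spans 24 sectors):

#include <stdio.h>

int main(void)
{
	unsigned long long bi_sector = 30, bi_size_sectors = 100;
	unsigned chunk_sectors = 8, raid_disks = 4, max_degraded = 1;

	unsigned stripe_sectors = chunk_sectors * (raid_disks - max_degraded);
	unsigned long long first = bi_sector;
	unsigned long long last = bi_sector + bi_size_sectors;

	/* round the start up and the end down to whole stripes */
	unsigned long long lo = (first + stripe_sectors - 1) / stripe_sectors;
	unsigned long long hi = last / stripe_sectors;

	/* sectors [48, 120) survive for this input; the partial
	 * stripes at either end are simply not discarded */
	printf("discard stripes [%llu, %llu): sectors [%llu, %llu)\n",
	       lo, hi, lo * stripe_sectors, hi * stripe_sectors);
	return 0;
}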
@@ -4094,6 +4226,11 @@ static void make_request(struct mddev *mddev, struct bio * bi)
	     chunk_aligned_read(mddev,bi))
		return;
 
+	if (unlikely(bi->bi_rw & REQ_DISCARD)) {
+		make_discard_request(mddev, bi);
+		return;
+	}
+
	logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
	last_sector = bi->bi_sector + (bi->bi_size>>9);
	bi->bi_next = NULL;
@@ -4630,8 +4767,9 @@ static int handle_active_stripes(struct r5conf *conf)
 * During the scan, completed stripes are saved for us by the interrupt
 * handler, so that they will not have to wait for our next wakeup.
 */
-static void raid5d(struct mddev *mddev)
+static void raid5d(struct md_thread *thread)
 {
+	struct mddev *mddev = thread->mddev;
	struct r5conf *conf = mddev->private;
	int handled;
	struct blk_plug plug;
@@ -5366,6 +5504,7 @@ static int run(struct mddev *mddev)
 
	if (mddev->queue) {
		int chunk_size;
+		bool discard_supported = true;
		/* read-ahead size must cover two whole stripes, which
		 * is 2 * (datadisks) * chunksize where 'n' is the
		 * number of raid devices
@@ -5385,13 +5524,48 @@ static int run(struct mddev *mddev)
		blk_queue_io_min(mddev->queue, chunk_size);
		blk_queue_io_opt(mddev->queue, chunk_size *
				 (conf->raid_disks - conf->max_degraded));
+		/*
+		 * We can only discard a whole stripe. It doesn't make sense to
+		 * discard data disk but write parity disk
+		 */
+		stripe = stripe * PAGE_SIZE;
+		mddev->queue->limits.discard_alignment = stripe;
+		mddev->queue->limits.discard_granularity = stripe;
+		/*
+		 * unaligned part of discard request will be ignored, so can't
+		 * guarantee discard_zeroes_data
+		 */
+		mddev->queue->limits.discard_zeroes_data = 0;
 
		rdev_for_each(rdev, mddev) {
			disk_stack_limits(mddev->gendisk, rdev->bdev,
					  rdev->data_offset << 9);
			disk_stack_limits(mddev->gendisk, rdev->bdev,
					  rdev->new_data_offset << 9);
+			/*
+			 * discard_zeroes_data is required, otherwise data
+			 * could be lost. Consider a scenario: discard a stripe
+			 * (the stripe could be inconsistent if
+			 * discard_zeroes_data is 0); write one disk of the
+			 * stripe (the stripe could be inconsistent again
+			 * depending on which disks are used to calculate
+			 * parity); the disk is broken; The stripe data of this
+			 * disk is lost.
+			 */
+			if (!blk_queue_discard(bdev_get_queue(rdev->bdev)) ||
+			    !bdev_get_queue(rdev->bdev)->
+						limits.discard_zeroes_data)
+				discard_supported = false;
		}
+
+		if (discard_supported &&
+		    mddev->queue->limits.max_discard_sectors >= stripe &&
+		    mddev->queue->limits.discard_granularity >= stripe)
+			queue_flag_set_unlocked(QUEUE_FLAG_DISCARD,
+						mddev->queue);
+		else
+			queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD,
+						  mddev->queue);
	}
 
	return 0;
@@ -5702,7 +5876,8 @@ static int check_reshape(struct mddev *mddev)
	if (!check_stripe_cache(mddev))
		return -ENOSPC;
 
-	return resize_stripes(conf, conf->raid_disks + mddev->delta_disks);
+	return resize_stripes(conf, (conf->previous_raid_disks
+				     + mddev->delta_disks));
 }
 
 static int raid5_start_reshape(struct mddev *mddev)
drivers/md/raid5.h

@@ -298,6 +298,7 @@ enum r5dev_flags {
	R5_WantReplace, /* We need to update the replacement, we have read
			 * data in, and now is a good time to write it out.
			 */
+	R5_Discard,	/* Discard the stripe */
 };
 
 /*