md/raid5: use ->lock to protect accessing raid5 sysfs attributes.
It is important that mddev->private is not freed while a sysfs attribute function is accessing it. Therefore, use mddev->lock to protect the setting of ->private to NULL, and take that same lock when checking ->private for NULL and dereferencing it in the sysfs access functions. This only applies to the read ('show') side of access; write access will be handled separately. Signed-off-by: NeilBrown <neilb@suse.de>
This commit is contained in:
parent
f97fcad38f
commit
7b1485bab9
|
@ -5354,11 +5354,14 @@ static void raid5d(struct md_thread *thread)
|
|||
static ssize_t
|
||||
raid5_show_stripe_cache_size(struct mddev *mddev, char *page)
|
||||
{
|
||||
struct r5conf *conf = mddev->private;
|
||||
struct r5conf *conf;
|
||||
int ret = 0;
|
||||
spin_lock(&mddev->lock);
|
||||
conf = mddev->private;
|
||||
if (conf)
|
||||
return sprintf(page, "%d\n", conf->max_nr_stripes);
|
||||
else
|
||||
return 0;
|
||||
ret = sprintf(page, "%d\n", conf->max_nr_stripes);
|
||||
spin_unlock(&mddev->lock);
|
||||
return ret;
|
||||
}
|
||||
|
||||
int
|
||||
|
@ -5422,11 +5425,14 @@ raid5_stripecache_size = __ATTR(stripe_cache_size, S_IRUGO | S_IWUSR,
|
|||
static ssize_t
|
||||
raid5_show_preread_threshold(struct mddev *mddev, char *page)
|
||||
{
|
||||
struct r5conf *conf = mddev->private;
|
||||
struct r5conf *conf;
|
||||
int ret = 0;
|
||||
spin_lock(&mddev->lock);
|
||||
conf = mddev->private;
|
||||
if (conf)
|
||||
return sprintf(page, "%d\n", conf->bypass_threshold);
|
||||
else
|
||||
return 0;
|
||||
ret = sprintf(page, "%d\n", conf->bypass_threshold);
|
||||
spin_unlock(&mddev->lock);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static ssize_t
|
||||
|
@ -5456,11 +5462,14 @@ raid5_preread_bypass_threshold = __ATTR(preread_bypass_threshold,
|
|||
static ssize_t
|
||||
raid5_show_skip_copy(struct mddev *mddev, char *page)
|
||||
{
|
||||
struct r5conf *conf = mddev->private;
|
||||
struct r5conf *conf;
|
||||
int ret = 0;
|
||||
spin_lock(&mddev->lock);
|
||||
conf = mddev->private;
|
||||
if (conf)
|
||||
return sprintf(page, "%d\n", conf->skip_copy);
|
||||
else
|
||||
return 0;
|
||||
ret = sprintf(page, "%d\n", conf->skip_copy);
|
||||
spin_unlock(&mddev->lock);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static ssize_t
|
||||
|
@ -5512,11 +5521,14 @@ raid5_stripecache_active = __ATTR_RO(stripe_cache_active);
|
|||
static ssize_t
|
||||
raid5_show_group_thread_cnt(struct mddev *mddev, char *page)
|
||||
{
|
||||
struct r5conf *conf = mddev->private;
|
||||
struct r5conf *conf;
|
||||
int ret = 0;
|
||||
spin_lock(&mddev->lock);
|
||||
conf = mddev->private;
|
||||
if (conf)
|
||||
return sprintf(page, "%d\n", conf->worker_cnt_per_group);
|
||||
else
|
||||
return 0;
|
||||
ret = sprintf(page, "%d\n", conf->worker_cnt_per_group);
|
||||
spin_unlock(&mddev->lock);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int alloc_thread_groups(struct r5conf *conf, int cnt,
|
||||
|
|
Loading…
Reference in New Issue