Merge branch 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/shli/md into for-linus
Pull the pending 4.21 changes for md from Shaohua.

* 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/shli/md:
  md: fix raid10 hang issue caused by barrier
  raid10: refactor common wait code from regular read/write request
  md: remvoe redundant condition check
  lib/raid6: add option to skip algo benchmarking
  lib/raid6: sort algos in rough performance order
  lib/raid6: check for assembler SSSE3 support
  lib/raid6: avoid __attribute_const__ redefinition
  lib/raid6: add missing include for raid6test
  md: remove set but not used variable 'bi_rdev'
commit dc629c211c
drivers/md/md.c
@@ -2147,14 +2147,12 @@ EXPORT_SYMBOL(md_integrity_register);
  */
 int md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev)
 {
-        struct blk_integrity *bi_rdev;
         struct blk_integrity *bi_mddev;
         char name[BDEVNAME_SIZE];

         if (!mddev->gendisk)
                 return 0;

-        bi_rdev = bdev_get_integrity(rdev->bdev);
         bi_mddev = blk_get_integrity(mddev->gendisk);

         if (!bi_mddev) /* nothing to do */
@@ -5693,14 +5691,10 @@ int md_run(struct mddev *mddev)
         return 0;

 abort:
-        if (mddev->flush_bio_pool) {
-                mempool_destroy(mddev->flush_bio_pool);
-                mddev->flush_bio_pool = NULL;
-        }
-        if (mddev->flush_pool){
-                mempool_destroy(mddev->flush_pool);
-                mddev->flush_pool = NULL;
-        }
+        mempool_destroy(mddev->flush_bio_pool);
+        mddev->flush_bio_pool = NULL;
+        mempool_destroy(mddev->flush_pool);
+        mddev->flush_pool = NULL;

         return err;
 }
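The removed if () guards are redundant because mempool_destroy(), like kfree(), simply returns when handed a NULL pointer, so the abort path can call it unconditionally. A minimal userspace sketch of the same convention; pool and pool_destroy are invented names for illustration, not the kernel API:

#include <stdio.h>
#include <stdlib.h>

/* Invented stand-in for a pool type; only the NULL-safety matters here. */
struct pool { void *mem; };

static void pool_destroy(struct pool *p)
{
        if (!p)                 /* NULL-safe, mirroring mempool_destroy() */
                return;
        free(p->mem);
        free(p);
}

int main(void)
{
        struct pool *flush_pool = NULL; /* never allocated on this error path */

        /* The pattern the commit removes:
         *   if (flush_pool) { pool_destroy(flush_pool); flush_pool = NULL; }
         * The guard adds nothing, because the callee already checks. */
        pool_destroy(flush_pool);
        flush_pool = NULL;

        puts("destroying a NULL pool is a no-op");
        return 0;
}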
drivers/md/raid10.c
@@ -1124,6 +1124,29 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
         kfree(plug);
 }

+/*
+ * 1. Register the new request and wait if the reconstruction thread has put
+ * up a bar for new requests. Continue immediately if no resync is active
+ * currently.
+ * 2. If IO spans the reshape position. Need to wait for reshape to pass.
+ */
+static void regular_request_wait(struct mddev *mddev, struct r10conf *conf,
+                                 struct bio *bio, sector_t sectors)
+{
+        wait_barrier(conf);
+        while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
+            bio->bi_iter.bi_sector < conf->reshape_progress &&
+            bio->bi_iter.bi_sector + sectors > conf->reshape_progress) {
+                raid10_log(conf->mddev, "wait reshape");
+                allow_barrier(conf);
+                wait_event(conf->wait_barrier,
+                           conf->reshape_progress <= bio->bi_iter.bi_sector ||
+                           conf->reshape_progress >= bio->bi_iter.bi_sector +
+                           sectors);
+                wait_barrier(conf);
+        }
+}
+
 static void raid10_read_request(struct mddev *mddev, struct bio *bio,
                                 struct r10bio *r10_bio)
 {
@@ -1132,7 +1155,6 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
         const int op = bio_op(bio);
         const unsigned long do_sync = (bio->bi_opf & REQ_SYNC);
         int max_sectors;
-        sector_t sectors;
         struct md_rdev *rdev;
         char b[BDEVNAME_SIZE];
         int slot = r10_bio->read_slot;
@@ -1166,30 +1188,8 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
                 }
                 rcu_read_unlock();
         }
-        /*
-         * Register the new request and wait if the reconstruction
-         * thread has put up a bar for new requests.
-         * Continue immediately if no resync is active currently.
-         */
-        wait_barrier(conf);
-
-        sectors = r10_bio->sectors;
-        while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
-            bio->bi_iter.bi_sector < conf->reshape_progress &&
-            bio->bi_iter.bi_sector + sectors > conf->reshape_progress) {
-                /*
-                 * IO spans the reshape position. Need to wait for reshape to
-                 * pass
-                 */
-                raid10_log(conf->mddev, "wait reshape");
-                allow_barrier(conf);
-                wait_event(conf->wait_barrier,
-                           conf->reshape_progress <= bio->bi_iter.bi_sector ||
-                           conf->reshape_progress >= bio->bi_iter.bi_sector +
-                           sectors);
-                wait_barrier(conf);
-        }
-
+        regular_request_wait(mddev, conf, bio, r10_bio->sectors);
         rdev = read_balance(conf, r10_bio, &max_sectors);
         if (!rdev) {
                 if (err_rdev) {
@@ -1209,7 +1209,9 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
                 struct bio *split = bio_split(bio, max_sectors,
                                               gfp, &conf->bio_split);
                 bio_chain(split, bio);
+                allow_barrier(conf);
                 generic_make_request(bio);
+                wait_barrier(conf);
                 bio = split;
                 r10_bio->master_bio = bio;
                 r10_bio->sectors = max_sectors;
@@ -1332,30 +1334,8 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio,
                 finish_wait(&conf->wait_barrier, &w);
         }

-        /*
-         * Register the new request and wait if the reconstruction
-         * thread has put up a bar for new requests.
-         * Continue immediately if no resync is active currently.
-         */
-        wait_barrier(conf);
-
         sectors = r10_bio->sectors;
-        while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
-            bio->bi_iter.bi_sector < conf->reshape_progress &&
-            bio->bi_iter.bi_sector + sectors > conf->reshape_progress) {
-                /*
-                 * IO spans the reshape position. Need to wait for reshape to
-                 * pass
-                 */
-                raid10_log(conf->mddev, "wait reshape");
-                allow_barrier(conf);
-                wait_event(conf->wait_barrier,
-                           conf->reshape_progress <= bio->bi_iter.bi_sector ||
-                           conf->reshape_progress >= bio->bi_iter.bi_sector +
-                           sectors);
-                wait_barrier(conf);
-        }
-
+        regular_request_wait(mddev, conf, bio, sectors);
         if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
             (mddev->reshape_backwards
              ? (bio->bi_iter.bi_sector < conf->reshape_safe &&
@@ -1514,7 +1494,9 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio,
                 struct bio *split = bio_split(bio, r10_bio->sectors,
                                               GFP_NOIO, &conf->bio_split);
                 bio_chain(split, bio);
+                allow_barrier(conf);
                 generic_make_request(bio);
+                wait_barrier(conf);
                 bio = split;
                 r10_bio->master_bio = bio;
         }
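The allow_barrier()/wait_barrier() pair added around generic_make_request() in both the read and write split paths is the barrier hang fix from the pull list: as I read the change, a request drops its hold on the barrier before re-submitting the remainder of a split bio, so the barrier code is not left waiting on a request that cannot make progress until that re-submission has been handled. A hypothetical userspace model of the pairing, with a mutex and condition variable standing in for the real raid10 primitives:

#include <pthread.h>
#include <stdio.h>

/* Toy model only: the real wait_barrier()/allow_barrier() in raid10.c
 * track more state (nr_waiting, per-array conf, force flags). */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
static int barrier_raised;      /* a resync has raised the barrier        */
static int nr_pending;          /* requests currently under the barrier   */

static void wait_barrier(void)
{
        pthread_mutex_lock(&lock);
        while (barrier_raised)                  /* resync owns the array  */
                pthread_cond_wait(&cond, &lock);
        nr_pending++;
        pthread_mutex_unlock(&lock);
}

static void allow_barrier(void)
{
        pthread_mutex_lock(&lock);
        nr_pending--;
        pthread_cond_broadcast(&cond);          /* wake a waiting resync  */
        pthread_mutex_unlock(&lock);
}

int main(void)
{
        wait_barrier();         /* request registered under the barrier   */
        /* ... bio_split()/bio_chain() would happen here ... */
        allow_barrier();        /* new in the fix: drop the hold first    */
        puts("generic_make_request(remaining bio)");  /* may block        */
        wait_barrier();         /* new in the fix: re-register afterwards */
        /* ... handle the split part, then complete ... */
        allow_barrier();
        printf("nr_pending back to %d\n", nr_pending);
        return 0;
}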
include/linux/raid/pq.h
@@ -35,6 +35,7 @@ extern const char raid6_empty_zero_page[PAGE_SIZE];
 #include <limits.h>
 #include <stddef.h>
 #include <sys/mman.h>
+#include <sys/time.h>
 #include <sys/types.h>

 /* Not standard, but glibc defines it */
@@ -52,7 +53,9 @@ extern const char raid6_empty_zero_page[PAGE_SIZE];

 #define __init
 #define __exit
+#ifndef __attribute_const__
 # define __attribute_const__ __attribute__((const))
+#endif
 #define noinline __attribute__((noinline))

 #define preempt_enable()
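The #ifndef guard is there because the standalone raid6test build can pull in a header that already defines __attribute_const__, and an unconditional #define would then trigger a redefinition warning. A small self-contained illustration of the guard pattern; the "earlier definition" is simulated here rather than coming from any real header:

#include <stdio.h>

/* Pretend some previously included header already provided the macro. */
#define __attribute_const__ __attribute__((__const__))

/* The guarded fallback, as in pq.h: only takes effect if nothing else
 * defined the macro first, so there is no redefinition warning. */
#ifndef __attribute_const__
# define __attribute_const__ __attribute__((const))
#endif

static __attribute_const__ int twice(int x)
{
        return 2 * x;   /* "const" function: result depends only on x */
}

int main(void)
{
        printf("%d\n", twice(21));
        return 0;
}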
@@ -67,6 +70,9 @@ extern const char raid6_empty_zero_page[PAGE_SIZE];
 #define MODULE_DESCRIPTION(desc)
 #define subsys_initcall(x)
 #define module_exit(x)
+
+#define IS_ENABLED(x) (x)
+#define CONFIG_RAID6_PQ_BENCHMARK 1
 #endif /* __KERNEL__ */

 /* Routine choices */
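Outside the kernel there is no kconfig.h, so the userspace test build gets a trivial IS_ENABLED() that simply evaluates its argument, plus a hard-wired CONFIG_RAID6_PQ_BENCHMARK. A sketch of how those stubs behave; the kernel's real IS_ENABLED() in include/linux/kconfig.h is more elaborate (it also recognises =m options):

#include <stdio.h>

#define IS_ENABLED(x) (x)
#define CONFIG_RAID6_PQ_BENCHMARK 1     /* flip to 0 to model "benchmark off" */

int main(void)
{
        if (!IS_ENABLED(CONFIG_RAID6_PQ_BENCHMARK))
                puts("benchmarking skipped: first valid algorithm wins");
        else
                puts("benchmarking enabled: all algorithms are timed");
        return 0;
}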
lib/Kconfig
@@ -10,6 +10,14 @@ menu "Library routines"
 config RAID6_PQ
         tristate

+config RAID6_PQ_BENCHMARK
+        bool "Automatically choose fastest RAID6 PQ functions"
+        depends on RAID6_PQ
+        default y
+        help
+          Benchmark all available RAID6 PQ functions on init and choose the
+          fastest one.
+
 config BITREVERSE
         tristate

lib/raid6/algos.c
@@ -34,64 +34,64 @@ struct raid6_calls raid6_call;
 EXPORT_SYMBOL_GPL(raid6_call);

 const struct raid6_calls * const raid6_algos[] = {
-#if defined(__ia64__)
-        &raid6_intx16,
-        &raid6_intx32,
-#endif
 #if defined(__i386__) && !defined(__arch_um__)
-        &raid6_mmxx1,
-        &raid6_mmxx2,
-        &raid6_sse1x1,
-        &raid6_sse1x2,
-        &raid6_sse2x1,
-        &raid6_sse2x2,
-#ifdef CONFIG_AS_AVX2
-        &raid6_avx2x1,
-        &raid6_avx2x2,
-#endif
 #ifdef CONFIG_AS_AVX512
-        &raid6_avx512x1,
         &raid6_avx512x2,
+        &raid6_avx512x1,
 #endif
+#ifdef CONFIG_AS_AVX2
+        &raid6_avx2x2,
+        &raid6_avx2x1,
+#endif
+        &raid6_sse2x2,
+        &raid6_sse2x1,
+        &raid6_sse1x2,
+        &raid6_sse1x1,
+        &raid6_mmxx2,
+        &raid6_mmxx1,
 #endif
 #if defined(__x86_64__) && !defined(__arch_um__)
-        &raid6_sse2x1,
-        &raid6_sse2x2,
-        &raid6_sse2x4,
-#ifdef CONFIG_AS_AVX2
-        &raid6_avx2x1,
-        &raid6_avx2x2,
-        &raid6_avx2x4,
-#endif
 #ifdef CONFIG_AS_AVX512
-        &raid6_avx512x1,
-        &raid6_avx512x2,
         &raid6_avx512x4,
+        &raid6_avx512x2,
+        &raid6_avx512x1,
 #endif
+#ifdef CONFIG_AS_AVX2
+        &raid6_avx2x4,
+        &raid6_avx2x2,
+        &raid6_avx2x1,
+#endif
+        &raid6_sse2x4,
+        &raid6_sse2x2,
+        &raid6_sse2x1,
 #endif
 #ifdef CONFIG_ALTIVEC
-        &raid6_altivec1,
-        &raid6_altivec2,
-        &raid6_altivec4,
-        &raid6_altivec8,
-        &raid6_vpermxor1,
-        &raid6_vpermxor2,
-        &raid6_vpermxor4,
         &raid6_vpermxor8,
+        &raid6_vpermxor4,
+        &raid6_vpermxor2,
+        &raid6_vpermxor1,
+        &raid6_altivec8,
+        &raid6_altivec4,
+        &raid6_altivec2,
+        &raid6_altivec1,
 #endif
 #if defined(CONFIG_S390)
         &raid6_s390vx8,
 #endif
-        &raid6_intx1,
-        &raid6_intx2,
-        &raid6_intx4,
-        &raid6_intx8,
 #ifdef CONFIG_KERNEL_MODE_NEON
-        &raid6_neonx1,
-        &raid6_neonx2,
-        &raid6_neonx4,
         &raid6_neonx8,
+        &raid6_neonx4,
+        &raid6_neonx2,
+        &raid6_neonx1,
 #endif
+#if defined(__ia64__)
+        &raid6_intx32,
+        &raid6_intx16,
+#endif
+        &raid6_intx8,
+        &raid6_intx4,
+        &raid6_intx2,
+        &raid6_intx1,
         NULL
 };

@@ -163,6 +163,11 @@ static inline const struct raid6_calls *raid6_choose_gen(
                         if ((*algo)->valid && !(*algo)->valid())
                                 continue;

+                        if (!IS_ENABLED(CONFIG_RAID6_PQ_BENCHMARK)) {
+                                best = *algo;
+                                break;
+                        }
+
                         perf = 0;

                         preempt_disable();
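With benchmarking compiled out, raid6_choose_gen() now settles for the first entry whose ->valid() hook accepts the CPU, which is why the earlier hunk re-sorts raid6_algos[] from fastest to slowest. A hedged userspace sketch of that selection flow; the table contents and valid() results are invented, only the control flow mirrors the hunk:

#include <stdio.h>

#define IS_ENABLED(x) (x)
#define CONFIG_RAID6_PQ_BENCHMARK 0     /* model "benchmark skipped" */

struct raid6_calls {
        const char *name;
        int (*valid)(void);             /* NULL means "always usable" */
};

static int no_avx512(void) { return 0; }        /* pretend the CPU lacks AVX-512 */

static const struct raid6_calls avx512x4 = { "avx512x4", no_avx512 };
static const struct raid6_calls sse2x4   = { "sse2x4",   NULL };
static const struct raid6_calls intx8    = { "int64x8",  NULL };

static const struct raid6_calls *const raid6_algos[] = {
        &avx512x4, &sse2x4, &intx8, NULL        /* fastest first, as resorted */
};

int main(void)
{
        const struct raid6_calls *best = NULL;

        for (const struct raid6_calls *const *algo = raid6_algos; *algo; algo++) {
                if ((*algo)->valid && !(*algo)->valid())
                        continue;               /* CPU cannot run this one */

                if (!IS_ENABLED(CONFIG_RAID6_PQ_BENCHMARK)) {
                        best = *algo;           /* take the first valid entry */
                        break;
                }
                /* ...otherwise the kernel benchmarks each candidate here... */
        }

        printf("chosen: %s\n", best ? best->name : "none");
        return 0;
}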
lib/raid6/test/Makefile
@@ -34,6 +34,9 @@ endif

 ifeq ($(IS_X86),yes)
         OBJS   += mmx.o sse1.o sse2.o avx2.o recov_ssse3.o recov_avx2.o avx512.o recov_avx512.o
+        CFLAGS += $(shell echo "pshufb %xmm0, %xmm0" |          \
+                    gcc -c -x assembler - >&/dev/null &&        \
+                    rm ./-.o && echo -DCONFIG_AS_SSSE3=1)
         CFLAGS += $(shell echo "vpbroadcastb %xmm0, %ymm1" |    \
                     gcc -c -x assembler - >&/dev/null &&        \
                     rm ./-.o && echo -DCONFIG_AS_AVX2=1)