Merge tag 'for-5.8/dm-fixes-2' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm

Pull device mapper fixes from Mike Snitzer:

 - A request-based DM fix to stop using a waitqueue to wait for blk-mq IO
   completion, because doing so is racy (the replacement polling wait is
   sketched below the shortlog).

 - A couple more DM zoned target fixes to address issues introduced
   during the 5.8 cycle.

 - A DM core fix to use the proper interface (bio_uninit) to clean up DM's
   static flush bio (see the on-stack bio sketch below the shortlog).

 - A DM core fix to prevent mm recursion during memory allocation needed by
   dm_kobject_uevent (the memalloc_noio pattern is sketched after the final
   dm.c hunk below).

* tag 'for-5.8/dm-fixes-2' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm:
  dm: use noio when sending kobject event
  dm zoned: Fix zone reclaim trigger
  dm zoned: fix unused but set variable warnings
  dm writecache: reject asynchronous pmem devices
  dm: use bio_uninit instead of bio_disassociate_blkg
  dm: do not use waitqueue for request-based DM
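
For the request-based DM fix: the removed wake_up() in rq_completed() was not
tied to blk-mq's own in-flight accounting, which DM does not control, so the
waiter's condition (blk_mq_queue_inflight()) could still report outstanding
requests when the wakeup arrived, or the wakeup could be missed entirely,
leaving the waiter asleep. The new dm_wait_for_completion() below simply polls
that counter with msleep(5). The following is only a rough, self-contained
userspace analogy of such a polling wait; the atomic counter, thread, and
delays are invented for illustration and are not DM code.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static atomic_int inflight = 3;	/* stands in for blk-mq's in-flight count */

static void *complete_requests(void *arg)
{
	(void)arg;
	for (int i = 0; i < 3; i++) {
		usleep(10 * 1000);		/* pretend an IO completes */
		atomic_fetch_sub(&inflight, 1);	/* the counter is updated... */
	}					/* ...but no wakeup is ever sent */
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, complete_requests, NULL);

	/* Poll the authoritative counter, like the msleep(5) loop in dm.c. */
	while (atomic_load(&inflight) != 0)
		usleep(5 * 1000);

	pthread_join(t, NULL);
	puts("all in-flight requests drained");
	return 0;
}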
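For the flush-bio fix: DM builds an empty flush around a bio that lives on the
stack rather than coming from a bio_set, and such a bio should be torn down
with bio_uninit(), the counterpart of bio_init(), instead of open-coding one
piece of that teardown (bio_disassociate_blkg). Below is a minimal kernel-style
sketch of the pairing; it assumes a kernel build context, and
submit_empty_flush() is a hypothetical helper, not a DM function.

#include <linux/bio.h>
#include <linux/blkdev.h>

/* Hypothetical helper: issue an empty flush using an on-stack bio. */
static int submit_empty_flush(struct block_device *bdev)
{
	struct bio flush_bio;
	int ret;

	bio_init(&flush_bio, NULL, 0);	/* no bio_vecs: the flush carries no data */
	bio_set_dev(&flush_bio, bdev);
	flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC;

	ret = submit_bio_wait(&flush_bio);
	bio_uninit(&flush_bio);		/* full teardown for a bio not from a bio_set */
	return ret;
}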
Linus Torvalds 2020-07-09 13:14:51 -07:00
commit 2a89b99f58
6 changed files with 71 additions and 49 deletions

drivers/md/dm-rq.c

@@ -146,10 +146,6 @@ static void rq_end_stats(struct mapped_device *md, struct request *orig)
  */
 static void rq_completed(struct mapped_device *md)
 {
-	/* nudge anyone waiting on suspend queue */
-	if (unlikely(wq_has_sleeper(&md->wait)))
-		wake_up(&md->wait);
-
 	/*
 	 * dm_put() must be at the end of this function. See the comment above
 	 */

drivers/md/dm-writecache.c

@@ -2266,6 +2266,12 @@ static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv)
 	}
 
 	if (WC_MODE_PMEM(wc)) {
+		if (!dax_synchronous(wc->ssd_dev->dax_dev)) {
+			r = -EOPNOTSUPP;
+			ti->error = "Asynchronous persistent memory not supported as pmem cache";
+			goto bad;
+		}
+
 		r = persistent_memory_claim(wc);
 		if (r) {
 			ti->error = "Unable to map persistent memory for cache";

drivers/md/dm-zoned-metadata.c

@@ -2217,8 +2217,15 @@ struct dm_zone *dmz_alloc_zone(struct dmz_metadata *zmd, unsigned int dev_idx,
 {
 	struct list_head *list;
 	struct dm_zone *zone;
-	int i = 0;
+	int i;
 
+	/* Schedule reclaim to ensure free zones are available */
+	if (!(flags & DMZ_ALLOC_RECLAIM)) {
+		for (i = 0; i < zmd->nr_devs; i++)
+			dmz_schedule_reclaim(zmd->dev[i].reclaim);
+	}
+
+	i = 0;
 again:
 	if (flags & DMZ_ALLOC_CACHE)
 		list = &zmd->unmap_cache_list;

drivers/md/dm-zoned-reclaim.c

@@ -456,6 +456,8 @@ static unsigned int dmz_reclaim_percentage(struct dmz_reclaim *zrc)
 		nr_zones = dmz_nr_rnd_zones(zmd, zrc->dev_idx);
 		nr_unmap = dmz_nr_unmap_rnd_zones(zmd, zrc->dev_idx);
 	}
+	if (nr_unmap <= 1)
+		return 0;
 	return nr_unmap * 100 / nr_zones;
 }
@@ -501,7 +503,7 @@ static void dmz_reclaim_work(struct work_struct *work)
 {
 	struct dmz_reclaim *zrc = container_of(work, struct dmz_reclaim, work.work);
 	struct dmz_metadata *zmd = zrc->metadata;
-	unsigned int p_unmap, nr_unmap_rnd = 0, nr_rnd = 0;
+	unsigned int p_unmap;
 	int ret;
 
 	if (dmz_dev_is_dying(zmd))
@@ -527,9 +529,6 @@ static void dmz_reclaim_work(struct work_struct *work)
 		zrc->kc_throttle.throttle = min(75U, 100U - p_unmap / 2);
 	}
 
-	nr_unmap_rnd = dmz_nr_unmap_rnd_zones(zmd, zrc->dev_idx);
-	nr_rnd = dmz_nr_rnd_zones(zmd, zrc->dev_idx);
-
 	DMDEBUG("(%s/%u): Reclaim (%u): %s, %u%% free zones (%u/%u cache %u/%u random)",
 		dmz_metadata_label(zmd), zrc->dev_idx,
 		zrc->kc_throttle.throttle,

drivers/md/dm-zoned-target.c

@@ -400,15 +400,7 @@ static void dmz_handle_bio(struct dmz_target *dmz, struct dm_chunk_work *cw,
 		dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
 	struct dmz_metadata *zmd = dmz->metadata;
 	struct dm_zone *zone;
-	int i, ret;
-
-	/*
-	 * Write may trigger a zone allocation. So make sure the
-	 * allocation can succeed.
-	 */
-	if (bio_op(bio) == REQ_OP_WRITE)
-		for (i = 0; i < dmz->nr_ddevs; i++)
-			dmz_schedule_reclaim(dmz->dev[i].reclaim);
+	int ret;
 
 	dmz_lock_metadata(zmd);

drivers/md/dm.c

@@ -12,6 +12,7 @@
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/mutex.h>
+#include <linux/sched/mm.h>
 #include <linux/sched/signal.h>
 #include <linux/blkpg.h>
 #include <linux/bio.h>
@@ -654,28 +655,6 @@ static void free_tio(struct dm_target_io *tio)
 	bio_put(&tio->clone);
 }
 
-static bool md_in_flight_bios(struct mapped_device *md)
-{
-	int cpu;
-	struct hd_struct *part = &dm_disk(md)->part0;
-	long sum = 0;
-
-	for_each_possible_cpu(cpu) {
-		sum += part_stat_local_read_cpu(part, in_flight[0], cpu);
-		sum += part_stat_local_read_cpu(part, in_flight[1], cpu);
-	}
-
-	return sum != 0;
-}
-
-static bool md_in_flight(struct mapped_device *md)
-{
-	if (queue_is_mq(md->queue))
-		return blk_mq_queue_inflight(md->queue);
-	else
-		return md_in_flight_bios(md);
-}
-
 u64 dm_start_time_ns_from_clone(struct bio *bio)
 {
 	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
@@ -1465,9 +1444,6 @@ static int __send_empty_flush(struct clone_info *ci)
 	BUG_ON(bio_has_data(ci->bio));
 	while ((ti = dm_table_get_target(ci->map, target_nr++)))
 		__send_duplicate_bios(ci, ti, ti->num_flush_bios, NULL);
-
-	bio_disassociate_blkg(ci->bio);
-
 	return 0;
 }
@@ -1655,6 +1631,7 @@ static blk_qc_t __split_and_process_bio(struct mapped_device *md,
 		ci.bio = &flush_bio;
 		ci.sector_count = 0;
 		error = __send_empty_flush(&ci);
+		bio_uninit(ci.bio);
 		/* dec_pending submits any data associated with flush */
 	} else if (op_is_zone_mgmt(bio_op(bio))) {
 		ci.bio = bio;
@@ -1729,6 +1706,7 @@ static blk_qc_t __process_bio(struct mapped_device *md, struct dm_table *map,
 		ci.bio = &flush_bio;
 		ci.sector_count = 0;
 		error = __send_empty_flush(&ci);
+		bio_uninit(ci.bio);
 		/* dec_pending submits any data associated with flush */
 	} else {
 		struct dm_target_io *tio;
@@ -2470,15 +2448,29 @@ void dm_put(struct mapped_device *md)
 }
 EXPORT_SYMBOL_GPL(dm_put);
 
-static int dm_wait_for_completion(struct mapped_device *md, long task_state)
+static bool md_in_flight_bios(struct mapped_device *md)
+{
+	int cpu;
+	struct hd_struct *part = &dm_disk(md)->part0;
+	long sum = 0;
+
+	for_each_possible_cpu(cpu) {
+		sum += part_stat_local_read_cpu(part, in_flight[0], cpu);
+		sum += part_stat_local_read_cpu(part, in_flight[1], cpu);
+	}
+
+	return sum != 0;
+}
+
+static int dm_wait_for_bios_completion(struct mapped_device *md, long task_state)
 {
 	int r = 0;
 	DEFINE_WAIT(wait);
 
-	while (1) {
+	while (true) {
 		prepare_to_wait(&md->wait, &wait, task_state);
 
-		if (!md_in_flight(md))
+		if (!md_in_flight_bios(md))
 			break;
 
 		if (signal_pending_state(task_state, current)) {
@@ -2493,6 +2485,28 @@ static int dm_wait_for_completion(struct mapped_device *md, long task_state)
 	return r;
 }
 
+static int dm_wait_for_completion(struct mapped_device *md, long task_state)
+{
+	int r = 0;
+
+	if (!queue_is_mq(md->queue))
+		return dm_wait_for_bios_completion(md, task_state);
+
+	while (true) {
+		if (!blk_mq_queue_inflight(md->queue))
+			break;
+
+		if (signal_pending_state(task_state, current)) {
+			r = -EINTR;
+			break;
+		}
+
+		msleep(5);
+	}
+
+	return r;
+}
+
 /*
  * Process the deferred bios
  */
@@ -2926,17 +2940,25 @@ EXPORT_SYMBOL_GPL(dm_internal_resume_fast);
 int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
 		      unsigned cookie)
 {
+	int r;
+	unsigned noio_flag;
 	char udev_cookie[DM_COOKIE_LENGTH];
 	char *envp[] = { udev_cookie, NULL };
 
+	noio_flag = memalloc_noio_save();
+
 	if (!cookie)
-		return kobject_uevent(&disk_to_dev(md->disk)->kobj, action);
+		r = kobject_uevent(&disk_to_dev(md->disk)->kobj, action);
 	else {
 		snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u",
 			 DM_COOKIE_ENV_VAR_NAME, cookie);
-		return kobject_uevent_env(&disk_to_dev(md->disk)->kobj,
-					  action, envp);
+		r = kobject_uevent_env(&disk_to_dev(md->disk)->kobj,
+				       action, envp);
 	}
+
+	memalloc_noio_restore(noio_flag);
+
+	return r;
 }
 
 uint32_t dm_next_uevent_seq(struct mapped_device *md)
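
The dm_kobject_uevent() hunk above is an instance of the allocation-scoping API
from <linux/sched/mm.h>: bracketing a call that may allocate (kobject_uevent()
and friends) with memalloc_noio_save()/memalloc_noio_restore() makes those
allocations behave as GFP_NOIO, so direct reclaim cannot issue IO (for example
to a suspended DM device) while the event is being sent. A minimal kernel-style
sketch of the pattern follows; it assumes a kernel build context, and
send_uevent_noio() is a hypothetical wrapper, not a DM function.

#include <linux/kobject.h>
#include <linux/sched/mm.h>

/* Hypothetical wrapper: send a uevent without risking IO-issuing reclaim. */
static int send_uevent_noio(struct kobject *kobj)
{
	unsigned int noio_flag;
	int r;

	noio_flag = memalloc_noio_save();	/* allocations in this scope become GFP_NOIO */
	r = kobject_uevent(kobj, KOBJ_CHANGE);	/* may allocate memory internally */
	memalloc_noio_restore(noio_flag);	/* restore the previous allocation scope */

	return r;
}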