rbd: avoid a deadlock on header_rwsem when flushing notifies

rbd_unregister_watch() flushes notifies and therefore cannot be called
under header_rwsem because a header update notify takes header_rwsem to
synchronize with "rbd map".  If mapping an image fails after the watch
is established and a header update notify sneaks in, we deadlock when
erroring out from rbd_dev_image_probe().

Move watch registration and unregistration out of the critical section.
The only reason they were put there was to make header_rwsem management
slightly more obvious.

Fixes: 811c668877 ("rbd: fix rbd map vs notify races")
Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
Reviewed-by: Jason Dillaman <dillaman@redhat.com>
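
For illustration only, here is a minimal userspace model of the deadlock described
above (this is not code from the patch): a pthread rwlock stands in for
header_rwsem, a worker thread stands in for the header update notify that ends up
in rbd_dev_refresh(), and joining that worker stands in for rbd_unregister_watch()
flushing notifies.  Running it hangs, with each side waiting on the other:

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

/* stand-in for rbd_dev->header_rwsem */
static pthread_rwlock_t header_rwsem = PTHREAD_RWLOCK_INITIALIZER;

/* stand-in for a header update notify calling rbd_dev_refresh() */
static void *notify_handler(void *arg)
{
        /* blocks: "rbd map" already holds header_rwsem for write */
        pthread_rwlock_wrlock(&header_rwsem);
        puts("notify: header refreshed");
        pthread_rwlock_unlock(&header_rwsem);
        return NULL;
}

int main(void)
{
        pthread_t notify;

        /* "rbd map" takes header_rwsem before probing the image */
        pthread_rwlock_wrlock(&header_rwsem);

        /* a header update notify sneaks in while the probe runs */
        pthread_create(&notify, NULL, notify_handler, NULL);
        sleep(1);

        /*
         * The probe fails and the error path unregisters the watch,
         * which flushes notifies (modelled here as joining the notify
         * thread).  The join never returns because notify_handler()
         * is waiting for header_rwsem, which we still hold: deadlock.
         */
        pthread_join(notify, NULL);

        pthread_rwlock_unlock(&header_rwsem);
        return 0;
}

With the fix, the error path in rbd_dev_image_probe() releases header_rwsem before
calling rbd_unregister_watch(), the equivalent of unlocking before the join above,
so the notify handler can take the lock, finish, and let the flush complete.
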
commit 0e4e1de5b6
parent 8f3d9f3542
Author: Ilya Dryomov <idryomov@gmail.com>
Date:   2020-03-13 11:20:51 +01:00

1 file changed, 13 insertions(+), 4 deletions(-)


diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -4527,6 +4527,10 @@ static void cancel_tasks_sync(struct rbd_device *rbd_dev)
 	cancel_work_sync(&rbd_dev->unlock_work);
 }
 
+/*
+ * header_rwsem must not be held to avoid a deadlock with
+ * rbd_dev_refresh() when flushing notifies.
+ */
 static void rbd_unregister_watch(struct rbd_device *rbd_dev)
 {
 	cancel_tasks_sync(rbd_dev);
@@ -6907,6 +6911,9 @@ static void rbd_dev_image_release(struct rbd_device *rbd_dev)
  * device.  If this image is the one being mapped (i.e., not a
  * parent), initiate a watch on its header object before using that
  * object to get detailed information about the rbd image.
+ *
+ * On success, returns with header_rwsem held for write if called
+ * with @depth == 0.
  */
 static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth)
 {
@@ -6936,6 +6943,9 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth)
 		}
 	}
 
+	if (!depth)
+		down_write(&rbd_dev->header_rwsem);
+
 	ret = rbd_dev_header_info(rbd_dev);
 	if (ret) {
 		if (ret == -ENOENT && !need_watch)
@@ -6987,6 +6997,8 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth)
 err_out_probe:
 	rbd_dev_unprobe(rbd_dev);
 err_out_watch:
+	if (!depth)
+		up_write(&rbd_dev->header_rwsem);
 	if (need_watch)
 		rbd_unregister_watch(rbd_dev);
 err_out_format:
@@ -7050,12 +7062,9 @@ static ssize_t do_rbd_add(struct bus_type *bus,
 		goto err_out_rbd_dev;
 	}
 
-	down_write(&rbd_dev->header_rwsem);
 	rc = rbd_dev_image_probe(rbd_dev, 0);
-	if (rc < 0) {
-		up_write(&rbd_dev->header_rwsem);
+	if (rc < 0)
 		goto err_out_rbd_dev;
-	}
 
 	if (rbd_dev->opts->alloc_size > rbd_dev->layout.object_size) {
 		rbd_warn(rbd_dev, "alloc_size adjusted to %u",