From 178bd438af5c95deef5073416c60396f88e97ec9 Mon Sep 17 00:00:00 2001
From: Fam Zheng
Date: Tue, 18 Apr 2017 22:30:43 +0800
Subject: [PATCH 1/2] block: Walk bs->children carefully in bdrv_drain_recurse

The recursive bdrv_drain_recurse may run a block job completion BH
that drops nodes. The coming changes will make that more likely and
use-after-free would happen without this patch.

Stash the bs pointer and use bdrv_ref/bdrv_unref in addition to
QLIST_FOREACH_SAFE to prevent such a case from happening.

Since bdrv_unref accesses global state that is not protected by the
AioContext lock, we cannot use bdrv_ref/bdrv_unref unconditionally.
Fortunately the protection is not needed in IOThread because only
main loop can modify a graph with the AioContext lock held.

Signed-off-by: Fam Zheng
Message-Id: <20170418143044.12187-2-famz@redhat.com>
Reviewed-by: Jeff Cody
Tested-by: Jeff Cody
Signed-off-by: Fam Zheng
---
 block/io.c | 23 ++++++++++++++++++++---
 1 file changed, 20 insertions(+), 3 deletions(-)

diff --git a/block/io.c b/block/io.c
index 8706bfa578..a7142e00e8 100644
--- a/block/io.c
+++ b/block/io.c
@@ -158,7 +158,7 @@ bool bdrv_requests_pending(BlockDriverState *bs)
 
 static bool bdrv_drain_recurse(BlockDriverState *bs)
 {
-    BdrvChild *child;
+    BdrvChild *child, *tmp;
     bool waited;
 
     waited = BDRV_POLL_WHILE(bs, atomic_read(&bs->in_flight) > 0);
@@ -167,8 +167,25 @@ static bool bdrv_drain_recurse(BlockDriverState *bs)
         bs->drv->bdrv_drain(bs);
     }
 
-    QLIST_FOREACH(child, &bs->children, next) {
-        waited |= bdrv_drain_recurse(child->bs);
+    QLIST_FOREACH_SAFE(child, &bs->children, next, tmp) {
+        BlockDriverState *bs = child->bs;
+        bool in_main_loop =
+            qemu_get_current_aio_context() == qemu_get_aio_context();
+        assert(bs->refcnt > 0);
+        if (in_main_loop) {
+            /* In case the recursive bdrv_drain_recurse processes a
+             * block_job_defer_to_main_loop BH and modifies the graph,
+             * let's hold a reference to bs until we are done.
+             *
+             * IOThread doesn't have such a BH, and it is not safe to call
+             * bdrv_unref without BQL, so skip doing it there.
+             */
+            bdrv_ref(bs);
+        }
+        waited |= bdrv_drain_recurse(bs);
+        if (in_main_loop) {
+            bdrv_unref(bs);
+        }
     }
 
     return waited;
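
A stand-alone sketch of the pattern this patch applies may make it easier
to follow: walk the list with the successor stashed up front and a
reference held on the current element, so that callbacks running during
the walk cannot free it. This is an illustration only, not QEMU code;
Node, node_ref/node_unref, poll_once and drain_list are invented stand-ins
for BdrvChild/BlockDriverState, bdrv_ref/bdrv_unref, aio_poll and
bdrv_drain_recurse.

    /*
     * Illustration only, not QEMU code: a self-contained model of walking
     * a list safely while callbacks may drop elements.
     */
    #include <stdio.h>
    #include <stdlib.h>

    typedef struct Node Node;
    struct Node {
        int refcnt;
        int in_flight;
        Node *next;
    };

    static void node_ref(Node *n)
    {
        n->refcnt++;
    }

    static void node_unref(Node *n)
    {
        if (--n->refcnt == 0) {
            free(n);
        }
    }

    /* Stand-in for polling the event loop: completing I/O here could run
     * callbacks that unlink or drop nodes from the list. */
    static void poll_once(Node *n)
    {
        n->in_flight = 0;
    }

    static void drain_list(Node *head)
    {
        Node *n = head;

        while (n) {
            Node *next = n->next;   /* the "_SAFE" part: stash the successor */
            node_ref(n);            /* keep n alive across callbacks */
            while (n->in_flight) {
                poll_once(n);
            }
            node_unref(n);          /* may free n now that we are done */
            n = next;
        }
    }

    int main(void)
    {
        Node *b = calloc(1, sizeof(*b));
        Node *a = calloc(1, sizeof(*a));

        a->refcnt = b->refcnt = 1;
        a->in_flight = 1;
        a->next = b;

        drain_list(a);
        printf("list drained\n");

        node_unref(a);
        node_unref(b);
        return 0;
    }

In the patch itself the reference is additionally made conditional on
running in the main loop, because bdrv_unref touches global state that
is not protected by the AioContext lock.
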
From 91af091f92358c2ff828fa1def1a7bea9b701cdf Mon Sep 17 00:00:00 2001
From: Fam Zheng
Date: Tue, 18 Apr 2017 22:30:44 +0800
Subject: [PATCH 2/2] block: Drain BH in bdrv_drained_begin

During block job completion, nothing is preventing
block_job_defer_to_main_loop_bh from being called in a nested
aio_poll(), which is a trouble, such as in this code path:

    qmp_block_commit
      commit_active_start
        bdrv_reopen
          bdrv_reopen_multiple
            bdrv_reopen_prepare
              bdrv_flush
                aio_poll
                  aio_bh_poll
                    aio_bh_call
                      block_job_defer_to_main_loop_bh
                        stream_complete
                          bdrv_reopen

block_job_defer_to_main_loop_bh is the last step of the stream job,
which should have been "paused" by the bdrv_drained_begin/end in
bdrv_reopen_multiple, but it is not done because it's in the form of
a main loop BH.

Similar to why block jobs should be paused between drained_begin and
drained_end, BHs they schedule must be excluded as well. To achieve
this, this patch forces draining the BH in BDRV_POLL_WHILE.

As a side effect this fixes a hang in block_job_detach_aio_context
during system_reset when a block job is ready:

    #0  0x0000555555aa79f3 in bdrv_drain_recurse
    #1  0x0000555555aa825d in bdrv_drained_begin
    #2  0x0000555555aa8449 in bdrv_drain
    #3  0x0000555555a9c356 in blk_drain
    #4  0x0000555555aa3cfd in mirror_drain
    #5  0x0000555555a66e11 in block_job_detach_aio_context
    #6  0x0000555555a62f4d in bdrv_detach_aio_context
    #7  0x0000555555a63116 in bdrv_set_aio_context
    #8  0x0000555555a9d326 in blk_set_aio_context
    #9  0x00005555557e38da in virtio_blk_data_plane_stop
    #10 0x00005555559f9d5f in virtio_bus_stop_ioeventfd
    #11 0x00005555559fa49b in virtio_bus_stop_ioeventfd
    #12 0x00005555559f6a18 in virtio_pci_stop_ioeventfd
    #13 0x00005555559f6a18 in virtio_pci_reset
    #14 0x00005555559139a9 in qdev_reset_one
    #15 0x0000555555916738 in qbus_walk_children
    #16 0x0000555555913318 in qdev_walk_children
    #17 0x0000555555916738 in qbus_walk_children
    #18 0x00005555559168ca in qemu_devices_reset
    #19 0x000055555581fcbb in pc_machine_reset
    #20 0x00005555558a4d96 in qemu_system_reset
    #21 0x000055555577157a in main_loop_should_exit
    #22 0x000055555577157a in main_loop
    #23 0x000055555577157a in main

The rationale is that the loop in block_job_detach_aio_context cannot
make any progress in pausing/completing the job, because bs->in_flight
is 0, so bdrv_drain doesn't process the block_job_defer_to_main_loop
BH. With this patch, it does.

Reported-by: Jeff Cody
Signed-off-by: Fam Zheng
Message-Id: <20170418143044.12187-3-famz@redhat.com>
Reviewed-by: Jeff Cody
Tested-by: Jeff Cody
Signed-off-by: Fam Zheng
---
 include/block/block.h | 22 ++++++++++++++--------
 1 file changed, 14 insertions(+), 8 deletions(-)

diff --git a/include/block/block.h b/include/block/block.h
index 97d4330292..5ddc0cf21b 100644
--- a/include/block/block.h
+++ b/include/block/block.h
@@ -381,12 +381,13 @@ void bdrv_drain_all(void);
 
 #define BDRV_POLL_WHILE(bs, cond) ({                       \
     bool waited_ = false;                                  \
+    bool busy_ = true;                                     \
     BlockDriverState *bs_ = (bs);                          \
     AioContext *ctx_ = bdrv_get_aio_context(bs_);          \
     if (aio_context_in_iothread(ctx_)) {                   \
-        while ((cond)) {                                   \
-            aio_poll(ctx_, true);                          \
-            waited_ = true;                                \
+        while ((cond) || busy_) {                          \
+            busy_ = aio_poll(ctx_, (cond));                \
+            waited_ |= !!(cond) | busy_;                   \
         }                                                  \
     } else {                                               \
         assert(qemu_get_current_aio_context() ==           \
@@ -398,11 +399,16 @@ void bdrv_drain_all(void);
          */                                                \
         assert(!bs_->wakeup);                              \
         bs_->wakeup = true;                                \
-        while ((cond)) {                                   \
-            aio_context_release(ctx_);                     \
-            aio_poll(qemu_get_aio_context(), true);        \
-            aio_context_acquire(ctx_);                     \
-            waited_ = true;                                \
+        while (busy_) {                                    \
+            if ((cond)) {                                  \
+                waited_ = busy_ = true;                    \
+                aio_context_release(ctx_);                 \
+                aio_poll(qemu_get_aio_context(), true);    \
+                aio_context_acquire(ctx_);                 \
+            } else {                                       \
+                busy_ = aio_poll(ctx_, false);             \
+                waited_ |= busy_;                          \
+            }                                              \
+        }                                                  \
         bs_->wakeup = false;                               \
     }                                                      \
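
To see why the macro change above resolves the hang, the stand-alone model
below contrasts the two loop conditions: the old loop exits as soon as the
condition is false, while the new one also keeps polling while the event
loop still reports progress, so a deferred BH such as
block_job_defer_to_main_loop still runs during the drain. This is an
illustration only, not QEMU code; loop_poll, schedule_bh,
job_completion_bh and the globals are invented stand-ins for aio_poll,
BH scheduling and bs->in_flight.

    /*
     * Illustration only, not QEMU code: a toy model of the BDRV_POLL_WHILE
     * change. loop_poll() stands in for aio_poll(): it runs one pending BH
     * and returns true if it made progress, false otherwise.
     */
    #include <stdbool.h>
    #include <stdio.h>

    #define MAX_BH 8

    typedef void (*BHFunc)(void);

    static BHFunc bh_queue[MAX_BH];
    static int bh_count;
    static int in_flight;            /* stand-in for bs->in_flight */

    static void schedule_bh(BHFunc fn)
    {
        bh_queue[bh_count++] = fn;
    }

    static bool loop_poll(void)
    {
        if (bh_count == 0) {
            return false;            /* nothing to do: no progress */
        }
        bh_queue[--bh_count]();      /* run one deferred callback */
        return true;
    }

    static void job_completion_bh(void)
    {
        printf("deferred job completion ran inside the drain\n");
    }

    static void drained_begin(void)
    {
        bool busy = true;

        /*
         * Old logic: while (in_flight > 0) { poll; } -- returns immediately
         * when in_flight is already 0, leaving job_completion_bh pending.
         * New logic: also keep polling while the loop still makes progress.
         */
        while (in_flight > 0 || busy) {
            busy = loop_poll();
        }
    }

    int main(void)
    {
        schedule_bh(job_completion_bh);
        in_flight = 0;               /* no requests, but a BH is pending */
        drained_begin();
        printf("%d BHs left after drain\n", bh_count);
        return 0;
    }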