From d792bc3811f22a22a46c7d9a725fd29029f54095 Mon Sep 17 00:00:00 2001
From: Stefan Hajnoczi <stefanha@redhat.com>
Date: Thu, 24 Aug 2017 08:22:00 +0100
Subject: [PATCH 1/4] qemu.py: make VM() a context manager

There are a number of ways to ensure that the QEMU process is shut down
when the test ends, including atexit.register(), try: finally:, or
unittest tearDown() methods.  All of these require extra code and the
programmer must remember to add vm.shutdown().

A nice solution is context managers:

  with VM(binary) as vm:
      ...
  # vm is guaranteed to be shut down here

Cc: Eduardo Habkost <ehabkost@redhat.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Eduardo Habkost <ehabkost@redhat.com>
Message-id: 20170824072202.26818-2-stefanha@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 scripts/qemu.py | 16 +++++++++++++++-
 1 file changed, 15 insertions(+), 1 deletion(-)

diff --git a/scripts/qemu.py b/scripts/qemu.py
index 880e3e8219..4d8ee10943 100644
--- a/scripts/qemu.py
+++ b/scripts/qemu.py
@@ -21,7 +21,14 @@
 
 
 class QEMUMachine(object):
-    '''A QEMU VM'''
+    '''A QEMU VM
+
+    Use this object as a context manager to ensure the QEMU process terminates::
+
+        with VM(binary) as vm:
+            ...
+        # vm is guaranteed to be shut down here
+    '''
 
     def __init__(self, binary, args=[], wrapper=[], name=None, test_dir="/var/tmp",
                  monitor_address=None, socket_scm_helper=None, debug=False):
@@ -40,6 +47,13 @@ def __init__(self, binary, args=[], wrapper=[], name=None, test_dir="/var/tmp",
         self._socket_scm_helper = socket_scm_helper
         self._debug = debug
 
+    def __enter__(self):
+        return self
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        self.shutdown()
+        return False
+
     # This can be used to add an unused monitor instance.
     def add_monitor_telnet(self, ip, port):
        args = 'tcp:%s:%d,server,nowait,telnet' % (ip, port)
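
[A usage sketch, not part of the series: with the patch above applied, a test
script can lean on the with statement for QEMU process cleanup. launch(),
qmp(), and shutdown() are existing QEMUMachine methods; the binary path, and
the assumption that scripts/ is on the Python path, are illustrative only.]

    from qemu import QEMUMachine

    # The QEMU binary path is an assumption for this example
    with QEMUMachine('/usr/bin/qemu-system-x86_64') as vm:
        vm.launch()
        print(vm.qmp('query-status'))
    # __exit__ has called vm.shutdown() here, even if the body raised
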
From f4844ac0adabc458ba4610a71155448783d37c73 Mon Sep 17 00:00:00 2001
From: Stefan Hajnoczi <stefanha@redhat.com>
Date: Thu, 24 Aug 2017 08:22:01 +0100
Subject: [PATCH 2/4] iotests.py: add FilePath context manager

The scratch/ (TEST_DIR) directory is not automatically cleaned up after
test execution.  It is the responsibility of tests to remove any files
they create.

A nice way of doing this is to declare files at the beginning of the
test and automatically remove them with a context manager:

  with iotests.FilePath('test.img') as img_path:
      qemu_img(...)
      qemu_io(...)
  # img_path is guaranteed to be deleted here

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Message-id: 20170824072202.26818-3-stefanha@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 tests/qemu-iotests/iotests.py | 26 ++++++++++++++++++++++++++
 1 file changed, 26 insertions(+)

diff --git a/tests/qemu-iotests/iotests.py b/tests/qemu-iotests/iotests.py
index 7233983f3c..07fa1626a0 100644
--- a/tests/qemu-iotests/iotests.py
+++ b/tests/qemu-iotests/iotests.py
@@ -160,6 +160,32 @@ def __exit__(self, type, value, traceback):
     def timeout(self, signum, frame):
         raise Exception(self.errmsg)
 
+
+class FilePath(object):
+    '''An auto-generated filename that cleans itself up.
+
+    Use this context manager to generate filenames and ensure that the file
+    gets deleted::
+
+        with FilePath('test.img') as img_path:
+            qemu_img('create', img_path, '1G')
+        # img_path is automatically deleted
+    '''
+    def __init__(self, name):
+        filename = '{0}-{1}'.format(os.getpid(), name)
+        self.path = os.path.join(test_dir, filename)
+
+    def __enter__(self):
+        return self.path
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        try:
+            os.remove(self.path)
+        except OSError:
+            pass
+        return False
+
+
 class VM(qtest.QEMUQtestMachine):
     '''A QEMU VM'''
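
[Another usage sketch, not part of the series: inside a qemu-iotests test,
FilePath pairs with the existing qemu_img()/qemu_io() helpers, and the file
is removed even if the test body raises. The image name and size below are
arbitrary choices for illustration.]

    import iotests

    with iotests.FilePath('scratch.img') as img_path:
        # Create a scratch image under TEST_DIR and write to it
        iotests.qemu_img('create', '-f', iotests.imgfmt, img_path, '128M')
        iotests.qemu_io('-c', 'write 0 64k', img_path)
    # img_path has been deleted here, whether the test passed or failed
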
From 921a32179bb0a31c45a505f4ef36de4893be9612 Mon Sep 17 00:00:00 2001
From: Stefan Hajnoczi <stefanha@redhat.com>
Date: Thu, 24 Aug 2017 08:22:02 +0100
Subject: [PATCH 3/4] qemu-iotests: use context managers for resource cleanup
 in 194

Switch from atexit.register() to a more elegant idiom of declaring
resources in a with statement:

  with FilePath('monitor.sock') as monitor_path, VM() as vm:
      ...

The files and VMs will be automatically cleaned up whether the test
passes or fails.

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Message-id: 20170824072202.26818-4-stefanha@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 tests/qemu-iotests/194 | 103 ++++++++++++++++++++---------------------
 1 file changed, 51 insertions(+), 52 deletions(-)

diff --git a/tests/qemu-iotests/194 b/tests/qemu-iotests/194
index a3e3bad664..6449b9b64a 100755
--- a/tests/qemu-iotests/194
+++ b/tests/qemu-iotests/194
@@ -19,66 +19,65 @@
 #
 # Non-shared storage migration test using NBD server and drive-mirror
 
-import os
-import atexit
 import iotests
 
 iotests.verify_platform(['linux'])
 
-img_size = '1G'
-source_img_path = os.path.join(iotests.test_dir, 'source.img')
-dest_img_path = os.path.join(iotests.test_dir, 'dest.img')
-iotests.qemu_img_pipe('create', '-f', iotests.imgfmt, source_img_path, img_size)
-iotests.qemu_img_pipe('create', '-f', iotests.imgfmt, dest_img_path, img_size)
+with iotests.FilePath('source.img') as source_img_path, \
+     iotests.FilePath('dest.img') as dest_img_path, \
+     iotests.FilePath('migration.sock') as migration_sock_path, \
+     iotests.FilePath('nbd.sock') as nbd_sock_path, \
+     iotests.VM('source') as source_vm, \
+     iotests.VM('dest') as dest_vm:
 
-iotests.log('Launching VMs...')
-migration_sock_path = os.path.join(iotests.test_dir, 'migration.sock')
-nbd_sock_path = os.path.join(iotests.test_dir, 'nbd.sock')
-source_vm = iotests.VM('source').add_drive(source_img_path)
-dest_vm = (iotests.VM('dest').add_drive(dest_img_path)
-           .add_incoming('unix:{0}'.format(migration_sock_path)))
-source_vm.launch()
-atexit.register(source_vm.shutdown)
-dest_vm.launch()
-atexit.register(dest_vm.shutdown)
+    img_size = '1G'
+    iotests.qemu_img_pipe('create', '-f', iotests.imgfmt, source_img_path, img_size)
+    iotests.qemu_img_pipe('create', '-f', iotests.imgfmt, dest_img_path, img_size)
 
-iotests.log('Launching NBD server on destination...')
-iotests.log(dest_vm.qmp('nbd-server-start', addr={'type': 'unix', 'data': {'path': nbd_sock_path}}))
-iotests.log(dest_vm.qmp('nbd-server-add', device='drive0', writable=True))
+    iotests.log('Launching VMs...')
+    (source_vm.add_drive(source_img_path)
+     .launch())
+    (dest_vm.add_drive(dest_img_path)
+     .add_incoming('unix:{0}'.format(migration_sock_path))
+     .launch())
 
-iotests.log('Starting `drive-mirror` on source...')
-iotests.log(source_vm.qmp(
-                'drive-mirror',
-                device='drive0',
-                target='nbd+unix:///drive0?socket={0}'.format(nbd_sock_path),
-                sync='full',
-                format='raw', # always raw, the server handles the format
-                mode='existing',
-                job_id='mirror-job0'))
+    iotests.log('Launching NBD server on destination...')
+    iotests.log(dest_vm.qmp('nbd-server-start', addr={'type': 'unix', 'data': {'path': nbd_sock_path}}))
+    iotests.log(dest_vm.qmp('nbd-server-add', device='drive0', writable=True))
 
-iotests.log('Waiting for `drive-mirror` to complete...')
-iotests.log(source_vm.event_wait('BLOCK_JOB_READY'),
-            filters=[iotests.filter_qmp_event])
+    iotests.log('Starting `drive-mirror` on source...')
+    iotests.log(source_vm.qmp(
+                  'drive-mirror',
+                  device='drive0',
+                  target='nbd+unix:///drive0?socket={0}'.format(nbd_sock_path),
+                  sync='full',
+                  format='raw', # always raw, the server handles the format
+                  mode='existing',
+                  job_id='mirror-job0'))
 
-iotests.log('Starting migration...')
-source_vm.qmp('migrate-set-capabilities',
-              capabilities=[{'capability': 'events', 'state': True}])
-dest_vm.qmp('migrate-set-capabilities',
-            capabilities=[{'capability': 'events', 'state': True}])
-iotests.log(source_vm.qmp('migrate', uri='unix:{0}'.format(migration_sock_path)))
+    iotests.log('Waiting for `drive-mirror` to complete...')
+    iotests.log(source_vm.event_wait('BLOCK_JOB_READY'),
+                filters=[iotests.filter_qmp_event])
 
-while True:
-    event1 = source_vm.event_wait('MIGRATION')
-    iotests.log(event1, filters=[iotests.filter_qmp_event])
-    if event1['data']['status'] in ('completed', 'failed'):
-        iotests.log('Gracefully ending the `drive-mirror` job on source...')
-        iotests.log(source_vm.qmp('block-job-cancel', device='mirror-job0'))
-        break
+    iotests.log('Starting migration...')
+    source_vm.qmp('migrate-set-capabilities',
+                  capabilities=[{'capability': 'events', 'state': True}])
+    dest_vm.qmp('migrate-set-capabilities',
+                capabilities=[{'capability': 'events', 'state': True}])
+    iotests.log(source_vm.qmp('migrate', uri='unix:{0}'.format(migration_sock_path)))
 
-while True:
-    event2 = source_vm.event_wait('BLOCK_JOB_COMPLETED')
-    iotests.log(event2, filters=[iotests.filter_qmp_event])
-    if event2['event'] == 'BLOCK_JOB_COMPLETED':
-        iotests.log('Stopping the NBD server on destination...')
-        iotests.log(dest_vm.qmp('nbd-server-stop'))
-        break
+    while True:
+        event1 = source_vm.event_wait('MIGRATION')
+        iotests.log(event1, filters=[iotests.filter_qmp_event])
+        if event1['data']['status'] in ('completed', 'failed'):
+            iotests.log('Gracefully ending the `drive-mirror` job on source...')
+            iotests.log(source_vm.qmp('block-job-cancel', device='mirror-job0'))
+            break
+
+    while True:
+        event2 = source_vm.event_wait('BLOCK_JOB_COMPLETED')
+        iotests.log(event2, filters=[iotests.filter_qmp_event])
+        if event2['event'] == 'BLOCK_JOB_COMPLETED':
+            iotests.log('Stopping the NBD server on destination...')
+            iotests.log(dest_vm.qmp('nbd-server-stop'))
+            break
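
[A design note on the patch above, with a standalone sketch that is not part
of the series: context managers in a single with statement are entered left
to right and exited in reverse order, so both VMs are shut down before their
image and socket files are removed. The resource() helper below is a
hypothetical stand-in used only to show that ordering; it runs with the
standard library alone.]

    from contextlib import contextmanager

    @contextmanager
    def resource(name):
        print('acquire', name)
        try:
            yield name
        finally:
            # Runs on scope exit, even if the with body raised
            print('release', name)

    with resource('source.img') as img, resource('source-vm') as vm:
        print('test body using', img, vm)
    # Prints 'release source-vm' before 'release source.img'
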
From b461151ff31c7925f271c297e8abed20231ac7d3 Mon Sep 17 00:00:00 2001
From: "Daniel P. Berrange" <berrange@redhat.com>
Date: Thu, 31 Aug 2017 11:54:56 +0100
Subject: [PATCH 4/4] block: document semantics of bdrv_co_preadv|pwritev

Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Eric Blake <eblake@redhat.com>
Signed-off-by: Daniel P. Berrange <berrange@redhat.com>
Message-id: 20170831105456.9558-1-berrange@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 include/block/block_int.h | 31 +++++++++++++++++++++++++++++++
 1 file changed, 31 insertions(+)

diff --git a/include/block/block_int.h b/include/block/block_int.h
index 7571c0aaaf..7816b43a27 100644
--- a/include/block/block_int.h
+++ b/include/block/block_int.h
@@ -146,12 +146,43 @@ struct BlockDriver {
     int coroutine_fn (*bdrv_co_readv)(BlockDriverState *bs,
         int64_t sector_num, int nb_sectors, QEMUIOVector *qiov);
+
+    /**
+     * @offset: position in bytes to read at
+     * @bytes: number of bytes to read
+     * @qiov: the buffers to fill with read data
+     * @flags: currently unused, always 0
+     *
+     * @offset and @bytes will be a multiple of 'request_alignment',
+     * but the length of individual @qiov elements does not have to
+     * be a multiple.
+     *
+     * @bytes will always equal the total size of @qiov, and will be
+     * no larger than 'max_transfer'.
+     *
+     * The buffer in @qiov may point directly to guest memory.
+     */
     int coroutine_fn (*bdrv_co_preadv)(BlockDriverState *bs,
         uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags);
     int coroutine_fn (*bdrv_co_writev)(BlockDriverState *bs,
         int64_t sector_num, int nb_sectors, QEMUIOVector *qiov);
     int coroutine_fn (*bdrv_co_writev_flags)(BlockDriverState *bs,
         int64_t sector_num, int nb_sectors, QEMUIOVector *qiov, int flags);
+    /**
+     * @offset: position in bytes to write at
+     * @bytes: number of bytes to write
+     * @qiov: the buffers containing data to write
+     * @flags: zero or more bits allowed by 'supported_write_flags'
+     *
+     * @offset and @bytes will be a multiple of 'request_alignment',
+     * but the length of individual @qiov elements does not have to
+     * be a multiple.
+     *
+     * @bytes will always equal the total size of @qiov, and will be
+     * no larger than 'max_transfer'.
+     *
+     * The buffer in @qiov may point directly to guest memory.
+     */
     int coroutine_fn (*bdrv_co_pwritev)(BlockDriverState *bs,
         uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags);