mirror of https://gitee.com/openkylin/qemu.git
Block patches
-----BEGIN PGP SIGNATURE----- iQEcBAABAgAGBQJaopzgAAoJEPQH2wBh1c9AAogIAJHLFspLkjWQ4PEEsGFuYZDS 8h/h2atY+jR3Rp1jigcMmkirnnrxLSojdzjk6EgsykK7n1Zi6ai8ylCt1Az0Fi9p PzNu6VwvvqIaEHw9P6bcN1tNS+osQPrORRoRqK/d+H07pcfcT6rePNH6ONa9p9Hj G6FlLRrC4RvDSPRFAJZpxO98K9TETpmC+Dwhg8pERhIzqUDqfqoWICPUzY6ozgSR 1orwNdWuHfxf/lTBOnoHXj6MeJIFbrpIyVWMX1g7O0pwVye4cewQgOz8pegWhicD +ZY8T0zqw9d0w3ER0sX1JBuXv97cNagoEAPMuPiVE0eheh1b2V3BBTDud2pY10M= =6Wgv -----END PGP SIGNATURE----- Merge remote-tracking branch 'mreitz/tags/pull-block-2018-03-09' into queue-block Block patches # gpg: Signature made Fri Mar 9 15:40:32 2018 CET # gpg: using RSA key F407DB0061D5CF40 # gpg: Good signature from "Max Reitz <mreitz@redhat.com>" # Primary key fingerprint: 91BE B60A 30DB 3E88 57D1 1829 F407 DB00 61D5 CF40 * mreitz/tags/pull-block-2018-03-09: qemu-iotests: fix 203 migration completion race iotests: Tweak 030 in order to trigger a race condition with parallel jobs iotests: Skip test for ENOMEM error iotests: Mark all tests executable iotests: Test creating overlay when guest running Signed-off-by: Kevin Wolf <kwolf@redhat.com>
commit a1be5921e3
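The "# gpg:" lines in the merge message above are the verification output git records when a signed pull tag is merged. As an illustration only (the tag name is taken from the merge line; a fetched tag and the signer's public key in the local keyring are assumed), such a tag could be checked from Python like this:

import subprocess

def verify_pull_tag(tag='pull-block-2018-03-09'):
    # 'git verify-tag' exits non-zero if the GPG signature is bad or
    # missing; gpg's human-readable report is printed on stderr.
    result = subprocess.run(['git', 'verify-tag', tag],
                            capture_output=True, text=True)
    print(result.stderr)
    return result.returncode == 0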
--- a/tests/qemu-iotests/030
+++ b/tests/qemu-iotests/030
@@ -156,7 +156,7 @@ class TestSingleDrive(iotests.QMPTestCase):
 class TestParallelOps(iotests.QMPTestCase):
     num_ops = 4 # Number of parallel block-stream operations
     num_imgs = num_ops * 2 + 1
-    image_len = num_ops * 1024 * 1024
+    image_len = num_ops * 512 * 1024
     imgs = []
 
     def setUp(self):
@@ -176,14 +176,14 @@ class TestParallelOps(iotests.QMPTestCase):
                      '-o', 'backing_file=%s' % self.imgs[i-1], self.imgs[i])
 
         # Put data into the images we are copying data from
-        for i in range(self.num_imgs / 2):
-            img_index = i * 2 + 1
-            # Alternate between 512k and 1M.
+        odd_img_indexes = [x for x in reversed(range(self.num_imgs)) if x % 2 == 1]
+        for i in range(len(odd_img_indexes)):
+            # Alternate between 256KB and 512KB.
             # This way jobs will not finish in the same order they were created
-            num_kb = 512 + 512 * (i % 2)
+            num_kb = 256 + 256 * (i % 2)
             qemu_io('-f', iotests.imgfmt,
-                    '-c', 'write -P %d %d %d' % (i, i*1024*1024, num_kb * 1024),
-                    self.imgs[img_index])
+                    '-c', 'write -P 0xFF %dk %dk' % (i * 512, num_kb),
+                    self.imgs[odd_img_indexes[i]])
 
         # Attach the drive to the VM
         self.vm = iotests.VM()
@@ -318,12 +318,14 @@ class TestParallelOps(iotests.QMPTestCase):
         self.wait_until_completed(drive='commit-drive0')
 
     # Test a block-stream and a block-commit job in parallel
-    def test_stream_commit(self):
+    # Here the stream job is supposed to finish quickly in order to reproduce
+    # the scenario that triggers the bug fixed in 3d5d319e1221 and 1a63a907507
+    def test_stream_commit_1(self):
         self.assertLessEqual(8, self.num_imgs)
         self.assert_no_active_block_jobs()
 
         # Stream from node0 into node2
-        result = self.vm.qmp('block-stream', device='node2', job_id='node2')
+        result = self.vm.qmp('block-stream', device='node2', base_node='node0', job_id='node2')
         self.assert_qmp(result, 'return', {})
 
         # Commit from the active layer into node3
@@ -348,6 +350,38 @@ class TestParallelOps(iotests.QMPTestCase):
 
         self.assert_no_active_block_jobs()
 
+    # This is similar to test_stream_commit_1 but both jobs are slowed
+    # down so they can run in parallel for a little while.
+    def test_stream_commit_2(self):
+        self.assertLessEqual(8, self.num_imgs)
+        self.assert_no_active_block_jobs()
+
+        # Stream from node0 into node4
+        result = self.vm.qmp('block-stream', device='node4', base_node='node0', job_id='node4', speed=1024*1024)
+        self.assert_qmp(result, 'return', {})
+
+        # Commit from the active layer into node5
+        result = self.vm.qmp('block-commit', device='drive0', base=self.imgs[5], speed=1024*1024)
+        self.assert_qmp(result, 'return', {})
+
+        # Wait for all jobs to be finished.
+        pending_jobs = ['node4', 'drive0']
+        while len(pending_jobs) > 0:
+            for event in self.vm.get_qmp_events(wait=True):
+                if event['event'] == 'BLOCK_JOB_COMPLETED':
+                    node_name = self.dictpath(event, 'data/device')
+                    self.assertTrue(node_name in pending_jobs)
+                    self.assert_qmp_absent(event, 'data/error')
+                    pending_jobs.remove(node_name)
+                if event['event'] == 'BLOCK_JOB_READY':
+                    self.assert_qmp(event, 'data/device', 'drive0')
+                    self.assert_qmp(event, 'data/type', 'commit')
+                    self.assert_qmp_absent(event, 'data/error')
+                    self.assertTrue('drive0' in pending_jobs)
+                    self.vm.qmp('block-job-complete', device='drive0')
+
+        self.assert_no_active_block_jobs()
+
     # Test the base_node parameter
     def test_stream_base_node_name(self):
         self.assert_no_active_block_jobs()
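For clarity, here is the new setUp() write pattern from the hunk above as a standalone sketch (not part of the commit; num_imgs is fixed to 9, the value TestParallelOps uses). Data goes into the odd-numbered images, visited in reverse order, with sizes alternating between 256 KB and 512 KB, so no two parallel jobs copy the same amount of data and they do not finish in creation order:

# Standalone sketch of the data layout created by the new setUp().
num_imgs = 9
odd_img_indexes = [x for x in reversed(range(num_imgs)) if x % 2 == 1]
for i in range(len(odd_img_indexes)):
    # Alternate between 256 KB and 512 KB, exactly as in the test.
    num_kb = 256 + 256 * (i % 2)
    print('img %d: write -P 0xFF at %dk, length %dk'
          % (odd_img_indexes[i], i * 512, num_kb))

Running it prints writes to images 7, 5, 3, 1 with lengths 256k, 512k, 256k, 512k, all of which fit within the reduced image_len of 2 MB.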
--- a/tests/qemu-iotests/030.out
+++ b/tests/qemu-iotests/030.out
@@ -1,5 +1,5 @@
-.......................
+........................
 ----------------------------------------------------------------------
-Ran 23 tests
+Ran 24 tests
 
 OK
--- a/tests/qemu-iotests/059
+++ b/tests/qemu-iotests/059
@@ -152,9 +152,8 @@ done
 echo
 echo "=== Testing afl image with a very large capacity ==="
 _use_sample_img afl9.vmdk.bz2
-# The sed makes this test pass on machines with little RAM
-# (and also with 32 bit builds)
-_img_info | sed -e 's/Cannot allocate memory/Invalid argument/'
+_img_info | grep -q 'Cannot allocate memory' && _notrun "Insufficent memory, skipped test"
+_img_info
 _cleanup_test_img
 
 # success, all done
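The change above stops masking the ENOMEM error text with sed and instead skips the test via _notrun when the machine lacks memory. The same skip-instead-of-mask idea expressed in Python, as a sketch (the allocation probe is purely illustrative; unittest.SkipTest is the standard mechanism):

import unittest

class LargeCapacityTest(unittest.TestCase):
    def test_large_image(self):
        try:
            # Stand-in for opening the huge afl9.vmdk image.
            buf = bytearray(1 << 30)
        except MemoryError:
            # Environment problem, not a code bug: skip, don't fail.
            raise unittest.SkipTest('Insufficient memory, skipped test')
        self.assertEqual(len(buf), 1 << 30)

if __name__ == '__main__':
    unittest.main()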
--- a/tests/qemu-iotests/153
+++ b/tests/qemu-iotests/153
@@ -32,6 +32,7 @@ _cleanup()
 {
     _cleanup_test_img
     rm -f "${TEST_IMG}.base"
+    rm -f "${TEST_IMG}.overlay"
     rm -f "${TEST_IMG}.convert"
     rm -f "${TEST_IMG}.a"
     rm -f "${TEST_IMG}.b"
@@ -177,8 +178,6 @@ rm -f "${TEST_IMG}.lnk" &>/dev/null
 ln -s ${TEST_IMG} "${TEST_IMG}.lnk" || echo "Failed to create link"
 _run_qemu_with_images "${TEST_IMG}.lnk" "${TEST_IMG}"
 
-echo
-echo "== Closing an image should unlock it =="
 _launch_qemu
 
 _send_qemu_cmd $QEMU_HANDLE \
@@ -193,7 +192,10 @@ _send_qemu_cmd $QEMU_HANDLE \
 
 _run_cmd $QEMU_IO "${TEST_IMG}" -c 'write 0 512'
 
-echo "Closing drive"
+echo "Creating overlay with qemu-img when the guest is running should be allowed"
+_run_cmd $QEMU_IMG create -f $IMGFMT -b "${TEST_IMG}" "${TEST_IMG}.overlay"
+
+echo "== Closing an image should unlock it =="
 _send_qemu_cmd $QEMU_HANDLE \
     "{ 'execute': 'human-monitor-command',
        'arguments': { 'command-line': 'drive_del d0' } }" \
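The new test case checks that creating a qcow2 overlay with qemu-img succeeds while a guest still holds the write lock on the image: the overlay creation only needs shared, read-only access to the backing file. A minimal sketch of the same operation driven from Python (paths are illustrative, not from the commit):

import subprocess

def create_overlay(base, overlay, fmt='qcow2'):
    # Mirrors the qemu-img invocation in the test: the overlay is created
    # on top of a backing file that a running guest has opened.
    subprocess.check_call(['qemu-img', 'create',
                           '-f', fmt, '-b', base, overlay])

create_overlay('/var/tmp/t.qcow2', '/var/tmp/t.qcow2.overlay')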
--- a/tests/qemu-iotests/153.out
+++ b/tests/qemu-iotests/153.out
@@ -372,15 +372,16 @@ Is another process using the image?
 == Symbolic link ==
 QEMU_PROG: -drive if=none,file=TEST_DIR/t.qcow2: Failed to get "write" lock
 Is another process using the image?
-
-== Closing an image should unlock it ==
 {"return": {}}
 Adding drive
 
 _qemu_io_wrapper TEST_DIR/t.qcow2 -c write 0 512
 can't open device TEST_DIR/t.qcow2: Failed to get "write" lock
 Is another process using the image?
-Closing drive
+Creating overlay with qemu-img when the guest is running should be allowed
+
+_qemu_img_wrapper create -f qcow2 -b TEST_DIR/t.qcow2 TEST_DIR/t.qcow2.overlay
+== Closing an image should unlock it ==
 
 _qemu_io_wrapper TEST_DIR/t.qcow2 -c write 0 512
 Adding two and closing one
--- a/tests/qemu-iotests/203
+++ b/tests/qemu-iotests/203
@@ -49,11 +49,18 @@ with iotests.FilePath('disk0.img') as disk0_img_path, \
                          node_name='drive1-node', iothread='iothread0',
                          force=True))
 
+    iotests.log('Enabling migration QMP events...')
+    iotests.log(vm.qmp('migrate-set-capabilities', capabilities=[
+        {
+            'capability': 'events',
+            'state': True
+        }
+    ]))
+
     iotests.log('Starting migration...')
     iotests.log(vm.qmp('migrate', uri='exec:cat >/dev/null'))
     while True:
-        vm.get_qmp_event(wait=60.0)
-        result = vm.qmp('query-migrate')
-        status = result.get('return', {}).get('status', None)
-        if status == 'completed':
+        event = vm.event_wait('MIGRATION')
+        iotests.log(event, filters=[iotests.filter_qmp_event])
+        if event['data']['status'] == 'completed':
             break
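The fix above enables the 'events' migration capability and waits on the MIGRATION events themselves instead of polling query-migrate after arbitrary QMP events, so a completion that lands between a wake-up and the next status poll can no longer be missed. Side by side, as a condensed sketch (vm stands for the test's iotests VM handle; method names are those used in the diff):

# Sketch contrasting the racy and the fixed wait loops from the diff.
# 'vm' is assumed to be an iotests.VM() whose migration is in progress.

def wait_migration_polling(vm):
    # Old, racy: the status can flip to 'completed' between polls, after
    # which no further QMP event may arrive to wake get_qmp_event().
    while True:
        vm.get_qmp_event(wait=60.0)
        status = vm.qmp('query-migrate').get('return', {}).get('status')
        if status == 'completed':
            break

def wait_migration_events(vm):
    # New: with the 'events' capability enabled, every migration state
    # change is delivered as a MIGRATION event, so none can be missed.
    while True:
        event = vm.event_wait('MIGRATION')
        if event['data']['status'] == 'completed':
            break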
--- a/tests/qemu-iotests/203.out
+++ b/tests/qemu-iotests/203.out
@@ -2,5 +2,10 @@ Launching VM...
 Setting IOThreads...
 {u'return': {}}
 {u'return': {}}
+Enabling migration QMP events...
+{u'return': {}}
 Starting migration...
 {u'return': {}}
+{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'setup'}, u'event': u'MIGRATION'}
+{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'active'}, u'event': u'MIGRATION'}
+{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'completed'}, u'event': u'MIGRATION'}