mirror of https://gitee.com/openkylin/qemu.git
scripts/simplebench improvements for 2021-05-04
-----BEGIN PGP SIGNATURE-----
iQIzBAABCAAdFiEEi5wmzbL9FHyIDoahVh8kwfGfefsFAmCRCZsACgkQVh8kwfGf
efuD8g//cALt2FiC5R0RwVqUhZ/RAOHM7LrENx8GxHgzZZMiWuF7hX9JvAtNzyUz
dflFxkOv1mhBfbXpU6ymrsfR5AKzyXlKMWfi14tGJKxIfJODPmBPVwM5IZmQCuMG
2laD5XPIj/sgIRUZXN5dXlyGlnTujyKpeubgN1dx8g+xJa2MvZ3lRJciEvGjJNbJ
QZX3kEQaBAkjSeDcS8RH1S3VoEaaon3rUyOUap6WpbDiLM/ZcgYZYQEjBnkinJXm
IJoUr5cR8gh9oxz82XFTJFLJ6ujUY/yMjHoHDRXCtmdlWU56oqJPQxfcqnzbOSk1
OrdIOofN8qM4ks4W5SL4aquBy3DIgfSZui1/+UboKGfQBU5/vxAuzsv9LCgiG4PL
Xkitpy09scxerKci9W9zyBHpMTIhIaf/ISxtYWaZlUSDLiuh1O6d28/TmMlRz3Xb
HUW9TZqhzFkmw8koqe2HcgCoWW4MHaFoBvwe/GTuQySJC8hts7gO36znN4Y3yTzS
9GcBybVnn6hKXn+N90iqgkYHNXJ3+CfF149Gfg30jg8Fm9oEJKFuAJnPNm5HQAoh
3LOF3D/qq7tmU6imE8molU1uTgxcIpA5fYu5rAxFPNUF5QpfATh7/IIp2Y7+ZCOg
9OE9uLJalUe3juZTccv/3KFRjqnewz0zT8UQBLrL3XNZBC52AM0=
=Abdk
-----END PGP SIGNATURE-----

Merge remote-tracking branch 'remotes/vsementsov/tags/pull-simplebench-2021-05-04' into staging

scripts/simplebench improvements for 2021-05-04

# gpg: Signature made Tue 04 May 2021 09:45:15 BST
# gpg:                using RSA key 8B9C26CDB2FD147C880E86A1561F24C1F19F79FB
# gpg: Good signature from "Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>" [unknown]
# gpg: WARNING: This key is not certified with a trusted signature!
# gpg:          There is no indication that the signature belongs to the owner.
# Primary key fingerprint: 8B9C 26CD B2FD 147C 880E 86A1 561F 24C1 F19F 79FB

* remotes/vsementsov/tags/pull-simplebench-2021-05-04:
  MAINTAINERS: update Benchmark util: add git tree
  simplebench/bench-backup: add --drop-caches argument
  simplebench/bench-backup: add --count and --no-initial-run
  simplebench/bench-backup: support qcow2 source files
  simplebench/bench_block_job: handle error in BLOCK_JOB_COMPLETED
  simplebench/bench-backup: add target-cache argument
  simplebench/bench-backup: add --compressed option
  simplebench: bench_one(): support count=1
  simplebench: bench_one(): add slow_limit argument

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
commit 6c769690ac

MAINTAINERS
@@ -2525,6 +2525,7 @@ Benchmark util
 M: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
 S: Maintained
 F: scripts/simplebench/
+T: git https://src.openvz.org/scm/~vsementsov/qemu.git simplebench
 
 Transactions helper
 M: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>

scripts/simplebench/bench-backup.py
@@ -23,7 +23,7 @@
 
 import simplebench
 from results_to_text import results_to_text
-from bench_block_job import bench_block_copy, drv_file, drv_nbd
+from bench_block_job import bench_block_copy, drv_file, drv_nbd, drv_qcow2
 
 
 def bench_func(env, case):

@@ -37,29 +37,56 @@ def bench_func(env, case):
 def bench(args):
     test_cases = []
 
-    sources = {}
-    targets = {}
-    for d in args.dir:
-        label, path = d.split(':')  # paths with colon not supported
-        sources[label] = drv_file(path + '/test-source')
-        targets[label] = drv_file(path + '/test-target')
+    # paths with colon not supported, so we just split by ':'
+    dirs = dict(d.split(':') for d in args.dir)
 
+    nbd_drv = None
     if args.nbd:
         nbd = args.nbd.split(':')
         host = nbd[0]
         port = '10809' if len(nbd) == 1 else nbd[1]
-        drv = drv_nbd(host, port)
-        sources['nbd'] = drv
-        targets['nbd'] = drv
+        nbd_drv = drv_nbd(host, port)
 
     for t in args.test:
         src, dst = t.split(':')
 
-        test_cases.append({
-            'id': t,
-            'source': sources[src],
-            'target': targets[dst]
-        })
+        if src == 'nbd' and dst == 'nbd':
+            raise ValueError("Can't use 'nbd' label for both src and dst")
+
+        if (src == 'nbd' or dst == 'nbd') and not nbd_drv:
+            raise ValueError("'nbd' label used but --nbd is not given")
+
+        if src == 'nbd':
+            source = nbd_drv
+        elif args.qcow2_sources:
+            source = drv_qcow2(drv_file(dirs[src] + '/test-source.qcow2'))
+        else:
+            source = drv_file(dirs[src] + '/test-source')
+
+        if dst == 'nbd':
+            test_cases.append({'id': t, 'source': source, 'target': nbd_drv})
+            continue
+
+        if args.target_cache == 'both':
+            target_caches = ['direct', 'cached']
+        else:
+            target_caches = [args.target_cache]
+
+        for c in target_caches:
+            o_direct = c == 'direct'
+            fname = dirs[dst] + '/test-target'
+            if args.compressed:
+                fname += '.qcow2'
+            target = drv_file(fname, o_direct=o_direct)
+            if args.compressed:
+                target = drv_qcow2(target)
+
+            test_id = t
+            if args.target_cache == 'both':
+                test_id += f'({c})'
+
+            test_cases.append({'id': test_id, 'source': source,
+                               'target': target})
 
     binaries = []  # list of (<label>, <path>, [<options>])
     for i, q in enumerate(args.env):
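
Standalone sketch, not part of the patch: how a --compressed target with
--target-cache=direct ends up described as a blockdev node. drv_file() and
drv_qcow2() are copied from the bench_block_job.py hunks further down; the
'ssd' label and the /mnt/ssd directory are made-up examples.

# drv_file()/drv_qcow2() as introduced by this series (see hunks below)
def drv_file(filename, o_direct=True):
    node = {'driver': 'file', 'filename': filename}
    if o_direct:
        node['cache'] = {'direct': True}
        node['aio'] = 'native'
    return node

def drv_qcow2(file):
    return {'driver': 'qcow2', 'file': file}

# --test ssd:ssd with --compressed and --target-cache=direct (hypothetical dir)
target = drv_qcow2(drv_file('/mnt/ssd/test-target.qcow2', o_direct=True))
print(target)
# {'driver': 'qcow2',
#  'file': {'driver': 'file', 'filename': '/mnt/ssd/test-target.qcow2',
#           'cache': {'direct': True}, 'aio': 'native'}}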

@@ -106,6 +133,13 @@ def bench(args):
            elif opt.startswith('max-workers='):
                x_perf['max-workers'] = int(opt.split('=')[1])
 
+        backup_options = {}
+        if x_perf:
+            backup_options['x-perf'] = x_perf
+
+        if args.compressed:
+            backup_options['compress'] = True
+
         if is_mirror:
             assert not x_perf
             test_envs.append({
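
Illustration, not part of the patch: with the change above, 'cmd-options' for a
backup environment collects both the x-perf tuning parsed from the --env string
and the new compress flag. The values below are invented.

x_perf = {'max-workers': 8}   # hypothetical per-binary options from --env
compressed = True             # as if --compressed was given

backup_options = {}
if x_perf:
    backup_options['x-perf'] = x_perf
if compressed:
    backup_options['compress'] = True

print(backup_options)
# {'x-perf': {'max-workers': 8}, 'compress': True}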

@@ -117,11 +151,13 @@
             test_envs.append({
                 'id': f'backup({label})\n' + '\n'.join(opts),
                 'cmd': 'blockdev-backup',
-                'cmd-options': {'x-perf': x_perf} if x_perf else {},
+                'cmd-options': backup_options,
                 'qemu-binary': path
             })
 
-    result = simplebench.bench(bench_func, test_envs, test_cases, count=3)
+    result = simplebench.bench(bench_func, test_envs, test_cases,
+                               count=args.count, initial_run=args.initial_run,
+                               drop_caches=args.drop_caches)
     with open('results.json', 'w') as f:
         json.dump(result, f, indent=4)
     print(results_to_text(result))

@@ -163,5 +199,30 @@ def __call__(self, parser, namespace, values, option_string=None):
     p.add_argument('--test', nargs='+', help='''\
 Tests, in form source-dir-label:target-dir-label''',
                    action=ExtendAction)
+    p.add_argument('--compressed', help='''\
+Use compressed backup. It automatically means
+automatically creating qcow2 target with
+lazy_refcounts for each test run''', action='store_true')
+    p.add_argument('--qcow2-sources', help='''\
+Use test-source.qcow2 images as sources instead of
+test-source raw images''', action='store_true')
+    p.add_argument('--target-cache', help='''\
+Setup cache for target nodes. Options:
+   direct: default, use O_DIRECT and aio=native
+   cached: use system cache (Qemu default) and aio=threads (Qemu default)
+   both: generate two test cases for each src:dst pair''',
+                   default='direct', choices=('direct', 'cached', 'both'))
+
+    p.add_argument('--count', type=int, default=3, help='''\
+Number of test runs per table cell''')
+
+    # BooleanOptionalAction helps to support --no-initial-run option
+    p.add_argument('--initial-run', action=argparse.BooleanOptionalAction,
+                   help='''\
+Do additional initial run per cell which doesn't count in result,
+default true''')
+
+    p.add_argument('--drop-caches', action='store_true', help='''\
+Do "sync; echo 3 > /proc/sys/vm/drop_caches" before each test run''')
 
     bench(p.parse_args())
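
A small sketch, not from the patch, of the behaviour argparse.BooleanOptionalAction
gives --initial-run: a single declaration provides both --initial-run and
--no-initial-run (Python 3.9+), which is what the comment in the hunk above refers to.

import argparse

p = argparse.ArgumentParser()
p.add_argument('--initial-run', action=argparse.BooleanOptionalAction)

print(p.parse_args([]).initial_run)                    # None (flag not given)
print(p.parse_args(['--initial-run']).initial_run)     # True
print(p.parse_args(['--no-initial-run']).initial_run)  # False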

scripts/simplebench/bench_block_job.py
@@ -21,6 +21,7 @@
 
 import sys
 import os
+import subprocess
 import socket
 import json
 

@@ -69,6 +70,10 @@ def bench_block_job(cmd, cmd_args, qemu_args):
             vm.shutdown()
             return {'error': 'block-job failed: ' + str(e),
                     'vm-log': vm.get_log()}
+        if 'error' in e['data']:
+            vm.shutdown()
+            return {'error': 'block-job failed: ' + e['data']['error'],
+                    'vm-log': vm.get_log()}
         end_ms = e['timestamp']['seconds'] * 1000000 + \
             e['timestamp']['microseconds']
     finally:
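
Sketch of the case the new check covers, not from the patch: a BLOCK_JOB_COMPLETED
event can report a failed job through an optional 'error' string in its data, and
such an event should be reported as a failure rather than timed as a successful
run. All field values below are invented.

event = {
    'event': 'BLOCK_JOB_COMPLETED',
    'timestamp': {'seconds': 1620117915, 'microseconds': 123456},
    'data': {'device': 'job0', 'type': 'backup',
             'len': 1073741824, 'offset': 4194304, 'speed': 0,
             'error': 'No space left on device'},
}

if 'error' in event['data']:
    # the branch added above: report the failure instead of measuring it
    print('block-job failed: ' + event['data']['error'])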

@@ -77,11 +82,34 @@ def bench_block_job(cmd, cmd_args, qemu_args):
     return {'seconds': (end_ms - start_ms) / 1000000.0}
 
 
+def get_image_size(path):
+    out = subprocess.run(['qemu-img', 'info', '--out=json', path],
+                         stdout=subprocess.PIPE, check=True).stdout
+    return json.loads(out)['virtual-size']
+
+
+def get_blockdev_size(obj):
+    img = obj['filename'] if 'filename' in obj else obj['file']['filename']
+    return get_image_size(img)
+
+
 # Bench backup or mirror
 def bench_block_copy(qemu_binary, cmd, cmd_options, source, target):
     """Helper to run bench_block_job() for mirror or backup"""
     assert cmd in ('blockdev-backup', 'blockdev-mirror')
 
+    if target['driver'] == 'qcow2':
+        try:
+            os.remove(target['file']['filename'])
+        except OSError:
+            pass
+
+        subprocess.run(['qemu-img', 'create', '-f', 'qcow2',
+                        target['file']['filename'],
+                        str(get_blockdev_size(source))],
+                       stdout=subprocess.DEVNULL,
+                       stderr=subprocess.DEVNULL, check=True)
+
     source['node-name'] = 'source'
     target['node-name'] = 'target'
 
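
Sketch, not part of the patch, of why get_blockdev_size() looks in two places for
the image path: a raw file node keeps 'filename' at the top level, while a qcow2
node wraps a file node. The paths are made up; the real helper then feeds the
path to 'qemu-img info --out=json' and reads 'virtual-size'.

raw_node = {'driver': 'file', 'filename': '/mnt/test/test-source'}
qcow2_node = {'driver': 'qcow2',
              'file': {'driver': 'file',
                       'filename': '/mnt/test/test-source.qcow2'}}

def image_path(obj):
    # same selection logic as get_blockdev_size(), minus the qemu-img call
    return obj['filename'] if 'filename' in obj else obj['file']['filename']

print(image_path(raw_node))    # /mnt/test/test-source
print(image_path(qcow2_node))  # /mnt/test/test-source.qcow2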

@@ -96,9 +124,13 @@ def bench_block_copy(qemu_binary, cmd, cmd_options, source, target):
                             '-blockdev', json.dumps(target)])
 
 
-def drv_file(filename):
-    return {'driver': 'file', 'filename': filename,
-            'cache': {'direct': True}, 'aio': 'native'}
+def drv_file(filename, o_direct=True):
+    node = {'driver': 'file', 'filename': filename}
+    if o_direct:
+        node['cache'] = {'direct': True}
+        node['aio'] = 'native'
+
+    return node
 
 
 def drv_nbd(host, port):

@@ -106,6 +138,10 @@ def drv_nbd(host, port):
             'server': {'type': 'inet', 'host': host, 'port': port}}
 
 
+def drv_qcow2(file):
+    return {'driver': 'qcow2', 'file': file}
+
+
 if __name__ == '__main__':
     import sys
 

scripts/simplebench/simplebench.py
@@ -19,9 +19,17 @@
 #
 
 import statistics
+import subprocess
+import time
 
 
-def bench_one(test_func, test_env, test_case, count=5, initial_run=True):
+def do_drop_caches():
+    subprocess.run('sync; echo 3 > /proc/sys/vm/drop_caches', shell=True,
+                   check=True)
+
+
+def bench_one(test_func, test_env, test_case, count=5, initial_run=True,
+              slow_limit=100, drop_caches=False):
     """Benchmark one test-case
 
     test_func -- benchmarking function with prototype

@@ -36,6 +44,9 @@ def bench_one(test_func, test_env, test_case, count=5, initial_run=True):
     test_case -- test case - opaque second argument for test_func
     count -- how many times to call test_func, to calculate average
     initial_run -- do initial run of test_func, which don't get into result
+    slow_limit -- stop at slow run (that exceedes the slow_limit by seconds).
+                  (initial run is not measured)
+    drop_caches -- drop caches before each run
 
     Returns dict with the following fields:
         'runs': list of test_func results

@@ -49,15 +60,25 @@ def bench_one(test_func, test_env, test_case, count=5, initial_run=True):
     """
     if initial_run:
         print('  #initial run:')
+        do_drop_caches()
         print('   ', test_func(test_env, test_case))
 
     runs = []
     for i in range(count):
+        t = time.time()
+
         print('  #run {}'.format(i+1))
+        do_drop_caches()
         res = test_func(test_env, test_case)
         print('   ', res)
         runs.append(res)
 
+        if time.time() - t > slow_limit:
+            print('    - run is too slow, stop here')
+            break
+
+    count = len(runs)
+
     result = {'runs': runs}
 
     succeeded = [r for r in runs if ('seconds' in r or 'iops' in r)]

@@ -71,7 +92,10 @@ def bench_one(test_func, test_env, test_case, count=5, initial_run=True):
         dim = 'seconds'
     result['dimension'] = dim
     result['average'] = statistics.mean(r[dim] for r in succeeded)
-    result['stdev'] = statistics.stdev(r[dim] for r in succeeded)
+    if len(succeeded) == 1:
+        result['stdev'] = 0
+    else:
+        result['stdev'] = statistics.stdev(r[dim] for r in succeeded)
 
     if len(succeeded) < count:
         result['n-failed'] = count - len(succeeded)
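
Sketch, not from the patch, of the corner case the stdev change handles:
statistics.stdev() raises StatisticsError for fewer than two data points, so a
single successful run (count=1, or an early stop at slow_limit) needs the
explicit zero. The timing value is invented.

import statistics

succeeded = [{'seconds': 12.3}]   # hypothetical single successful run
dim = 'seconds'

if len(succeeded) == 1:
    stdev = 0
else:
    stdev = statistics.stdev(r[dim] for r in succeeded)

print(stdev)  # 0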