Block patches:

- Allow blockdev-backup from nodes that are not in qemu's main AIO
   context to newly added nodes
 - Add salvaging mode to qemu-img convert
 - Minor fixes to tests, documentation, and for less Valgrind annoyance
 -----BEGIN PGP SIGNATURE-----
 
 iQFGBAABCAAwFiEEkb62CjDbPohX0Rgp9AfbAGHVz0AFAl0Do0MSHG1yZWl0ekBy
 ZWRoYXQuY29tAAoJEPQH2wBh1c9AYfAH/3Z4Ua5Pwown99TH57jdfHBzdWnhwQV6
 fEJs1viFOufb9RS/3UB3JlPZ71Jf+4y97g9NrRY05BQ3ZJPwlm9moSdM7GPQ6Ni3
 OVqHP6shnOHlBHI897JRDPjpLGEujIVk59EE9wB7e/T8rpGo21yJ91u5Ha4T9M2/
 jsHwL/1NPCe8OxdCMSNJmIPA2PqouBjvEGdbCwa6sOVNe+pFJnhStEZOjnP+rmmb
 wt5vk+NR40rdsuAoUZ7mTr0GhUDtJAL9vkJgW1uH7b9mzAsxklsWpct6oxDwEYJL
 BURVLsT2g8QQwX7pebaOkys0ktPGkegtv785yh8KhPf2Yf6S8L1vS4M=
 =o8kg
 -----END PGP SIGNATURE-----

Merge remote-tracking branch 'remotes/maxreitz/tags/pull-block-2019-06-14' into staging

Block patches:
- Allow blockdev-backup from nodes that are not in qemu's main AIO
  context to newly added nodes
- Add salvaging mode to qemu-img convert
- Minor fixes to tests, documentation, and for less Valgrind annoyance

# gpg: Signature made Fri 14 Jun 2019 14:38:11 BST
# gpg:                using RSA key 91BEB60A30DB3E8857D11829F407DB0061D5CF40
# gpg:                issuer "mreitz@redhat.com"
# gpg: Good signature from "Max Reitz <mreitz@redhat.com>" [full]
# Primary key fingerprint: 91BE B60A 30DB 3E88 57D1  1829 F407 DB00 61D5 CF40

* remotes/maxreitz/tags/pull-block-2019-06-14:
  iotests: Test qemu-img convert -C --salvage
  iotests: Test qemu-img convert --salvage
  blkdebug: Inject errors on .bdrv_co_block_status()
  blkdebug: Add "none" event
  blkdebug: Add @iotype error option
  qemu-img: Add salvaging mode to convert
  qemu-img: Move quiet into ImgConvertState
  blockdev: Overlays are not snapshots
  qapi/block-core: Overlays are not snapshots
  qemu-img: Fix options leakage in img_rebase()
  iotests: restrict 254 to support only qcow2
  hw/block/fdc: floppy command FIFO memory initialization
  iotests: Fix intermittent failure in 219
  iotests: Filter 175's allocation information
  event_match: always match on None value
  iotests: add iotest 256 for testing blockdev-backup across iothread contexts
  iotests.py: rewrite run_job to be pickier
  QEMUMachine: add events_wait method
  iotests.py: do not use infinite waits
  blockdev-backup: don't check aio_context too early

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
This commit is contained in:
Peter Maydell 2019-06-14 14:46:13 +01:00
commit f3d0bec9f8
21 changed files with 773 additions and 117 deletions

View File

@ -75,6 +75,7 @@ typedef struct BlkdebugRule {
int state;
union {
struct {
uint64_t iotype_mask;
int error;
int immediately;
int once;
@ -91,6 +92,9 @@ typedef struct BlkdebugRule {
QSIMPLEQ_ENTRY(BlkdebugRule) active_next;
} BlkdebugRule;
QEMU_BUILD_BUG_MSG(BLKDEBUG_IO_TYPE__MAX > 64,
"BlkdebugIOType mask does not fit into an uint64_t");
static QemuOptsList inject_error_opts = {
.name = "inject-error",
.head = QTAILQ_HEAD_INITIALIZER(inject_error_opts.head),
@ -103,6 +107,10 @@ static QemuOptsList inject_error_opts = {
.name = "state",
.type = QEMU_OPT_NUMBER,
},
{
.name = "iotype",
.type = QEMU_OPT_STRING,
},
{
.name = "errno",
.type = QEMU_OPT_NUMBER,
@ -162,6 +170,8 @@ static int add_rule(void *opaque, QemuOpts *opts, Error **errp)
int event;
struct BlkdebugRule *rule;
int64_t sector;
BlkdebugIOType iotype;
Error *local_error = NULL;
/* Find the right event for the rule */
event_name = qemu_opt_get(opts, "event");
@ -192,6 +202,26 @@ static int add_rule(void *opaque, QemuOpts *opts, Error **errp)
sector = qemu_opt_get_number(opts, "sector", -1);
rule->options.inject.offset =
sector == -1 ? -1 : sector * BDRV_SECTOR_SIZE;
iotype = qapi_enum_parse(&BlkdebugIOType_lookup,
qemu_opt_get(opts, "iotype"),
BLKDEBUG_IO_TYPE__MAX, &local_error);
if (local_error) {
error_propagate(errp, local_error);
return -1;
}
if (iotype != BLKDEBUG_IO_TYPE__MAX) {
rule->options.inject.iotype_mask = (1ull << iotype);
} else {
/* Apply the default */
rule->options.inject.iotype_mask =
(1ull << BLKDEBUG_IO_TYPE_READ)
| (1ull << BLKDEBUG_IO_TYPE_WRITE)
| (1ull << BLKDEBUG_IO_TYPE_WRITE_ZEROES)
| (1ull << BLKDEBUG_IO_TYPE_DISCARD)
| (1ull << BLKDEBUG_IO_TYPE_FLUSH);
}
break;
case ACTION_SET_STATE:
@ -461,6 +491,8 @@ static int blkdebug_open(BlockDriverState *bs, QDict *options, int flags,
goto out;
}
bdrv_debug_event(bs, BLKDBG_NONE);
ret = 0;
out:
if (ret < 0) {
@ -470,7 +502,8 @@ out:
return ret;
}
static int rule_check(BlockDriverState *bs, uint64_t offset, uint64_t bytes)
static int rule_check(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
BlkdebugIOType iotype)
{
BDRVBlkdebugState *s = bs->opaque;
BlkdebugRule *rule = NULL;
@ -480,9 +513,10 @@ static int rule_check(BlockDriverState *bs, uint64_t offset, uint64_t bytes)
QSIMPLEQ_FOREACH(rule, &s->active_rules, active_next) {
uint64_t inject_offset = rule->options.inject.offset;
if (inject_offset == -1 ||
(bytes && inject_offset >= offset &&
inject_offset < offset + bytes))
if ((inject_offset == -1 ||
(bytes && inject_offset >= offset &&
inject_offset < offset + bytes)) &&
(rule->options.inject.iotype_mask & (1ull << iotype)))
{
break;
}
@ -521,7 +555,7 @@ blkdebug_co_preadv(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
assert(bytes <= bs->bl.max_transfer);
}
err = rule_check(bs, offset, bytes);
err = rule_check(bs, offset, bytes, BLKDEBUG_IO_TYPE_READ);
if (err) {
return err;
}
@ -542,7 +576,7 @@ blkdebug_co_pwritev(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
assert(bytes <= bs->bl.max_transfer);
}
err = rule_check(bs, offset, bytes);
err = rule_check(bs, offset, bytes, BLKDEBUG_IO_TYPE_WRITE);
if (err) {
return err;
}
@ -552,7 +586,7 @@ blkdebug_co_pwritev(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
static int blkdebug_co_flush(BlockDriverState *bs)
{
int err = rule_check(bs, 0, 0);
int err = rule_check(bs, 0, 0, BLKDEBUG_IO_TYPE_FLUSH);
if (err) {
return err;
@ -586,7 +620,7 @@ static int coroutine_fn blkdebug_co_pwrite_zeroes(BlockDriverState *bs,
assert(bytes <= bs->bl.max_pwrite_zeroes);
}
err = rule_check(bs, offset, bytes);
err = rule_check(bs, offset, bytes, BLKDEBUG_IO_TYPE_WRITE_ZEROES);
if (err) {
return err;
}
@ -620,7 +654,7 @@ static int coroutine_fn blkdebug_co_pdiscard(BlockDriverState *bs,
assert(bytes <= bs->bl.max_pdiscard);
}
err = rule_check(bs, offset, bytes);
err = rule_check(bs, offset, bytes, BLKDEBUG_IO_TYPE_DISCARD);
if (err) {
return err;
}
@ -636,7 +670,15 @@ static int coroutine_fn blkdebug_co_block_status(BlockDriverState *bs,
int64_t *map,
BlockDriverState **file)
{
int err;
assert(QEMU_IS_ALIGNED(offset | bytes, bs->bl.request_alignment));
err = rule_check(bs, offset, bytes, BLKDEBUG_IO_TYPE_BLOCK_STATUS);
if (err) {
return err;
}
return bdrv_co_block_status_from_file(bs, want_zero, offset, bytes,
pnum, map, file);
}

View File

@ -1608,13 +1608,13 @@ static void external_snapshot_prepare(BlkActionState *common,
s->has_snapshot_node_name ? s->snapshot_node_name : NULL;
if (node_name && !snapshot_node_name) {
error_setg(errp, "New snapshot node name missing");
error_setg(errp, "New overlay node name missing");
goto out;
}
if (snapshot_node_name &&
bdrv_lookup_bs(snapshot_node_name, snapshot_node_name, NULL)) {
error_setg(errp, "New snapshot node name already in use");
error_setg(errp, "New overlay node name already in use");
goto out;
}
@ -1656,7 +1656,7 @@ static void external_snapshot_prepare(BlkActionState *common,
}
if (bdrv_has_blk(state->new_bs)) {
error_setg(errp, "The snapshot is already in use");
error_setg(errp, "The overlay is already in use");
goto out;
}
@ -1666,12 +1666,12 @@ static void external_snapshot_prepare(BlkActionState *common,
}
if (state->new_bs->backing != NULL) {
error_setg(errp, "The snapshot already has a backing image");
error_setg(errp, "The overlay already has a backing image");
goto out;
}
if (!state->new_bs->drv->supports_backing) {
error_setg(errp, "The snapshot does not support backing images");
error_setg(errp, "The overlay does not support backing images");
goto out;
}
@ -1876,10 +1876,6 @@ static void blockdev_backup_prepare(BlkActionState *common, Error **errp)
}
aio_context = bdrv_get_aio_context(bs);
if (aio_context != bdrv_get_aio_context(target)) {
error_setg(errp, "Backup between two IO threads is not implemented");
return;
}
aio_context_acquire(aio_context);
state->bs = bs;

View File

@ -2648,6 +2648,7 @@ static void fdctrl_realize_common(DeviceState *dev, FDCtrl *fdctrl,
FLOPPY_DPRINTF("init controller\n");
fdctrl->fifo = qemu_memalign(512, FD_SECTOR_LEN);
memset(fdctrl->fifo, 0, FD_SECTOR_LEN);
fdctrl->fifo_size = 512;
fdctrl->result_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
fdctrl_result_timer, fdctrl);

View File

@ -402,42 +402,75 @@ def get_qmp_events(self, wait=False):
self._qmp.clear_events()
return events
def event_wait(self, name, timeout=60.0, match=None):
@staticmethod
def event_match(event, match=None):
"""
Wait for specified timeout on named event in QMP; optionally filter
results by match.
Check if an event matches optional match criteria.
The 'match' is checked to be a recursive subset of the 'event'; skips
branch processing on match's value None
{"foo": {"bar": 1}} matches {"foo": None}
{"foo": {"bar": 1}} does not match {"foo": {"baz": None}}
The match criteria takes the form of a matching subdict. The event is
checked to be a superset of the subdict, recursively, with matching
values whenever the subdict values are not None.
This has a limitation that you cannot explicitly check for None values.
Examples, with the subdict queries on the left:
- None matches any object.
- {"foo": None} matches {"foo": {"bar": 1}}
- {"foo": None} matches {"foo": 5}
- {"foo": {"abc": None}} does not match {"foo": {"bar": 1}}
- {"foo": {"rab": 2}} matches {"foo": {"bar": 1, "rab": 2}}
"""
def event_match(event, match=None):
if match is None:
return True
if match is None:
return True
try:
for key in match:
if key in event:
if isinstance(event[key], dict):
if not event_match(event[key], match[key]):
return False
elif event[key] != match[key]:
if not QEMUMachine.event_match(event[key], match[key]):
return False
else:
return False
return True
except TypeError:
# either match or event wasn't iterable (not a dict)
return match == event
def event_wait(self, name, timeout=60.0, match=None):
"""
event_wait waits for and returns a named event from QMP with a timeout.
name: The event to wait for.
timeout: QEMUMonitorProtocol.pull_event timeout parameter.
match: Optional match criteria. See event_match for details.
"""
return self.events_wait([(name, match)], timeout)
def events_wait(self, events, timeout=60.0):
"""
events_wait waits for and returns a named event from QMP with a timeout.
events: a sequence of (name, match_criteria) tuples.
The match criteria are optional and may be None.
See event_match for details.
timeout: QEMUMonitorProtocol.pull_event timeout parameter.
"""
def _match(event):
for name, match in events:
if (event['event'] == name and
self.event_match(event, match)):
return True
return False
# Search cached events
for event in self._events:
if (event['event'] == name) and event_match(event, match):
if _match(event):
self._events.remove(event)
return event
# Poll for new events
while True:
event = self._qmp.pull_event(wait=timeout)
if (event['event'] == name) and event_match(event, match):
if _match(event):
return event
self._events.append(event)

View File

@ -1279,17 +1279,17 @@
#
# Either @device or @node-name must be set but not both.
#
# @device: the name of the device to generate the snapshot from.
# @device: the name of the device to take a snapshot of.
#
# @node-name: graph node name to generate the snapshot from (Since 2.0)
#
# @snapshot-file: the target of the new image. If the file exists, or
# if it is a device, the snapshot will be created in the existing
# file/device. Otherwise, a new file will be created.
# @snapshot-file: the target of the new overlay image. If the file
# exists, or if it is a device, the overlay will be created in the
# existing file/device. Otherwise, a new file will be created.
#
# @snapshot-node-name: the graph node name of the new image (Since 2.0)
#
# @format: the format of the snapshot image, default is 'qcow2'.
# @format: the format of the overlay image, default is 'qcow2'.
#
# @mode: whether and how QEMU should create a new image, default is
# 'absolute-paths'.
@ -1302,10 +1302,10 @@
##
# @BlockdevSnapshot:
#
# @node: device or node name that will have a snapshot created.
# @node: device or node name that will have a snapshot taken.
#
# @overlay: reference to the existing block device that will become
# the overlay of @node, as part of creating the snapshot.
# the overlay of @node, as part of taking the snapshot.
# It must not have a current backing file (this can be
# achieved by passing "backing": null to blockdev-add).
#
@ -1443,7 +1443,7 @@
##
# @blockdev-snapshot-sync:
#
# Generates a synchronous snapshot of a block device.
# Takes a synchronous snapshot of a block device.
#
# For the arguments, see the documentation of BlockdevSnapshotSync.
#
@ -1469,9 +1469,9 @@
##
# @blockdev-snapshot:
#
# Generates a snapshot of a block device.
# Takes a snapshot of a block device.
#
# Create a snapshot, by installing 'node' as the backing image of
# Take a snapshot, by installing 'node' as the backing image of
# 'overlay'. Additionally, if 'node' is associated with a block
# device, the block device changes to using 'overlay' as its new active
# image.
@ -3244,6 +3244,8 @@
#
# @cluster_alloc_space: an allocation of file space for a cluster (since 4.1)
#
# @none: triggers once at creation of the blkdebug node (since 4.1)
#
# Since: 2.9
##
{ 'enum': 'BlkdebugEvent', 'prefix': 'BLKDBG',
@ -3262,7 +3264,30 @@
'pwritev_rmw_tail', 'pwritev_rmw_after_tail', 'pwritev',
'pwritev_zero', 'pwritev_done', 'empty_image_prepare',
'l1_shrink_write_table', 'l1_shrink_free_l2_clusters',
'cor_write', 'cluster_alloc_space'] }
'cor_write', 'cluster_alloc_space', 'none'] }
##
# @BlkdebugIOType:
#
# Kinds of I/O that blkdebug can inject errors in.
#
# @read: .bdrv_co_preadv()
#
# @write: .bdrv_co_pwritev()
#
# @write-zeroes: .bdrv_co_pwrite_zeroes()
#
# @discard: .bdrv_co_pdiscard()
#
# @flush: .bdrv_co_flush_to_disk()
#
# @block-status: .bdrv_co_block_status()
#
# Since: 4.1
##
{ 'enum': 'BlkdebugIOType', 'prefix': 'BLKDEBUG_IO_TYPE',
'data': [ 'read', 'write', 'write-zeroes', 'discard', 'flush',
'block-status' ] }
##
# @BlkdebugInjectErrorOptions:
@ -3274,6 +3299,11 @@
# @state: the state identifier blkdebug needs to be in to
# actually trigger the event; defaults to "any"
#
# @iotype: the type of I/O operations on which this error should
# be injected; defaults to "all read, write,
# write-zeroes, discard, and flush operations"
# (since: 4.1)
#
# @errno: error identifier (errno) to be returned; defaults to
# EIO
#
@ -3291,6 +3321,7 @@
{ 'struct': 'BlkdebugInjectErrorOptions',
'data': { 'event': 'BlkdebugEvent',
'*state': 'int',
'*iotype': 'BlkdebugIOType',
'*errno': 'int',
'*sector': 'int',
'*once': 'bool',

View File

@ -44,9 +44,9 @@ STEXI
ETEXI
DEF("convert", img_convert,
"convert [--object objectdef] [--image-opts] [--target-image-opts] [-U] [-C] [-c] [-p] [-q] [-n] [-f fmt] [-t cache] [-T src_cache] [-O output_fmt] [-B backing_file] [-o options] [-l snapshot_param] [-S sparse_size] [-m num_coroutines] [-W] filename [filename2 [...]] output_filename")
"convert [--object objectdef] [--image-opts] [--target-image-opts] [-U] [-C] [-c] [-p] [-q] [-n] [-f fmt] [-t cache] [-T src_cache] [-O output_fmt] [-B backing_file] [-o options] [-l snapshot_param] [-S sparse_size] [-m num_coroutines] [-W] [--salvage] filename [filename2 [...]] output_filename")
STEXI
@item convert [--object @var{objectdef}] [--image-opts] [--target-image-opts] [-U] [-C] [-c] [-p] [-q] [-n] [-f @var{fmt}] [-t @var{cache}] [-T @var{src_cache}] [-O @var{output_fmt}] [-B @var{backing_file}] [-o @var{options}] [-l @var{snapshot_param}] [-S @var{sparse_size}] [-m @var{num_coroutines}] [-W] @var{filename} [@var{filename2} [...]] @var{output_filename}
@item convert [--object @var{objectdef}] [--image-opts] [--target-image-opts] [-U] [-C] [-c] [-p] [-q] [-n] [-f @var{fmt}] [-t @var{cache}] [-T @var{src_cache}] [-O @var{output_fmt}] [-B @var{backing_file}] [-o @var{options}] [-l @var{snapshot_param}] [-S @var{sparse_size}] [-m @var{num_coroutines}] [-W] [--salvage] @var{filename} [@var{filename2} [...]] @var{output_filename}
ETEXI
DEF("create", img_create,

View File

@ -69,6 +69,7 @@ enum {
OPTION_SIZE = 264,
OPTION_PREALLOCATION = 265,
OPTION_SHRINK = 266,
OPTION_SALVAGE = 267,
};
typedef enum OutputFormat {
@ -1581,6 +1582,8 @@ typedef struct ImgConvertState {
int64_t target_backing_sectors; /* negative if unknown */
bool wr_in_order;
bool copy_range;
bool salvage;
bool quiet;
int min_sparse;
int alignment;
size_t cluster_sectors;
@ -1627,25 +1630,44 @@ static int convert_iteration_sectors(ImgConvertState *s, int64_t sector_num)
}
if (s->sector_next_status <= sector_num) {
int64_t count = n * BDRV_SECTOR_SIZE;
uint64_t offset = (sector_num - src_cur_offset) * BDRV_SECTOR_SIZE;
int64_t count;
if (s->target_has_backing) {
do {
count = n * BDRV_SECTOR_SIZE;
if (s->target_has_backing) {
ret = bdrv_block_status(blk_bs(s->src[src_cur]), offset,
count, &count, NULL, NULL);
} else {
ret = bdrv_block_status_above(blk_bs(s->src[src_cur]), NULL,
offset, count, &count, NULL,
NULL);
}
if (ret < 0) {
if (s->salvage) {
if (n == 1) {
if (!s->quiet) {
warn_report("error while reading block status at "
"offset %" PRIu64 ": %s", offset,
strerror(-ret));
}
/* Just try to read the data, then */
ret = BDRV_BLOCK_DATA;
count = BDRV_SECTOR_SIZE;
} else {
/* Retry on a shorter range */
n = DIV_ROUND_UP(n, 4);
}
} else {
error_report("error while reading block status at offset "
"%" PRIu64 ": %s", offset, strerror(-ret));
return ret;
}
}
} while (ret < 0);
ret = bdrv_block_status(blk_bs(s->src[src_cur]),
(sector_num - src_cur_offset) *
BDRV_SECTOR_SIZE,
count, &count, NULL, NULL);
} else {
ret = bdrv_block_status_above(blk_bs(s->src[src_cur]), NULL,
(sector_num - src_cur_offset) *
BDRV_SECTOR_SIZE,
count, &count, NULL, NULL);
}
if (ret < 0) {
error_report("error while reading block status of sector %" PRId64
": %s", sector_num, strerror(-ret));
return ret;
}
n = DIV_ROUND_UP(count, BDRV_SECTOR_SIZE);
if (ret & BDRV_BLOCK_ZERO) {
@ -1682,6 +1704,7 @@ static int convert_iteration_sectors(ImgConvertState *s, int64_t sector_num)
static int coroutine_fn convert_co_read(ImgConvertState *s, int64_t sector_num,
int nb_sectors, uint8_t *buf)
{
uint64_t single_read_until = 0;
int n, ret;
assert(nb_sectors <= s->buf_sectors);
@ -1689,6 +1712,7 @@ static int coroutine_fn convert_co_read(ImgConvertState *s, int64_t sector_num,
BlockBackend *blk;
int src_cur;
int64_t bs_sectors, src_cur_offset;
uint64_t offset;
/* In the case of compression with multiple source files, we can get a
* nb_sectors that spreads into the next part. So we must be able to
@ -1697,13 +1721,29 @@ static int coroutine_fn convert_co_read(ImgConvertState *s, int64_t sector_num,
blk = s->src[src_cur];
bs_sectors = s->src_sectors[src_cur];
n = MIN(nb_sectors, bs_sectors - (sector_num - src_cur_offset));
offset = (sector_num - src_cur_offset) << BDRV_SECTOR_BITS;
ret = blk_co_pread(
blk, (sector_num - src_cur_offset) << BDRV_SECTOR_BITS,
n << BDRV_SECTOR_BITS, buf, 0);
n = MIN(nb_sectors, bs_sectors - (sector_num - src_cur_offset));
if (single_read_until > offset) {
n = 1;
}
ret = blk_co_pread(blk, offset, n << BDRV_SECTOR_BITS, buf, 0);
if (ret < 0) {
return ret;
if (s->salvage) {
if (n > 1) {
single_read_until = offset + (n << BDRV_SECTOR_BITS);
continue;
} else {
if (!s->quiet) {
warn_report("error while reading offset %" PRIu64
": %s", offset, strerror(-ret));
}
memset(buf, 0, BDRV_SECTOR_SIZE);
}
} else {
return ret;
}
}
sector_num += n;
@ -2012,7 +2052,7 @@ static int img_convert(int argc, char **argv)
QDict *open_opts = NULL;
char *options = NULL;
Error *local_err = NULL;
bool writethrough, src_writethrough, quiet = false, image_opts = false,
bool writethrough, src_writethrough, image_opts = false,
skip_create = false, progress = false, tgt_image_opts = false;
int64_t ret = -EINVAL;
bool force_share = false;
@ -2034,6 +2074,7 @@ static int img_convert(int argc, char **argv)
{"image-opts", no_argument, 0, OPTION_IMAGE_OPTS},
{"force-share", no_argument, 0, 'U'},
{"target-image-opts", no_argument, 0, OPTION_TARGET_IMAGE_OPTS},
{"salvage", no_argument, 0, OPTION_SALVAGE},
{0, 0, 0, 0}
};
c = getopt_long(argc, argv, ":hf:O:B:Cco:l:S:pt:T:qnm:WU",
@ -2120,7 +2161,7 @@ static int img_convert(int argc, char **argv)
src_cache = optarg;
break;
case 'q':
quiet = true;
s.quiet = true;
break;
case 'n':
skip_create = true;
@ -2151,6 +2192,9 @@ static int img_convert(int argc, char **argv)
case OPTION_IMAGE_OPTS:
image_opts = true;
break;
case OPTION_SALVAGE:
s.salvage = true;
break;
case OPTION_TARGET_IMAGE_OPTS:
tgt_image_opts = true;
break;
@ -2177,6 +2221,11 @@ static int img_convert(int argc, char **argv)
goto fail_getopt;
}
if (s.copy_range && s.salvage) {
error_report("Cannot use copy offloading in salvaging mode");
goto fail_getopt;
}
if (tgt_image_opts && !skip_create) {
error_report("--target-image-opts requires use of -n flag");
goto fail_getopt;
@ -2209,7 +2258,7 @@ static int img_convert(int argc, char **argv)
}
/* Initialize before goto out */
if (quiet) {
if (s.quiet) {
progress = false;
}
qemu_progress_init(progress, 1.0);
@ -2220,7 +2269,7 @@ static int img_convert(int argc, char **argv)
for (bs_i = 0; bs_i < s.src_num; bs_i++) {
s.src[bs_i] = img_open(image_opts, argv[optind + bs_i],
fmt, src_flags, src_writethrough, quiet,
fmt, src_flags, src_writethrough, s.quiet,
force_share);
if (!s.src[bs_i]) {
ret = -1;
@ -2383,7 +2432,7 @@ static int img_convert(int argc, char **argv)
if (skip_create) {
s.target = img_open(tgt_image_opts, out_filename, out_fmt,
flags, writethrough, quiet, false);
flags, writethrough, s.quiet, false);
} else {
/* TODO ultimately we should allow --target-image-opts
* to be used even when -n is not given.
@ -2391,7 +2440,7 @@ static int img_convert(int argc, char **argv)
* to allow filenames in option syntax
*/
s.target = img_open_file(out_filename, open_opts, out_fmt,
flags, writethrough, quiet, false);
flags, writethrough, s.quiet, false);
open_opts = NULL; /* blk_new_open will have freed it */
}
if (!s.target) {
@ -3350,6 +3399,7 @@ static int img_rebase(int argc, char **argv)
out_baseimg,
&local_err);
if (local_err) {
qobject_unref(options);
error_reportf_err(local_err,
"Could not resolve backing filename: ");
ret = -1;
@ -3362,7 +3412,9 @@ static int img_rebase(int argc, char **argv)
*/
prefix_chain_bs = bdrv_find_backing_image(bs, out_real_path);
if (prefix_chain_bs) {
qobject_unref(options);
g_free(out_real_path);
blk_new_backing = blk_new(qemu_get_aio_context(),
BLK_PERM_CONSISTENT_READ,
BLK_PERM_ALL);

View File

@ -175,6 +175,10 @@ improve performance if the data is remote, such as with NFS or iSCSI backends,
but will not automatically sparsify zero sectors, and may result in a fully
allocated target image depending on the host support for getting allocation
information.
@item --salvage
Try to ignore I/O errors when reading. Unless in quiet mode (@code{-q}), errors
will still be printed. Areas that cannot be read from the source will be
treated as containing only zeroes.
@end table
Parameters to dd subcommand:

View File

@ -162,6 +162,7 @@ echo === convert: -C and other options ===
run_qemu_img convert -C -S 4k -O $IMGFMT "$TEST_IMG" "$TEST_IMG".target
run_qemu_img convert -C -S 8k -O $IMGFMT "$TEST_IMG" "$TEST_IMG".target
run_qemu_img convert -C -c -O $IMGFMT "$TEST_IMG" "$TEST_IMG".target
run_qemu_img convert -C --salvage -O $IMGFMT "$TEST_IMG" "$TEST_IMG".target
echo
echo === amend: Options specified more than once ===

View File

@ -567,6 +567,9 @@ qemu-img: Cannot enable copy offloading when -S is used
Testing: convert -C -c -O qcow2 TEST_DIR/t.qcow2 TEST_DIR/t.qcow2.target
qemu-img: Cannot enable copy offloading when -c is used
Testing: convert -C --salvage -O qcow2 TEST_DIR/t.qcow2 TEST_DIR/t.qcow2.target
qemu-img: Cannot use copy offloading in salvaging mode
=== amend: Options specified more than once ===
Testing: amend -f foo -f qcow2 -o lazy_refcounts=on TEST_DIR/t.qcow2

View File

@ -64,13 +64,13 @@ Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=134217728 backing_file=TEST_DIR/
=== Invalid command - cannot create a snapshot using a file BDS ===
{"error": {"class": "GenericError", "desc": "The snapshot does not support backing images"}}
{"error": {"class": "GenericError", "desc": "The overlay does not support backing images"}}
=== Invalid command - snapshot node used as active layer ===
{"error": {"class": "GenericError", "desc": "The snapshot is already in use"}}
{"error": {"class": "GenericError", "desc": "The snapshot is already in use"}}
{"error": {"class": "GenericError", "desc": "The snapshot is already in use"}}
{"error": {"class": "GenericError", "desc": "The overlay is already in use"}}
{"error": {"class": "GenericError", "desc": "The overlay is already in use"}}
{"error": {"class": "GenericError", "desc": "The overlay is already in use"}}
=== Invalid command - snapshot node used as backing hd ===
@ -81,7 +81,7 @@ Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=134217728 backing_file=TEST_DIR/
Formatting 'TEST_DIR/t.IMGFMT.base', fmt=IMGFMT size=134217728
Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=134217728 backing_file=TEST_DIR/t.IMGFMT.base
{"return": {}}
{"error": {"class": "GenericError", "desc": "The snapshot already has a backing image"}}
{"error": {"class": "GenericError", "desc": "The overlay already has a backing image"}}
=== Invalid command - The node does not exist ===

View File

@ -28,10 +28,25 @@ status=1 # failure is the default!
_cleanup()
{
_cleanup_test_img
_cleanup_test_img
rm -f "$TEST_DIR/empty"
}
trap "_cleanup; exit \$status" 0 1 2 3 15
# Some file systems sometimes allocate extra blocks independently of
# the file size. This function hides the resulting difference in the
# stat -c '%b' output.
# Parameter 1: Number of blocks an empty file occupies
# Parameter 2: Image size in bytes
_filter_blocks()
{
extra_blocks=$1
img_size=$2
sed -e "s/blocks=$extra_blocks\\(\$\\|[^0-9]\\)/nothing allocated/" \
-e "s/blocks=$((extra_blocks + img_size / 512))\\(\$\\|[^0-9]\\)/everything allocated/"
}
# get standard environment, filters and checks
. ./common.rc
. ./common.filter
@ -40,18 +55,21 @@ _supported_fmt raw
_supported_proto file
_supported_os Linux
size=1m
size=$((1 * 1024 * 1024))
touch "$TEST_DIR/empty"
extra_blocks=$(stat -c '%b' "$TEST_DIR/empty")
echo
echo "== creating image with default preallocation =="
_make_test_img $size | _filter_imgfmt
stat -c "size=%s, blocks=%b" $TEST_IMG
stat -c "size=%s, blocks=%b" $TEST_IMG | _filter_blocks $extra_blocks $size
for mode in off full falloc; do
echo
echo "== creating image with preallocation $mode =="
IMGOPTS=preallocation=$mode _make_test_img $size | _filter_imgfmt
stat -c "size=%s, blocks=%b" $TEST_IMG
stat -c "size=%s, blocks=%b" $TEST_IMG | _filter_blocks $extra_blocks $size
done
# success, all done

View File

@ -2,17 +2,17 @@ QA output created by 175
== creating image with default preallocation ==
Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=1048576
size=1048576, blocks=0
size=1048576, nothing allocated
== creating image with preallocation off ==
Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=1048576 preallocation=off
size=1048576, blocks=0
size=1048576, nothing allocated
== creating image with preallocation full ==
Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=1048576 preallocation=full
size=1048576, blocks=2048
size=1048576, everything allocated
== creating image with preallocation falloc ==
Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=1048576 preallocation=falloc
size=1048576, blocks=2048
size=1048576, everything allocated
*** done

View File

@ -23,6 +23,8 @@ import iotests
iotests.verify_image_format(supported_fmts=['qcow2'])
img_size = 4 * 1024 * 1024
def pause_wait(vm, job_id):
with iotests.Timeout(3, "Timeout waiting for job to pause"):
while True:
@ -62,6 +64,8 @@ def test_pause_resume(vm):
iotests.log(vm.qmp('query-jobs'))
def test_job_lifecycle(vm, job, job_args, has_ready=False):
global img_size
iotests.log('')
iotests.log('')
iotests.log('Starting block job: %s (auto-finalize: %s; auto-dismiss: %s)' %
@ -84,6 +88,10 @@ def test_job_lifecycle(vm, job, job_args, has_ready=False):
iotests.log(iotests.filter_qmp_event(vm.event_wait('JOB_STATUS_CHANGE')))
iotests.log(iotests.filter_qmp_event(vm.event_wait('JOB_STATUS_CHANGE')))
# Wait for total-progress to stabilize
while vm.qmp('query-jobs')['return'][0]['total-progress'] < img_size:
pass
# RUNNING state:
# pause/resume should work, complete/finalize/dismiss should error out
iotests.log('')
@ -173,9 +181,8 @@ with iotests.FilePath('disk.img') as disk_path, \
iotests.FilePath('copy.img') as copy_path, \
iotests.VM() as vm:
img_size = '4M'
iotests.qemu_img_create('-f', iotests.imgfmt, disk_path, img_size)
iotests.qemu_io('-c', 'write 0 %s' % (img_size),
iotests.qemu_img_create('-f', iotests.imgfmt, disk_path, str(img_size))
iotests.qemu_io('-c', 'write 0 %i' % (img_size),
'-f', iotests.imgfmt, disk_path)
iotests.log('Launching VM...')

170
tests/qemu-iotests/251 Executable file
View File

@ -0,0 +1,170 @@
#!/usr/bin/env bash
#
# Test qemu-img convert --salvage
#
# Copyright (C) 2019 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# creator
owner=mreitz@redhat.com
seq=$(basename $0)
echo "QA output created by $seq"
status=1 # failure is the default!
_cleanup()
{
_cleanup_test_img
}
trap "_cleanup; exit \$status" 0 1 2 3 15
# get standard environment, filters and checks
. ./common.rc
. ./common.filter
. ./common.qemu
_supported_fmt generic
_supported_proto file
_supported_os Linux
if [ "$IMGOPTSSYNTAX" = "true" ]; then
# We use json:{} filenames here, so we cannot work with additional options.
_unsupported_fmt $IMGFMT
else
# With VDI, the output is ordered differently. Just disable it.
_unsupported_fmt vdi
fi
TEST_IMG="$TEST_IMG.orig" _make_test_img 64M
$QEMU_IO -c 'write -P 42 0 64M' "$TEST_IMG.orig" | _filter_qemu_io
sector_size=512
# Offsets on which to fail block-status. Keep in ascending order so
# the indexing done by _filter_offsets will appear in ascending order
# in the output as well.
status_fail_offsets="$((16 * 1024 * 1024 + 8192))
$((33 * 1024 * 1024 + 512))"
# Offsets on which to fail reads. Keep in ascending order for the
# same reason.
# The second element is shared with $status_fail_offsets on purpose.
# Starting with the third element, we test what happens when a
# continuous range of sectors is inaccessible.
read_fail_offsets="$((32 * 1024 * 1024 - 65536))
$((33 * 1024 * 1024 + 512))
$(seq $((34 * 1024 * 1024)) $sector_size \
$((34 * 1024 * 1024 + 4096 - $sector_size)))"
# blkdebug must be above the format layer so it can intercept all
# block-status events
source_img="json:{'driver': 'blkdebug',
'image': {
'driver': '$IMGFMT',
'file': {
'driver': 'file',
'filename': '$TEST_IMG.orig'
}
},
'inject-error': ["
for ofs in $status_fail_offsets
do
source_img+="{ 'event': 'none',
'iotype': 'block-status',
'errno': 5,
'sector': $((ofs / sector_size)) },"
done
for ofs in $read_fail_offsets
do
source_img+="{ 'event': 'none',
'iotype': 'read',
'errno': 5,
'sector': $((ofs / sector_size)) },"
done
# Remove the trailing comma and terminate @inject-error and json:{}
source_img="${source_img%,} ] }"
echo
_filter_offsets() {
filters=
index=0
for ofs in $1
do
filters+=" -e s/$ofs/status_fail_offset_$index/"
index=$((index + 1))
done
index=0
for ofs in $2
do
filters+=" -e s/$ofs/read_fail_offset_$index/"
index=$((index + 1))
done
sed $filters
}
# While determining the number of allocated sectors in the input
# image, we should see one block status warning per element of
# $status_fail_offsets.
#
# Then, the image is read. Since the block status is queried in
# basically the same way, the same warnings as in the previous step
# should reappear. Interleaved with those we should see a read
# warning per element of $read_fail_offsets.
# Note that $read_fail_offsets and $status_fail_offsets share an
# element (read_fail_offset_1 == status_fail_offset_1), so
# "status_fail_offset_1" in the output is the same as
# "read_fail_offset_1".
$QEMU_IMG convert --salvage "$source_img" "$TEST_IMG" 2>&1 \
| _filter_offsets "$status_fail_offsets" "$read_fail_offsets"
echo
# The offsets where the block status could not be determined should
# have been treated as containing data and thus should be correct in
# the output image.
# The offsets where reading failed altogether should be 0. Make them
# 0 in the input image, too, so we can compare both images.
for ofs in $read_fail_offsets
do
$QEMU_IO -c "write -z $ofs $sector_size" "$TEST_IMG.orig" \
| _filter_qemu_io \
| _filter_offsets '' "$read_fail_offsets"
done
echo
# These should be equal now.
$QEMU_IMG compare "$TEST_IMG.orig" "$TEST_IMG"
# success, all done
echo "*** done"
rm -f $seq.full
status=0

View File

@ -0,0 +1,43 @@
QA output created by 251
Formatting 'TEST_DIR/t.IMGFMT.orig', fmt=IMGFMT size=67108864
wrote 67108864/67108864 bytes at offset 0
64 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
qemu-img: warning: error while reading block status at offset status_fail_offset_0: Input/output error
qemu-img: warning: error while reading block status at offset status_fail_offset_1: Input/output error
qemu-img: warning: error while reading block status at offset status_fail_offset_0: Input/output error
qemu-img: warning: error while reading offset read_fail_offset_0: Input/output error
qemu-img: warning: error while reading block status at offset status_fail_offset_1: Input/output error
qemu-img: warning: error while reading offset status_fail_offset_1: Input/output error
qemu-img: warning: error while reading offset read_fail_offset_2: Input/output error
qemu-img: warning: error while reading offset read_fail_offset_3: Input/output error
qemu-img: warning: error while reading offset read_fail_offset_4: Input/output error
qemu-img: warning: error while reading offset read_fail_offset_5: Input/output error
qemu-img: warning: error while reading offset read_fail_offset_6: Input/output error
qemu-img: warning: error while reading offset read_fail_offset_7: Input/output error
qemu-img: warning: error while reading offset read_fail_offset_8: Input/output error
qemu-img: warning: error while reading offset read_fail_offset_9: Input/output error
wrote 512/512 bytes at offset read_fail_offset_0
512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
wrote 512/512 bytes at offset read_fail_offset_1
512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
wrote 512/512 bytes at offset read_fail_offset_2
512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
wrote 512/512 bytes at offset read_fail_offset_3
512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
wrote 512/512 bytes at offset read_fail_offset_4
512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
wrote 512/512 bytes at offset read_fail_offset_5
512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
wrote 512/512 bytes at offset read_fail_offset_6
512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
wrote 512/512 bytes at offset read_fail_offset_7
512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
wrote 512/512 bytes at offset read_fail_offset_8
512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
wrote 512/512 bytes at offset read_fail_offset_9
512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
Images are identical.
*** done

View File

@ -21,6 +21,8 @@
import iotests
from iotests import qemu_img_create, file_path, log
iotests.verify_image_format(supported_fmts=['qcow2'])
disk, top = file_path('disk', 'top')
size = 1024 * 1024

122
tests/qemu-iotests/256 Executable file
View File

@ -0,0 +1,122 @@
#!/usr/bin/env python
#
# Test incremental/backup across iothread contexts
#
# Copyright (c) 2019 John Snow for Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# owner=jsnow@redhat.com

import os
import iotests
from iotests import log

iotests.verify_image_format(supported_fmts=['qcow2'])
size = 64 * 1024 * 1024

with iotests.FilePath('img0') as img0_path, \
     iotests.FilePath('img1') as img1_path, \
     iotests.FilePath('img0-full') as img0_full_path, \
     iotests.FilePath('img1-full') as img1_full_path, \
     iotests.FilePath('img0-incr') as img0_incr_path, \
     iotests.FilePath('img1-incr') as img1_incr_path, \
     iotests.VM() as vm:

    def create_target(filepath, name, size):
        # Create an empty protocol-level file, format it, and attach both
        # layers to the VM as named block nodes (the format node as `name`).
        basename = os.path.basename(filepath)
        nodename = "file_{}".format(basename)
        log(vm.command('blockdev-create', job_id='job1',
                       options={
                           'driver': 'file',
                           'filename': filepath,
                           'size': 0,
                       }))
        vm.run_job('job1')
        log(vm.command('blockdev-add', driver='file',
                       node_name=nodename, filename=filepath))
        log(vm.command('blockdev-create', job_id='job2',
                       options={
                           'driver': iotests.imgfmt,
                           'file': nodename,
                           'size': size,
                       }))
        vm.run_job('job2')
        log(vm.command('blockdev-add', driver=iotests.imgfmt,
                       node_name=name,
                       file=nodename))

    log('--- Preparing images & VM ---\n')
    # Two SCSI controllers on two distinct iothreads, so the two source
    # drives live in different AIO contexts.
    vm.add_object('iothread,id=iothread0')
    vm.add_object('iothread,id=iothread1')
    vm.add_device('virtio-scsi-pci,id=scsi0,iothread=iothread0')
    vm.add_device('virtio-scsi-pci,id=scsi1,iothread=iothread1')
    iotests.qemu_img_create('-f', iotests.imgfmt, img0_path, str(size))
    iotests.qemu_img_create('-f', iotests.imgfmt, img1_path, str(size))
    vm.add_drive(img0_path, interface='none')
    vm.add_device('scsi-hd,id=device0,drive=drive0,bus=scsi0.0')
    vm.add_drive(img1_path, interface='none')
    vm.add_device('scsi-hd,id=device1,drive=drive1,bus=scsi1.0')

    log('--- Starting VM ---\n')
    vm.launch()

    log('--- Create Targets & Full Backups ---\n')
    create_target(img0_full_path, 'img0-full', size)
    create_target(img1_full_path, 'img1-full', size)
    # Add one dirty bitmap per drive and start a full backup of each in a
    # single transaction; the targets are fresh nodes in the main context.
    ret = vm.qmp_log('transaction', indent=2, actions=[
        { 'type': 'block-dirty-bitmap-add',
          'data': { 'node': 'drive0', 'name': 'bitmap0' }},
        { 'type': 'block-dirty-bitmap-add',
          'data': { 'node': 'drive1', 'name': 'bitmap1' }},
        { 'type': 'blockdev-backup',
          'data': { 'device': 'drive0',
                    'target': 'img0-full',
                    'sync': 'full',
                    'job-id': 'j0' }},
        { 'type': 'blockdev-backup',
          'data': { 'device': 'drive1',
                    'target': 'img1-full',
                    'sync': 'full',
                    'job-id': 'j1' }}
    ])
    if "error" in ret:
        raise Exception(ret['error']['desc'])
    vm.run_job('j0', auto_dismiss=True)
    vm.run_job('j1', auto_dismiss=True)

    log('\n--- Create Targets & Incremental Backups ---\n')
    create_target(img0_incr_path, 'img0-incr', size)
    create_target(img1_incr_path, 'img1-incr', size)
    ret = vm.qmp_log('transaction', indent=2, actions=[
        { 'type': 'blockdev-backup',
          'data': { 'device': 'drive0',
                    'target': 'img0-incr',
                    'sync': 'incremental',
                    'bitmap': 'bitmap0',
                    'job-id': 'j2' }},
        { 'type': 'blockdev-backup',
          'data': { 'device': 'drive1',
                    'target': 'img1-incr',
                    'sync': 'incremental',
                    'bitmap': 'bitmap1',
                    'job-id': 'j3' }}
    ])
    if "error" in ret:
        raise Exception(ret['error']['desc'])
    vm.run_job('j2', auto_dismiss=True)
    vm.run_job('j3', auto_dismiss=True)

    log('\n--- Done ---')
    vm.shutdown()

119
tests/qemu-iotests/256.out Normal file
View File

@ -0,0 +1,119 @@
--- Preparing images & VM ---
--- Starting VM ---
--- Create Targets & Full Backups ---
{}
{"execute": "job-dismiss", "arguments": {"id": "job1"}}
{"return": {}}
{}
{}
{"execute": "job-dismiss", "arguments": {"id": "job2"}}
{"return": {}}
{}
{}
{"execute": "job-dismiss", "arguments": {"id": "job1"}}
{"return": {}}
{}
{}
{"execute": "job-dismiss", "arguments": {"id": "job2"}}
{"return": {}}
{}
{
"execute": "transaction",
"arguments": {
"actions": [
{
"data": {
"name": "bitmap0",
"node": "drive0"
},
"type": "block-dirty-bitmap-add"
},
{
"data": {
"name": "bitmap1",
"node": "drive1"
},
"type": "block-dirty-bitmap-add"
},
{
"data": {
"device": "drive0",
"job-id": "j0",
"sync": "full",
"target": "img0-full"
},
"type": "blockdev-backup"
},
{
"data": {
"device": "drive1",
"job-id": "j1",
"sync": "full",
"target": "img1-full"
},
"type": "blockdev-backup"
}
]
}
}
{
"return": {}
}
{"data": {"device": "j0", "len": 67108864, "offset": 67108864, "speed": 0, "type": "backup"}, "event": "BLOCK_JOB_COMPLETED", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}}
{"data": {"device": "j1", "len": 67108864, "offset": 67108864, "speed": 0, "type": "backup"}, "event": "BLOCK_JOB_COMPLETED", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}}
--- Create Targets & Incremental Backups ---
{}
{"execute": "job-dismiss", "arguments": {"id": "job1"}}
{"return": {}}
{}
{}
{"execute": "job-dismiss", "arguments": {"id": "job2"}}
{"return": {}}
{}
{}
{"execute": "job-dismiss", "arguments": {"id": "job1"}}
{"return": {}}
{}
{}
{"execute": "job-dismiss", "arguments": {"id": "job2"}}
{"return": {}}
{}
{
"execute": "transaction",
"arguments": {
"actions": [
{
"data": {
"bitmap": "bitmap0",
"device": "drive0",
"job-id": "j2",
"sync": "incremental",
"target": "img0-incr"
},
"type": "blockdev-backup"
},
{
"data": {
"bitmap": "bitmap1",
"device": "drive1",
"job-id": "j3",
"sync": "incremental",
"target": "img1-incr"
},
"type": "blockdev-backup"
}
]
}
}
{
"return": {}
}
{"data": {"device": "j2", "len": 67108864, "offset": 67108864, "speed": 0, "type": "backup"}, "event": "BLOCK_JOB_COMPLETED", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}}
{"data": {"device": "j3", "len": 67108864, "offset": 67108864, "speed": 0, "type": "backup"}, "event": "BLOCK_JOB_COMPLETED", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}}
--- Done ---

View File

@ -263,7 +263,9 @@
248 rw quick
249 rw auto quick
250 rw auto quick
251 rw auto quick
252 rw auto backing quick
253 rw auto quick
254 rw auto backing quick
255 rw auto quick
256 rw auto quick

View File

@ -524,7 +524,7 @@ def qmp_to_opts(self, obj):
output_list += [key + '=' + obj[key]]
return ','.join(output_list)
def get_qmp_events_filtered(self, wait=60.0):
    """Return pending QMP events with unstable fields filtered out.

    wait: timeout in seconds passed through to get_qmp_events (a finite
          default keeps a hung QEMU from stalling the whole test run).
    Returns the list of filtered events.
    """
    # NOTE(review): SOURCE showed both the old (wait=True) and new
    # (wait=60.0) signatures concatenated; kept the newer finite default.
    result = []
    for ev in self.get_qmp_events(wait=wait):
        result.append(filter_qmp_event(ev))
    return result
@ -542,28 +542,38 @@ def qmp_log(self, cmd, filters=[], indent=None, **kwargs):
# Returns None on success, and an error string on failure
def run_job(self, job, auto_finalize=True, auto_dismiss=False,
            pre_finalize=None, wait=60.0):
    """Run a block job to completion and report its outcome.

    job:           the job's id (also the device name in legacy
                   BLOCK_JOB_* events)
    auto_finalize: if False, issue job-finalize when the job reaches the
                   'pending' state (calling pre_finalize() first, if given)
    auto_dismiss:  if False, issue job-dismiss when the job reaches the
                   'concluded' state
    pre_finalize:  optional callback invoked just before job-finalize
    wait:          accepted for interface consistency with the other
                   helpers; not consumed by this implementation
    Returns None on success, or the job's error string on failure.
    """
    # NOTE(review): SOURCE contained the pre-patch polling loop and the
    # post-patch events_wait() loop concatenated; only the coherent
    # post-patch implementation is kept here.
    match_device = {'data': {'device': job}}
    match_id = {'data': {'id': job}}
    events = [
        ('BLOCK_JOB_COMPLETED', match_device),
        ('BLOCK_JOB_CANCELLED', match_device),
        ('BLOCK_JOB_ERROR', match_device),
        ('BLOCK_JOB_READY', match_device),
        ('BLOCK_JOB_PENDING', match_id),
        ('JOB_STATUS_CHANGE', match_id)
    ]
    error = None
    while True:
        # Wait only for events this job can emit instead of draining
        # every pending event; unrelated events stay queued for other
        # consumers.
        ev = filter_qmp_event(self.events_wait(events))
        if ev['event'] != 'JOB_STATUS_CHANGE':
            log(ev)
            continue
        status = ev['data']['status']
        if status == 'aborting':
            # Fetch the error message while the job still exists.
            result = self.qmp('query-jobs')
            for j in result['return']:
                if j['id'] == job:
                    error = j['error']
                    log('Job failed: %s' % (j['error']))
        elif status == 'pending' and not auto_finalize:
            if pre_finalize:
                pre_finalize()
            self.qmp_log('job-finalize', id=job)
        elif status == 'concluded' and not auto_dismiss:
            self.qmp_log('job-dismiss', id=job)
        elif status == 'null':
            # The job has been reaped; nothing more will arrive.
            return error
def node_info(self, node_name):
nodes = self.qmp('query-named-block-nodes')
@ -650,7 +660,7 @@ def assert_json_filename_equal(self, json_filename, reference):
self.assertEqual(self.vm.flatten_qmp_object(json.loads(json_filename[5:])),
self.vm.flatten_qmp_object(reference))
def cancel_and_wait(self, drive='drive0', force=False, resume=False):
def cancel_and_wait(self, drive='drive0', force=False, resume=False, wait=60.0):
'''Cancel a block job and wait for it to finish, returning the event'''
result = self.vm.qmp('block-job-cancel', device=drive, force=force)
self.assert_qmp(result, 'return', {})
@ -661,7 +671,7 @@ def cancel_and_wait(self, drive='drive0', force=False, resume=False):
cancelled = False
result = None
while not cancelled:
for event in self.vm.get_qmp_events(wait=True):
for event in self.vm.get_qmp_events(wait=wait):
if event['event'] == 'BLOCK_JOB_COMPLETED' or \
event['event'] == 'BLOCK_JOB_CANCELLED':
self.assert_qmp(event, 'data/device', drive)
@ -674,10 +684,10 @@ def cancel_and_wait(self, drive='drive0', force=False, resume=False):
self.assert_no_active_block_jobs()
return result
def wait_until_completed(self, drive='drive0', check_offset=True):
def wait_until_completed(self, drive='drive0', check_offset=True, wait=60.0):
'''Wait for a block job to finish, returning the event'''
while True:
for event in self.vm.get_qmp_events(wait=True):
for event in self.vm.get_qmp_events(wait=wait):
if event['event'] == 'BLOCK_JOB_COMPLETED':
self.assert_qmp(event, 'data/device', drive)
self.assert_qmp_absent(event, 'data/error')