mirror of https://gitee.com/openkylin/libvirt.git
qemuMigrationDriveMirror: Listen to events
https://bugzilla.redhat.com/show_bug.cgi?id=1179678

When migrating with storage, libvirt iterates over the domain's disks and instructs qemu to migrate the ones we are interested in (shared, RO and source-less disks are skipped). The disks are migrated in series: no new disk is transferred until the previous one has been quiesced. This is checked on the qemu monitor via the 'query-jobs' command. Once a disk has been quiesced, it has practically moved from copying its content to the mirroring state, where all disk writes are mirrored to the other side of the migration too.

Having said that, there is one inherent flaw in the design: the monitor command we use reports only active jobs, so if a job fails for whatever reason we will not see it in the command output anymore. And this can happen fairly easily: just try to migrate a domain with storage. If the storage migration fails (e.g. due to ENOSPC on the destination), we resume the guest on the destination and let it run on a partly copied disk.

The proper fix is what even the comment in the code says: listen for qemu events instead of polling. If the storage migration changes state, an event is emitted and we can act accordingly: either consider the disk copied and continue the process, or consider the disk mangled and abort the migration.

Signed-off-by: Michal Privoznik <mprivozn@redhat.com>
parent 76c61cdca2
commit 80c5f10e86
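As a rough illustration of the pattern the commit moves to, here is a minimal, self-contained sketch (not libvirt code; the names disk, mirrorState and fakeQemuEvent are invented for the example): the waiting side only consults per-disk state that an event handler has recorded, so a failed job cannot silently vanish the way it does from a polled job listing.

/* Sketch only, assuming POSIX threads; build with: cc -pthread sketch.c */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <time.h>

/* Per-disk mirror state, filled in by the (simulated) event handler. */
typedef enum {
    MIRROR_NONE = 0,   /* copy still running, no event seen yet */
    MIRROR_READY,      /* job reached mirroring: writes go to both sides */
    MIRROR_ABORT,      /* job failed or was cancelled */
} mirrorState;

typedef struct {
    const char *dst;            /* disk target name, e.g. "vda" */
    _Atomic mirrorState state;
} disk;

/* Stand-in for the event handler: record the outcome, so a failed job
 * leaves a trace instead of merely disappearing from a job listing. */
static void *
fakeQemuEvent(void *opaque)
{
    disk *d = opaque;
    struct timespec delay = { .tv_sec = 1, .tv_nsec = 0 };

    nanosleep(&delay, NULL);                /* pretend the copy ran a while */
    atomic_store(&d->state, MIRROR_ABORT);  /* e.g. ENOSPC on the destination */
    return NULL;
}

int
main(void)
{
    disk d = { .dst = "vda", .state = MIRROR_NONE };
    struct timespec ts = { .tv_sec = 0, .tv_nsec = 500 * 1000 * 1000 };
    pthread_t ev;

    pthread_create(&ev, NULL, fakeQemuEvent, &d);

    /* Check every 500ms, but only our own per-disk state, never the monitor. */
    for (;;) {
        mirrorState s = atomic_load(&d.state);

        if (s == MIRROR_READY) {
            printf("disk %s mirrored, continue with the next disk\n", d.dst);
            break;
        } else if (s == MIRROR_ABORT) {
            fprintf(stderr, "migration of disk %s failed, abort\n", d.dst);
            break;
        }
        nanosleep(&ts, NULL);
    }

    pthread_join(ev, NULL);
    return 0;
}

The actual patch below keeps the 500ms sleep and checks disk->mirrorState, which the event handler fills in; the "/* XXX Turn this into virCond someday. */" comment in the diff hints at eventually replacing the sleep with a condition variable.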
@@ -1739,7 +1739,6 @@ qemuMigrationDriveMirror(virQEMUDriverPtr driver,
     for (i = 0; i < vm->def->ndisks; i++) {
         virDomainDiskDefPtr disk = vm->def->disks[i];
-        virDomainBlockJobInfo info;
 
         /* skip shared, RO and source-less disks */
         if (disk->src->shared || disk->src->readonly ||
@@ -1772,38 +1771,36 @@ qemuMigrationDriveMirror(virQEMUDriverPtr driver,
             /* Poll every 500ms for progress & to allow cancellation */
             struct timespec ts = { .tv_sec = 0, .tv_nsec = 500 * 1000 * 1000ull };
 
-            memset(&info, 0, sizeof(info));
-
-            if (qemuDomainObjEnterMonitorAsync(driver, vm,
-                                               QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
+            /* Explicitly check if domain is still alive. Maybe qemu
+             * died meanwhile so we won't see any event at all. */
+            if (!virDomainObjIsActive(vm)) {
+                virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
+                               _("guest unexpectedly quit"));
                 goto error;
+            }
+
+            /* The following check should be race free as long as the variable
+             * is set only with domain object locked. And here we have the
+             * domain object locked too. */
             if (priv->job.asyncAbort) {
-                /* explicitly do this *after* we entered the monitor,
-                 * as this is a critical section so we are guaranteed
-                 * priv->job.asyncAbort will not change */
-                ignore_value(qemuDomainObjExitMonitor(driver, vm));
                 priv->job.current->type = VIR_DOMAIN_JOB_CANCELLED;
                 virReportError(VIR_ERR_OPERATION_ABORTED, _("%s: %s"),
                                qemuDomainAsyncJobTypeToString(priv->job.asyncJob),
                                _("canceled by client"));
                 goto error;
             }
-            mon_ret = qemuMonitorBlockJobInfo(priv->mon, diskAlias, &info,
-                                              NULL);
-            if (qemuDomainObjExitMonitor(driver, vm) < 0)
-                goto error;
-
-            if (mon_ret < 0)
-                goto error;
-
-            if (info.cur == info.end) {
+            if (disk->mirrorState == VIR_DOMAIN_DISK_MIRROR_STATE_READY) {
                 VIR_DEBUG("Drive mirroring of '%s' completed", diskAlias);
                 break;
+            } else if (disk->mirrorState == VIR_DOMAIN_DISK_MIRROR_STATE_ABORT) {
+                virReportError(VIR_ERR_OPERATION_FAILED,
+                               _("migration of disk %s failed"),
+                               disk->dst);
+                goto error;
            }
 
-            /* XXX Frankly speaking, we should listen to the events,
-             * instead of doing this. But this works for now and we
-             * are doing something similar in migration itself anyway */
+            /* XXX Turn this into virCond someday. */
 
             virObjectUnlock(vm);
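For completeness, a sketch of the producer side, i.e. the callback that would translate a QEMU block-job event into the per-disk mirror state consumed by the loop above. In libvirt this work happens around the block-job event handler (qemuProcessHandleBlockJob); the code below is simplified, uses invented names (onBlockJobEvent, findDiskByJob) and example job aliases, and is not the real API.

/* Sketch only; not libvirt code. */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

typedef enum {
    MIRROR_NONE = 0,
    MIRROR_READY,   /* stands in for VIR_DOMAIN_DISK_MIRROR_STATE_READY */
    MIRROR_ABORT,   /* stands in for VIR_DOMAIN_DISK_MIRROR_STATE_ABORT */
} mirrorState;

typedef struct {
    const char *jobAlias;   /* block-job/device alias used by qemu */
    const char *dst;        /* disk target, e.g. "vda" */
    mirrorState state;
} disk;

/* Hypothetical lookup: which disk does this event belong to? */
static disk *
findDiskByJob(disk *disks, size_t ndisks, const char *jobAlias)
{
    for (size_t i = 0; i < ndisks; i++) {
        if (strcmp(disks[i].jobAlias, jobAlias) == 0)
            return &disks[i];
    }
    return NULL;
}

/* Hypothetical event callback: a "ready" notification marks the disk as
 * mirrored, a completion carrying an error marks it aborted.  Either way
 * the outcome is recorded instead of being lost along with the job. */
static void
onBlockJobEvent(disk *disks, size_t ndisks,
                const char *jobAlias, bool ready, const char *error)
{
    disk *d = findDiskByJob(disks, ndisks, jobAlias);

    if (!d)
        return;
    if (error)
        d->state = MIRROR_ABORT;
    else if (ready)
        d->state = MIRROR_READY;
}

int
main(void)
{
    disk disks[] = {
        { "drive-virtio-disk0", "vda", MIRROR_NONE },
        { "drive-virtio-disk1", "vdb", MIRROR_NONE },
    };

    /* Simulate what qemu might report: vda reaches mirroring, vdb fails. */
    onBlockJobEvent(disks, 2, "drive-virtio-disk0", true, NULL);
    onBlockJobEvent(disks, 2, "drive-virtio-disk1", false, "No space left on device");

    for (size_t i = 0; i < 2; i++)
        printf("%s: state %d\n", disks[i].dst, disks[i].state);
    return 0;
}

Recording an explicit abort state is exactly what the bug report needs: with polling, a job that dies (e.g. on ENOSPC) simply stops showing up, whereas an event-driven handler leaves a per-disk verdict the migration loop can act on.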