mirror of https://gitee.com/openkylin/libvirt.git
move jobs enums QEMU_X into hypervisor as VIR_X
These enums are essentially the same and always sorted in the same order in every hypervisor with jobs, so they can be generalized, with the QEMU enums used as the base since they are the most extensive.

Signed-off-by: Kristina Hanicova <khanicov@redhat.com>
Reviewed-by: Michal Privoznik <mprivozn@redhat.com>
This commit is contained in:
  parent 111272a9fd
  commit a193f4bef6
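Note: the rename is mechanical — every use of the QEMU-private job enums switches to the shared hypervisor ones with identical values and semantics. A minimal before/after sketch (illustrative caller code, not a hunk from this commit):

    /* before: QEMU-private enum from qemu/qemu_domainjob.h */
    if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
        return -1;

    /* after: shared enum from hypervisor/domain_job.h */
    if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_MODIFY) < 0)
        return -1;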
@@ -9,6 +9,38 @@
 #include "domain_job.h"
 
+VIR_ENUM_IMPL(virDomainJob,
+              VIR_JOB_LAST,
+              "none",
+              "query",
+              "destroy",
+              "suspend",
+              "modify",
+              "abort",
+              "migration operation",
+              "none", /* async job is never stored in job.active */
+              "async nested",
+);
+
+VIR_ENUM_IMPL(virDomainAgentJob,
+              VIR_AGENT_JOB_LAST,
+              "none",
+              "query",
+              "modify",
+);
+
+VIR_ENUM_IMPL(virDomainAsyncJob,
+              VIR_ASYNC_JOB_LAST,
+              "none",
+              "migration out",
+              "migration in",
+              "save",
+              "dump",
+              "snapshot",
+              "start",
+              "backup",
+);
+
 virDomainJobData *
 virDomainJobDataInit(virDomainJobDataPrivateDataCallbacks *cb)
 {
@@ -6,6 +6,58 @@
 #pragma once
 
 #include "internal.h"
+#include "virenum.h"
+
+/* Only 1 job is allowed at any time
+ * A job includes *all* monitor commands, even those just querying
+ * information, not merely actions */
+typedef enum {
+    VIR_JOB_NONE = 0,      /* Always set to 0 for easy if (jobActive) conditions */
+    VIR_JOB_QUERY,         /* Doesn't change any state */
+    VIR_JOB_DESTROY,       /* Destroys the domain (cannot be masked out) */
+    VIR_JOB_SUSPEND,       /* Suspends (stops vCPUs) the domain */
+    VIR_JOB_MODIFY,        /* May change state */
+    VIR_JOB_ABORT,         /* Abort current async job */
+    VIR_JOB_MIGRATION_OP,  /* Operation influencing outgoing migration */
+
+    /* The following two items must always be the last items before JOB_LAST */
+    VIR_JOB_ASYNC,         /* Asynchronous job */
+    VIR_JOB_ASYNC_NESTED,  /* Normal job within an async job */
+
+    VIR_JOB_LAST
+} virDomainJob;
+VIR_ENUM_DECL(virDomainJob);
+
+
+/* Currently only QEMU driver uses agent jobs */
+typedef enum {
+    VIR_AGENT_JOB_NONE = 0,  /* No agent job. */
+    VIR_AGENT_JOB_QUERY,     /* Does not change state of domain */
+    VIR_AGENT_JOB_MODIFY,    /* May change state of domain */
+
+    VIR_AGENT_JOB_LAST
+} virDomainAgentJob;
+VIR_ENUM_DECL(virDomainAgentJob);
+
+
+/* Async job consists of a series of jobs that may change state. Independent
+ * jobs that do not change state (and possibly others if explicitly allowed by
+ * current async job) are allowed to be run even if async job is active.
+ * Currently supported by QEMU only. */
+typedef enum {
+    VIR_ASYNC_JOB_NONE = 0,
+    VIR_ASYNC_JOB_MIGRATION_OUT,
+    VIR_ASYNC_JOB_MIGRATION_IN,
+    VIR_ASYNC_JOB_SAVE,
+    VIR_ASYNC_JOB_DUMP,
+    VIR_ASYNC_JOB_SNAPSHOT,
+    VIR_ASYNC_JOB_START,
+    VIR_ASYNC_JOB_BACKUP,
+
+    VIR_ASYNC_JOB_LAST
+} virDomainAsyncJob;
+VIR_ENUM_DECL(virDomainAsyncJob);
+
+
 typedef enum {
     VIR_DOMAIN_JOB_STATUS_NONE = 0,
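Note: each VIR_ENUM_DECL()/VIR_ENUM_IMPL() pair generates string converters for its enum. A minimal sketch of the generated helpers in use (caller code is illustrative; the converters themselves are exported through libvirt_private.syms below):

    #include "domain_job.h"

    /* enum -> label, yields "modify" per the VIR_ENUM_IMPL table above */
    const char *label = virDomainJobTypeToString(VIR_JOB_MODIFY);

    /* label -> enum; a negative result means the name is unknown */
    int job = virDomainJobTypeFromString("suspend");    /* VIR_JOB_SUSPEND */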
@@ -19,6 +19,7 @@ hypervisor_lib = static_library(
     ],
     include_directories: [
         conf_inc_dir,
+        util_inc_dir,
     ],
 )
@@ -1577,10 +1577,15 @@ virDomainDriverSetupPersistentDefBlkioParams;
 
 
 # hypervisor/domain_job.h
+virDomainAgentJobTypeToString;
+virDomainAsyncJobTypeFromString;
+virDomainAsyncJobTypeToString;
 virDomainJobDataCopy;
 virDomainJobDataFree;
 virDomainJobDataInit;
 virDomainJobStatusToType;
+virDomainJobTypeFromString;
+virDomainJobTypeToString;
 
 
 # hypervisor/virclosecallbacks.h
@@ -38,6 +38,7 @@
 #include "xen_common.h"
 #include "driver.h"
 #include "domain_validate.h"
+#include "domain_job.h"
 
 #define VIR_FROM_THIS VIR_FROM_LIBXL
@@ -73,14 +73,14 @@ The sequence of calling qemuMigrationJob* helper methods is as follows:
 - The first API of a migration protocol (Prepare or Perform/Begin depending on
   migration type and version) has to start migration job and keep it active:
 
-      qemuMigrationJobStart(driver, vm, QEMU_JOB_MIGRATION_{IN,OUT});
+      qemuMigrationJobStart(driver, vm, VIR_JOB_MIGRATION_{IN,OUT});
       qemuMigrationJobSetPhase(driver, vm, QEMU_MIGRATION_PHASE_*);
       ...do work...
       qemuMigrationJobContinue(vm);
 
 - All consequent phases except for the last one have to keep the job active:
 
-      if (!qemuMigrationJobIsActive(vm, QEMU_JOB_MIGRATION_{IN,OUT}))
+      if (!qemuMigrationJobIsActive(vm, VIR_JOB_MIGRATION_{IN,OUT}))
           return;
       qemuMigrationJobStartPhase(driver, vm, QEMU_MIGRATION_PHASE_*);
       ...do work...

@@ -88,7 +88,7 @@ The sequence of calling qemuMigrationJob* helper methods is as follows:
 
 - The last migration phase finally finishes the migration job:
 
-      if (!qemuMigrationJobIsActive(vm, QEMU_JOB_MIGRATION_{IN,OUT}))
+      if (!qemuMigrationJobIsActive(vm, VIR_JOB_MIGRATION_{IN,OUT}))
           return;
       qemuMigrationJobStartPhase(driver, vm, QEMU_MIGRATION_PHASE_*);
       ...do work...

@@ -186,7 +186,7 @@ To acquire the QEMU monitor lock as part of an asynchronous job
 
 These functions are for use inside an asynchronous job; the caller
 must check for a return of -1 (VM not running, so nothing to exit).
-Helper functions may also call this with QEMU_ASYNC_JOB_NONE when
+Helper functions may also call this with VIR_ASYNC_JOB_NONE when
 used from a sync job (such as when first starting a domain).

@@ -220,7 +220,7 @@ Design patterns
 
      obj = qemuDomObjFromDomain(dom);
 
-     qemuDomainObjBeginJob(obj, QEMU_JOB_TYPE);
+     qemuDomainObjBeginJob(obj, VIR_JOB_TYPE);
 
      ...do work...

@@ -236,7 +236,7 @@ Design patterns
 
      obj = qemuDomObjFromDomain(dom);
 
-     qemuDomainObjBeginJob(obj, QEMU_JOB_TYPE);
+     qemuDomainObjBeginJob(obj, VIR_JOB_TYPE);
 
      ...do prep work...

@@ -259,7 +259,7 @@ Design patterns
 
      obj = qemuDomObjFromDomain(dom);
 
-     qemuDomainObjBeginAgentJob(obj, QEMU_AGENT_JOB_TYPE);
+     qemuDomainObjBeginAgentJob(obj, VIR_AGENT_JOB_TYPE);
 
      ...do prep work...

@@ -283,13 +283,13 @@ Design patterns
 
      obj = qemuDomObjFromDomain(dom);
 
-     qemuDomainObjBeginAsyncJob(obj, QEMU_ASYNC_JOB_TYPE);
+     qemuDomainObjBeginAsyncJob(obj, VIR_ASYNC_JOB_TYPE);
      qemuDomainObjSetAsyncJobMask(obj, allowedJobs);
 
      ...do prep work...
 
      if (qemuDomainObjEnterMonitorAsync(driver, obj,
-                                        QEMU_ASYNC_JOB_TYPE) < 0) {
+                                        VIR_ASYNC_JOB_TYPE) < 0) {
          /* domain died in the meantime */
          goto error;
      }

@@ -298,7 +298,7 @@ Design patterns
 
      while (!finished) {
          if (qemuDomainObjEnterMonitorAsync(driver, obj,
-                                            QEMU_ASYNC_JOB_TYPE) < 0) {
+                                            VIR_ASYNC_JOB_TYPE) < 0) {
              /* domain died in the meantime */
              goto error;
          }

@@ -323,7 +323,7 @@ Design patterns
 
      obj = qemuDomObjFromDomain(dom);
 
-     qemuDomainObjBeginAsyncJob(obj, QEMU_ASYNC_JOB_TYPE);
+     qemuDomainObjBeginAsyncJob(obj, VIR_ASYNC_JOB_TYPE);
 
      ...do prep work...
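Note: combining the hunks above, the documented async-job pattern with the renamed constants reads as the following sketch (pseudocode in the document's own style; VIR_ASYNC_JOB_TYPE stands for a concrete value such as VIR_ASYNC_JOB_BACKUP):

     obj = qemuDomObjFromDomain(dom);

     qemuDomainObjBeginAsyncJob(obj, VIR_ASYNC_JOB_TYPE);
     qemuDomainObjSetAsyncJobMask(obj, allowedJobs);

     ...do prep work...

     if (qemuDomainObjEnterMonitorAsync(driver, obj,
                                        VIR_ASYNC_JOB_TYPE) < 0) {
         /* domain died in the meantime */
         goto error;
     }

     ...do work...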
@@ -466,10 +466,10 @@ qemuBackupDiskPrepareOneStorage(virDomainObj *vm,
 
         if (qemuBlockStorageSourceCreate(vm, dd->store, dd->backingStore, NULL,
                                          dd->crdata->srcdata[0],
-                                         QEMU_ASYNC_JOB_BACKUP) < 0)
+                                         VIR_ASYNC_JOB_BACKUP) < 0)
             return -1;
     } else {
-        if (qemuDomainObjEnterMonitorAsync(priv->driver, vm, QEMU_ASYNC_JOB_BACKUP) < 0)
+        if (qemuDomainObjEnterMonitorAsync(priv->driver, vm, VIR_ASYNC_JOB_BACKUP) < 0)
             return -1;
 
         rc = qemuBlockStorageSourceAttachApply(priv->mon, dd->crdata->srcdata[0]);

@@ -622,7 +622,7 @@ qemuBackupJobTerminate(virDomainObj *vm,
 
     g_clear_pointer(&priv->backup, virDomainBackupDefFree);
 
-    if (priv->job.asyncJob == QEMU_ASYNC_JOB_BACKUP)
+    if (priv->job.asyncJob == VIR_ASYNC_JOB_BACKUP)
         qemuDomainObjEndAsyncJob(vm);
 }

@@ -791,13 +791,13 @@ qemuBackupBegin(virDomainObj *vm,
      * infrastructure for async jobs. We'll allow standard modify-type jobs
      * as the interlocking of conflicting operations is handled on the block
      * job level */
-    if (qemuDomainObjBeginAsyncJob(priv->driver, vm, QEMU_ASYNC_JOB_BACKUP,
+    if (qemuDomainObjBeginAsyncJob(priv->driver, vm, VIR_ASYNC_JOB_BACKUP,
                                    VIR_DOMAIN_JOB_OPERATION_BACKUP, flags) < 0)
         return -1;
 
     qemuDomainObjSetAsyncJobMask(vm, (QEMU_JOB_DEFAULT_MASK |
-                                      JOB_MASK(QEMU_JOB_SUSPEND) |
-                                      JOB_MASK(QEMU_JOB_MODIFY)));
+                                      JOB_MASK(VIR_JOB_SUSPEND) |
+                                      JOB_MASK(VIR_JOB_MODIFY)));
     qemuDomainJobSetStatsType(priv->job.current,
                               QEMU_DOMAIN_JOB_STATS_TYPE_BACKUP);

@@ -856,7 +856,7 @@ qemuBackupBegin(virDomainObj *vm,
         goto endjob;
     }
 
-    if (!(blockNamedNodeData = qemuBlockGetNamedNodeData(vm, QEMU_ASYNC_JOB_BACKUP)))
+    if (!(blockNamedNodeData = qemuBlockGetNamedNodeData(vm, VIR_ASYNC_JOB_BACKUP)))
         goto endjob;
 
     if ((ndd = qemuBackupDiskPrepareData(vm, def, blockNamedNodeData, actions,

@@ -874,7 +874,7 @@ qemuBackupBegin(virDomainObj *vm,
 
     priv->backup = g_steal_pointer(&def);
 
-    if (qemuDomainObjEnterMonitorAsync(priv->driver, vm, QEMU_ASYNC_JOB_BACKUP) < 0)
+    if (qemuDomainObjEnterMonitorAsync(priv->driver, vm, VIR_ASYNC_JOB_BACKUP) < 0)
         goto endjob;
 
     if (pull) {

@@ -910,7 +910,7 @@ qemuBackupBegin(virDomainObj *vm,
     }
 
     if (pull) {
-        if (qemuDomainObjEnterMonitorAsync(priv->driver, vm, QEMU_ASYNC_JOB_BACKUP) < 0)
+        if (qemuDomainObjEnterMonitorAsync(priv->driver, vm, VIR_ASYNC_JOB_BACKUP) < 0)
             goto endjob;
         /* note that if the export fails we've already created the checkpoint
          * and we will not delete it */

@@ -918,7 +918,7 @@ qemuBackupBegin(virDomainObj *vm,
         qemuDomainObjExitMonitor(vm);
 
         if (rc < 0) {
-            qemuBackupJobCancelBlockjobs(vm, priv->backup, false, QEMU_ASYNC_JOB_BACKUP);
+            qemuBackupJobCancelBlockjobs(vm, priv->backup, false, VIR_ASYNC_JOB_BACKUP);
             goto endjob;
         }
     }

@@ -932,7 +932,7 @@ qemuBackupBegin(virDomainObj *vm,
         qemuCheckpointRollbackMetadata(vm, chk);
 
     if (!job_started && (nbd_running || tlsAlias || tlsSecretAlias) &&
-        qemuDomainObjEnterMonitorAsync(priv->driver, vm, QEMU_ASYNC_JOB_BACKUP) == 0) {
+        qemuDomainObjEnterMonitorAsync(priv->driver, vm, VIR_ASYNC_JOB_BACKUP) == 0) {
         if (nbd_running)
             ignore_value(qemuMonitorNBDServerStop(priv->mon));
         if (tlsAlias)
@@ -308,7 +308,7 @@ qemuBlockDiskDetectNodes(virDomainDiskDef *disk,
 int
 qemuBlockNodeNamesDetect(virQEMUDriver *driver,
                          virDomainObj *vm,
-                         qemuDomainAsyncJob asyncJob)
+                         virDomainAsyncJob asyncJob)
 {
     qemuDomainObjPrivate *priv = vm->privateData;
     g_autoptr(GHashTable) disktable = NULL;

@@ -2120,7 +2120,7 @@ qemuBlockStorageSourceChainDetach(qemuMonitor *mon,
 int
 qemuBlockStorageSourceDetachOneBlockdev(virQEMUDriver *driver,
                                         virDomainObj *vm,
-                                        qemuDomainAsyncJob asyncJob,
+                                        virDomainAsyncJob asyncJob,
                                         virStorageSource *src)
 {
     int ret;

@@ -2694,7 +2694,7 @@ qemuBlockStorageSourceCreateGeneric(virDomainObj *vm,
                                     virStorageSource *src,
                                     virStorageSource *chain,
                                     bool storageCreate,
-                                    qemuDomainAsyncJob asyncJob)
+                                    virDomainAsyncJob asyncJob)
 {
     g_autoptr(virJSONValue) props = createProps;
     qemuDomainObjPrivate *priv = vm->privateData;

@@ -2749,7 +2749,7 @@ static int
 qemuBlockStorageSourceCreateStorage(virDomainObj *vm,
                                     virStorageSource *src,
                                     virStorageSource *chain,
-                                    qemuDomainAsyncJob asyncJob)
+                                    virDomainAsyncJob asyncJob)
 {
     int actualType = virStorageSourceGetActualType(src);
     g_autoptr(virJSONValue) createstorageprops = NULL;

@@ -2786,7 +2786,7 @@ qemuBlockStorageSourceCreateFormat(virDomainObj *vm,
                                    virStorageSource *src,
                                    virStorageSource *backingStore,
                                    virStorageSource *chain,
-                                   qemuDomainAsyncJob asyncJob)
+                                   virDomainAsyncJob asyncJob)
 {
     g_autoptr(virJSONValue) createformatprops = NULL;
     int ret;

@@ -2836,7 +2836,7 @@ qemuBlockStorageSourceCreate(virDomainObj *vm,
                              virStorageSource *backingStore,
                              virStorageSource *chain,
                              qemuBlockStorageSourceAttachData *data,
-                             qemuDomainAsyncJob asyncJob)
+                             virDomainAsyncJob asyncJob)
 {
     qemuDomainObjPrivate *priv = vm->privateData;
     int ret = -1;

@@ -3020,7 +3020,7 @@ qemuBlockNamedNodeDataGetBitmapByName(GHashTable *blockNamedNodeData,
 
 GHashTable *
 qemuBlockGetNamedNodeData(virDomainObj *vm,
-                          qemuDomainAsyncJob asyncJob)
+                          virDomainAsyncJob asyncJob)
 {
     qemuDomainObjPrivate *priv = vm->privateData;
     virQEMUDriver *driver = priv->driver;

@@ -3372,7 +3372,7 @@ qemuBlockReopenFormatMon(qemuMonitor *mon,
 static int
 qemuBlockReopenFormat(virDomainObj *vm,
                       virStorageSource *src,
-                      qemuDomainAsyncJob asyncJob)
+                      virDomainAsyncJob asyncJob)
 {
     qemuDomainObjPrivate *priv = vm->privateData;
     virQEMUDriver *driver = priv->driver;

@@ -3413,7 +3413,7 @@ qemuBlockReopenFormat(virDomainObj *vm,
 int
 qemuBlockReopenReadWrite(virDomainObj *vm,
                          virStorageSource *src,
-                         qemuDomainAsyncJob asyncJob)
+                         virDomainAsyncJob asyncJob)
 {
     if (!src->readonly)
         return 0;

@@ -3442,7 +3442,7 @@ qemuBlockReopenReadWrite(virDomainObj *vm,
 int
 qemuBlockReopenReadOnly(virDomainObj *vm,
                         virStorageSource *src,
-                        qemuDomainAsyncJob asyncJob)
+                        virDomainAsyncJob asyncJob)
 {
     if (src->readonly)
         return 0;
@@ -47,7 +47,7 @@ qemuBlockNodeNameGetBackingChain(virJSONValue *namednodesdata,
 int
 qemuBlockNodeNamesDetect(virQEMUDriver *driver,
                          virDomainObj *vm,
-                         qemuDomainAsyncJob asyncJob);
+                         virDomainAsyncJob asyncJob);
 
 GHashTable *
 qemuBlockGetNodeData(virJSONValue *data);

@@ -143,7 +143,7 @@ qemuBlockStorageSourceAttachRollback(qemuMonitor *mon,
 int
 qemuBlockStorageSourceDetachOneBlockdev(virQEMUDriver *driver,
                                         virDomainObj *vm,
-                                        qemuDomainAsyncJob asyncJob,
+                                        virDomainAsyncJob asyncJob,
                                         virStorageSource *src);
 
 struct _qemuBlockStorageSourceChainData {

@@ -213,7 +213,7 @@ qemuBlockStorageSourceCreate(virDomainObj *vm,
                              virStorageSource *backingStore,
                              virStorageSource *chain,
                              qemuBlockStorageSourceAttachData *data,
-                             qemuDomainAsyncJob asyncJob);
+                             virDomainAsyncJob asyncJob);
 
 int
 qemuBlockStorageSourceCreateDetectSize(GHashTable *blockNamedNodeData,

@@ -233,7 +233,7 @@ qemuBlockNamedNodeDataGetBitmapByName(GHashTable *blockNamedNodeData,
 
 GHashTable *
 qemuBlockGetNamedNodeData(virDomainObj *vm,
-                          qemuDomainAsyncJob asyncJob);
+                          virDomainAsyncJob asyncJob);
 
 int
 qemuBlockGetBitmapMergeActions(virStorageSource *topsrc,

@@ -272,11 +272,11 @@ qemuBlockReopenFormatMon(qemuMonitor *mon,
 int
 qemuBlockReopenReadWrite(virDomainObj *vm,
                          virStorageSource *src,
-                         qemuDomainAsyncJob asyncJob);
+                         virDomainAsyncJob asyncJob);
 int
 qemuBlockReopenReadOnly(virDomainObj *vm,
                         virStorageSource *src,
-                        qemuDomainAsyncJob asyncJob);
+                        virDomainAsyncJob asyncJob);
 
 bool
 qemuBlockStorageSourceNeedsStorageSliceLayer(const virStorageSource *src);
@@ -565,7 +565,7 @@ qemuBlockJobRefreshJobs(virQEMUDriver *driver,
         job->reconnected = true;
 
         if (job->newstate != -1)
-            qemuBlockJobUpdate(vm, job, QEMU_ASYNC_JOB_NONE);
+            qemuBlockJobUpdate(vm, job, VIR_ASYNC_JOB_NONE);
         /* 'job' may be invalid after this update */
     }

@@ -839,7 +839,7 @@ qemuBlockJobEventProcessLegacy(virQEMUDriver *driver,
 static void
 qemuBlockJobEventProcessConcludedRemoveChain(virQEMUDriver *driver,
                                              virDomainObj *vm,
-                                             qemuDomainAsyncJob asyncJob,
+                                             virDomainAsyncJob asyncJob,
                                              virStorageSource *chain)
 {
     g_autoptr(qemuBlockStorageSourceChainData) data = NULL;

@@ -942,7 +942,7 @@ qemuBlockJobClearConfigChain(virDomainObj *vm,
 static int
 qemuBlockJobProcessEventCompletedPullBitmaps(virDomainObj *vm,
                                              qemuBlockJobData *job,
-                                             qemuDomainAsyncJob asyncJob)
+                                             virDomainAsyncJob asyncJob)
 {
     qemuDomainObjPrivate *priv = vm->privateData;
     g_autoptr(GHashTable) blockNamedNodeData = NULL;

@@ -992,7 +992,7 @@ static void
 qemuBlockJobProcessEventCompletedPull(virQEMUDriver *driver,
                                       virDomainObj *vm,
                                       qemuBlockJobData *job,
-                                      qemuDomainAsyncJob asyncJob)
+                                      virDomainAsyncJob asyncJob)
 {
     virStorageSource *base = NULL;
     virStorageSource *baseparent = NULL;

@@ -1106,7 +1106,7 @@ qemuBlockJobDeleteImages(virQEMUDriver *driver,
 static int
 qemuBlockJobProcessEventCompletedCommitBitmaps(virDomainObj *vm,
                                                qemuBlockJobData *job,
-                                               qemuDomainAsyncJob asyncJob)
+                                               virDomainAsyncJob asyncJob)
 {
     qemuDomainObjPrivate *priv = vm->privateData;
     g_autoptr(GHashTable) blockNamedNodeData = NULL;

@@ -1168,7 +1168,7 @@ static void
 qemuBlockJobProcessEventCompletedCommit(virQEMUDriver *driver,
                                         virDomainObj *vm,
                                         qemuBlockJobData *job,
-                                        qemuDomainAsyncJob asyncJob)
+                                        virDomainAsyncJob asyncJob)
 {
     virStorageSource *baseparent = NULL;
     virDomainDiskDef *cfgdisk = NULL;

@@ -1258,7 +1258,7 @@ static void
 qemuBlockJobProcessEventCompletedActiveCommit(virQEMUDriver *driver,
                                               virDomainObj *vm,
                                               qemuBlockJobData *job,
-                                              qemuDomainAsyncJob asyncJob)
+                                              virDomainAsyncJob asyncJob)
 {
     virStorageSource *baseparent = NULL;
     virDomainDiskDef *cfgdisk = NULL;

@@ -1329,7 +1329,7 @@ qemuBlockJobProcessEventCompletedActiveCommit(virQEMUDriver *driver,
 static int
 qemuBlockJobProcessEventCompletedCopyBitmaps(virDomainObj *vm,
                                              qemuBlockJobData *job,
-                                             qemuDomainAsyncJob asyncJob)
+                                             virDomainAsyncJob asyncJob)
 {
     qemuDomainObjPrivate *priv = vm->privateData;
     g_autoptr(GHashTable) blockNamedNodeData = NULL;

@@ -1366,7 +1366,7 @@ static void
 qemuBlockJobProcessEventConcludedCopyPivot(virQEMUDriver *driver,
                                            virDomainObj *vm,
                                            qemuBlockJobData *job,
-                                           qemuDomainAsyncJob asyncJob)
+                                           virDomainAsyncJob asyncJob)
 {
     qemuDomainObjPrivate *priv = vm->privateData;
     VIR_DEBUG("copy job '%s' on VM '%s' pivoted", job->name, vm->def->name);

@@ -1402,7 +1402,7 @@ static void
 qemuBlockJobProcessEventConcludedCopyAbort(virQEMUDriver *driver,
                                            virDomainObj *vm,
                                            qemuBlockJobData *job,
-                                           qemuDomainAsyncJob asyncJob)
+                                           virDomainAsyncJob asyncJob)
 {
     qemuDomainObjPrivate *priv = vm->privateData;

@@ -1438,7 +1438,7 @@ static void
 qemuBlockJobProcessEventFailedActiveCommit(virQEMUDriver *driver,
                                            virDomainObj *vm,
                                            qemuBlockJobData *job,
-                                           qemuDomainAsyncJob asyncJob)
+                                           virDomainAsyncJob asyncJob)
 {
     qemuDomainObjPrivate *priv = vm->privateData;
     virDomainDiskDef *disk = job->disk;

@@ -1470,7 +1470,7 @@ static void
 qemuBlockJobProcessEventConcludedCreate(virQEMUDriver *driver,
                                         virDomainObj *vm,
                                         qemuBlockJobData *job,
-                                        qemuDomainAsyncJob asyncJob)
+                                        virDomainAsyncJob asyncJob)
 {
     g_autoptr(qemuBlockStorageSourceAttachData) backend = NULL;

@@ -1511,7 +1511,7 @@ static void
 qemuBlockJobProcessEventConcludedBackup(virQEMUDriver *driver,
                                         virDomainObj *vm,
                                         qemuBlockJobData *job,
-                                        qemuDomainAsyncJob asyncJob,
+                                        virDomainAsyncJob asyncJob,
                                         qemuBlockjobState newstate,
                                         unsigned long long progressCurrent,
                                         unsigned long long progressTotal)

@@ -1547,7 +1547,7 @@ static void
 qemuBlockJobEventProcessConcludedTransition(qemuBlockJobData *job,
                                             virQEMUDriver *driver,
                                             virDomainObj *vm,
-                                            qemuDomainAsyncJob asyncJob,
+                                            virDomainAsyncJob asyncJob,
                                             unsigned long long progressCurrent,
                                             unsigned long long progressTotal)
 {

@@ -1607,7 +1607,7 @@ static void
 qemuBlockJobEventProcessConcluded(qemuBlockJobData *job,
                                   virQEMUDriver *driver,
                                   virDomainObj *vm,
-                                  qemuDomainAsyncJob asyncJob)
+                                  virDomainAsyncJob asyncJob)
 {
     qemuMonitorJobInfo **jobinfo = NULL;
     size_t njobinfo = 0;

@@ -1688,7 +1688,7 @@ static void
 qemuBlockJobEventProcess(virQEMUDriver *driver,
                          virDomainObj *vm,
                          qemuBlockJobData *job,
-                         qemuDomainAsyncJob asyncJob)
+                         virDomainAsyncJob asyncJob)
 
 {
     switch ((qemuBlockjobState) job->newstate) {
@@ -192,7 +192,7 @@ qemuCheckpointDiscardBitmaps(virDomainObj *vm,
 
     actions = virJSONValueNewArray();
 
-    if (!(blockNamedNodeData = qemuBlockGetNamedNodeData(vm, QEMU_ASYNC_JOB_NONE)))
+    if (!(blockNamedNodeData = qemuBlockGetNamedNodeData(vm, VIR_ASYNC_JOB_NONE)))
         return -1;
 
     for (i = 0; i < chkdef->ndisks; i++) {

@@ -229,7 +229,7 @@ qemuCheckpointDiscardBitmaps(virDomainObj *vm,
             goto relabel;
 
         if (virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_BLOCKDEV_REOPEN) &&
-            qemuBlockReopenReadWrite(vm, src, QEMU_ASYNC_JOB_NONE) < 0)
+            qemuBlockReopenReadWrite(vm, src, VIR_ASYNC_JOB_NONE) < 0)
             goto relabel;
 
         relabelimages = g_slist_prepend(relabelimages, src);

@@ -244,7 +244,7 @@ qemuCheckpointDiscardBitmaps(virDomainObj *vm,
         virStorageSource *src = next->data;
 
         if (virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_BLOCKDEV_REOPEN))
-            ignore_value(qemuBlockReopenReadOnly(vm, src, QEMU_ASYNC_JOB_NONE));
+            ignore_value(qemuBlockReopenReadOnly(vm, src, VIR_ASYNC_JOB_NONE));
 
         ignore_value(qemuDomainStorageSourceAccessAllow(driver, vm, src,
                                                         true, false, false));

@@ -417,7 +417,7 @@ qemuCheckpointRedefineValidateBitmaps(virDomainObj *vm,
     if (virDomainObjCheckActive(vm) < 0)
         return -1;
 
-    if (!(blockNamedNodeData = qemuBlockGetNamedNodeData(vm, QEMU_ASYNC_JOB_NONE)))
+    if (!(blockNamedNodeData = qemuBlockGetNamedNodeData(vm, VIR_ASYNC_JOB_NONE)))
         return -1;
 
     for (i = 0; i < chkdef->ndisks; i++) {

@@ -607,7 +607,7 @@ qemuCheckpointCreateXML(virDomainPtr domain,
     /* Unlike snapshots, the RNG schema already ensured a sane filename. */
 
     /* We are going to modify the domain below. */
-    if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+    if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_MODIFY) < 0)
         return NULL;
 
     if (redefine) {

@@ -658,13 +658,13 @@ qemuCheckpointGetXMLDescUpdateSize(virDomainObj *vm,
     size_t i;
     int ret = -1;
 
-    if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+    if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_MODIFY) < 0)
         return -1;
 
     if (virDomainObjCheckActive(vm) < 0)
         goto endjob;
 
-    if (!(nodedataMerge = qemuBlockGetNamedNodeData(vm, QEMU_ASYNC_JOB_NONE)))
+    if (!(nodedataMerge = qemuBlockGetNamedNodeData(vm, VIR_ASYNC_JOB_NONE)))
         goto endjob;
 
     /* enumerate disks relevant for the checkpoint which are also present in the

@@ -741,7 +741,7 @@ qemuCheckpointGetXMLDescUpdateSize(virDomainObj *vm,
         goto endjob;
 
     /* now do a final refresh */
-    if (!(nodedataStats = qemuBlockGetNamedNodeData(vm, QEMU_ASYNC_JOB_NONE)))
+    if (!(nodedataStats = qemuBlockGetNamedNodeData(vm, VIR_ASYNC_JOB_NONE)))
         goto endjob;
 
     qemuDomainObjEnterMonitor(driver, vm);

@@ -852,7 +852,7 @@ qemuCheckpointDelete(virDomainObj *vm,
                   VIR_DOMAIN_CHECKPOINT_DELETE_METADATA_ONLY |
                   VIR_DOMAIN_CHECKPOINT_DELETE_CHILDREN_ONLY, -1);
 
-    if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+    if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_MODIFY) < 0)
         return -1;
 
     if (!metadata_only) {
@@ -210,7 +210,7 @@ qemuDomainFormatJobPrivate(virBuffer *buf,
 {
     qemuDomainJobPrivate *priv = job->privateData;
 
-    if (job->asyncJob == QEMU_ASYNC_JOB_MIGRATION_OUT) {
+    if (job->asyncJob == VIR_ASYNC_JOB_MIGRATION_OUT) {
         if (qemuDomainObjPrivateXMLFormatNBDMigration(buf, vm) < 0)
             return -1;

@@ -284,7 +284,7 @@ qemuDomainObjPrivateXMLParseJobNBD(virDomainObj *vm,
         return -1;
 
     if (n > 0) {
-        if (priv->job.asyncJob != QEMU_ASYNC_JOB_MIGRATION_OUT) {
+        if (priv->job.asyncJob != VIR_ASYNC_JOB_MIGRATION_OUT) {
             VIR_WARN("Found disks marked for migration but we were not "
                      "migrating");
             n = 0;

@@ -5858,11 +5858,11 @@ qemuDomainSaveConfig(virDomainObj *obj)
 static int
 qemuDomainObjEnterMonitorInternal(virQEMUDriver *driver,
                                   virDomainObj *obj,
-                                  qemuDomainAsyncJob asyncJob)
+                                  virDomainAsyncJob asyncJob)
 {
     qemuDomainObjPrivate *priv = obj->privateData;
 
-    if (asyncJob != QEMU_ASYNC_JOB_NONE) {
+    if (asyncJob != VIR_ASYNC_JOB_NONE) {
         int ret;
         if ((ret = qemuDomainObjBeginNestedJob(driver, obj, asyncJob)) < 0)
             return ret;

@@ -5878,7 +5878,7 @@ qemuDomainObjEnterMonitorInternal(virQEMUDriver *driver,
     } else if (priv->job.owner != virThreadSelfID()) {
         VIR_WARN("Entering a monitor without owning a job. "
                  "Job %s owner %s (%llu)",
-                 qemuDomainJobTypeToString(priv->job.active),
+                 virDomainJobTypeToString(priv->job.active),
                  priv->job.ownerAPI, priv->job.owner);
     }

@@ -5918,7 +5918,7 @@ qemuDomainObjExitMonitor(virDomainObj *obj)
     if (!hasRefs)
         priv->mon = NULL;
 
-    if (priv->job.active == QEMU_JOB_ASYNC_NESTED)
+    if (priv->job.active == VIR_JOB_ASYNC_NESTED)
         qemuDomainObjEndJob(obj);
 }

@@ -5926,7 +5926,7 @@ void qemuDomainObjEnterMonitor(virQEMUDriver *driver,
                                virDomainObj *obj)
 {
     ignore_value(qemuDomainObjEnterMonitorInternal(driver, obj,
-                                                   QEMU_ASYNC_JOB_NONE));
+                                                   VIR_ASYNC_JOB_NONE));
 }
 
 /*

@@ -5935,7 +5935,7 @@ void qemuDomainObjEnterMonitor(virQEMUDriver *driver,
 * To be called immediately before any QEMU monitor API call.
 * Must have already either called qemuDomainObjBeginJob()
 * and checked that the VM is still active, with asyncJob of
-* QEMU_ASYNC_JOB_NONE; or already called qemuDomainObjBeginAsyncJob,
+* VIR_ASYNC_JOB_NONE; or already called qemuDomainObjBeginAsyncJob,
 * with the same asyncJob.
 *
 * Returns 0 if job was started, in which case this must be followed with

@@ -5946,7 +5946,7 @@ void qemuDomainObjEnterMonitor(virQEMUDriver *driver,
 int
 qemuDomainObjEnterMonitorAsync(virQEMUDriver *driver,
                                virDomainObj *obj,
-                               qemuDomainAsyncJob asyncJob)
+                               virDomainAsyncJob asyncJob)
 {
     return qemuDomainObjEnterMonitorInternal(driver, obj, asyncJob);
 }

@@ -7135,7 +7135,7 @@ qemuDomainRemoveInactiveLocked(virQEMUDriver *driver,
 * qemuDomainRemoveInactiveJob:
 *
 * Just like qemuDomainRemoveInactive but it tries to grab a
-* QEMU_JOB_MODIFY first. Even though it doesn't succeed in
+* VIR_JOB_MODIFY first. Even though it doesn't succeed in
 * grabbing the job the control carries with
 * qemuDomainRemoveInactive call.
 */

@@ -7145,7 +7145,7 @@ qemuDomainRemoveInactiveJob(virQEMUDriver *driver,
 {
     bool haveJob;
 
-    haveJob = qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) >= 0;
+    haveJob = qemuDomainObjBeginJob(driver, vm, VIR_JOB_MODIFY) >= 0;
 
     qemuDomainRemoveInactive(driver, vm);

@@ -7166,7 +7166,7 @@ qemuDomainRemoveInactiveJobLocked(virQEMUDriver *driver,
 {
     bool haveJob;
 
-    haveJob = qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) >= 0;
+    haveJob = qemuDomainObjBeginJob(driver, vm, VIR_JOB_MODIFY) >= 0;
 
     qemuDomainRemoveInactiveLocked(driver, vm);

@@ -10071,7 +10071,7 @@ qemuDomainVcpuPersistOrder(virDomainDef *def)
 int
 qemuDomainCheckMonitor(virQEMUDriver *driver,
                        virDomainObj *vm,
-                       qemuDomainAsyncJob asyncJob)
+                       virDomainAsyncJob asyncJob)
 {
     qemuDomainObjPrivate *priv = vm->privateData;
     int ret;
@@ -500,7 +500,7 @@ void qemuDomainObjExitMonitor(virDomainObj *obj)
     ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2);
 int qemuDomainObjEnterMonitorAsync(virQEMUDriver *driver,
                                    virDomainObj *obj,
-                                   qemuDomainAsyncJob asyncJob)
+                                   virDomainAsyncJob asyncJob)
     ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2) G_GNUC_WARN_UNUSED_RESULT;

@@ -892,7 +892,7 @@ void qemuDomainVcpuPersistOrder(virDomainDef *def)
 
 int qemuDomainCheckMonitor(virQEMUDriver *driver,
                            virDomainObj *vm,
-                           qemuDomainAsyncJob asyncJob);
+                           virDomainAsyncJob asyncJob);
 
 bool qemuDomainSupportsVideoVga(const virDomainVideoDef *video,
                                 virQEMUCaps *qemuCaps);
@@ -31,38 +31,6 @@
 
 VIR_LOG_INIT("qemu.qemu_domainjob");
 
-VIR_ENUM_IMPL(qemuDomainJob,
-              QEMU_JOB_LAST,
-              "none",
-              "query",
-              "destroy",
-              "suspend",
-              "modify",
-              "abort",
-              "migration operation",
-              "none", /* async job is never stored in job.active */
-              "async nested",
-);
-
-VIR_ENUM_IMPL(qemuDomainAgentJob,
-              QEMU_AGENT_JOB_LAST,
-              "none",
-              "query",
-              "modify",
-);
-
-VIR_ENUM_IMPL(qemuDomainAsyncJob,
-              QEMU_ASYNC_JOB_LAST,
-              "none",
-              "migration out",
-              "migration in",
-              "save",
-              "dump",
-              "snapshot",
-              "start",
-              "backup",
-);
-
 static void *
 qemuJobDataAllocPrivateData(void)
 {

@@ -106,22 +74,22 @@ qemuDomainJobSetStatsType(virDomainJobData *jobData,
 
 
 const char *
-qemuDomainAsyncJobPhaseToString(qemuDomainAsyncJob job,
+virDomainAsyncJobPhaseToString(virDomainAsyncJob job,
                                 int phase G_GNUC_UNUSED)
 {
     switch (job) {
-    case QEMU_ASYNC_JOB_MIGRATION_OUT:
-    case QEMU_ASYNC_JOB_MIGRATION_IN:
+    case VIR_ASYNC_JOB_MIGRATION_OUT:
+    case VIR_ASYNC_JOB_MIGRATION_IN:
         return qemuMigrationJobPhaseTypeToString(phase);
 
-    case QEMU_ASYNC_JOB_SAVE:
-    case QEMU_ASYNC_JOB_DUMP:
-    case QEMU_ASYNC_JOB_SNAPSHOT:
-    case QEMU_ASYNC_JOB_START:
-    case QEMU_ASYNC_JOB_NONE:
-    case QEMU_ASYNC_JOB_BACKUP:
+    case VIR_ASYNC_JOB_SAVE:
+    case VIR_ASYNC_JOB_DUMP:
+    case VIR_ASYNC_JOB_SNAPSHOT:
+    case VIR_ASYNC_JOB_START:
+    case VIR_ASYNC_JOB_NONE:
+    case VIR_ASYNC_JOB_BACKUP:
         G_GNUC_FALLTHROUGH;
-    case QEMU_ASYNC_JOB_LAST:
+    case VIR_ASYNC_JOB_LAST:
         break;
     }

@@ -129,25 +97,25 @@ qemuDomainAsyncJobPhaseToString(qemuDomainAsyncJob job,
 }
 
 int
-qemuDomainAsyncJobPhaseFromString(qemuDomainAsyncJob job,
+virDomainAsyncJobPhaseFromString(virDomainAsyncJob job,
                                   const char *phase)
 {
     if (!phase)
         return 0;
 
     switch (job) {
-    case QEMU_ASYNC_JOB_MIGRATION_OUT:
-    case QEMU_ASYNC_JOB_MIGRATION_IN:
+    case VIR_ASYNC_JOB_MIGRATION_OUT:
+    case VIR_ASYNC_JOB_MIGRATION_IN:
        return qemuMigrationJobPhaseTypeFromString(phase);
 
-    case QEMU_ASYNC_JOB_SAVE:
-    case QEMU_ASYNC_JOB_DUMP:
-    case QEMU_ASYNC_JOB_SNAPSHOT:
-    case QEMU_ASYNC_JOB_START:
-    case QEMU_ASYNC_JOB_NONE:
-    case QEMU_ASYNC_JOB_BACKUP:
+    case VIR_ASYNC_JOB_SAVE:
+    case VIR_ASYNC_JOB_DUMP:
+    case VIR_ASYNC_JOB_SNAPSHOT:
+    case VIR_ASYNC_JOB_START:
+    case VIR_ASYNC_JOB_NONE:
+    case VIR_ASYNC_JOB_BACKUP:
         G_GNUC_FALLTHROUGH;
-    case QEMU_ASYNC_JOB_LAST:
+    case VIR_ASYNC_JOB_LAST:
         break;
     }

@@ -211,7 +179,7 @@ qemuDomainObjInitJob(qemuDomainJobObj *job,
 static void
 qemuDomainObjResetJob(qemuDomainJobObj *job)
 {
-    job->active = QEMU_JOB_NONE;
+    job->active = VIR_JOB_NONE;
     job->owner = 0;
     g_clear_pointer(&job->ownerAPI, g_free);
     job->started = 0;

@@ -221,7 +189,7 @@ qemuDomainObjResetJob(qemuDomainJobObj *job)
 static void
 qemuDomainObjResetAgentJob(qemuDomainJobObj *job)
 {
-    job->agentActive = QEMU_AGENT_JOB_NONE;
+    job->agentActive = VIR_AGENT_JOB_NONE;
     job->agentOwner = 0;
     g_clear_pointer(&job->agentOwnerAPI, g_free);
     job->agentStarted = 0;

@@ -231,7 +199,7 @@ qemuDomainObjResetAgentJob(qemuDomainJobObj *job)
 static void
 qemuDomainObjResetAsyncJob(qemuDomainJobObj *job)
 {
-    job->asyncJob = QEMU_ASYNC_JOB_NONE;
+    job->asyncJob = VIR_ASYNC_JOB_NONE;
     job->asyncOwner = 0;
     g_clear_pointer(&job->asyncOwnerAPI, g_free);
     job->asyncStarted = 0;

@@ -286,7 +254,7 @@ qemuDomainObjClearJob(qemuDomainJobObj *job)
 }
 
 bool
-qemuDomainTrackJob(qemuDomainJob job)
+qemuDomainTrackJob(virDomainJob job)
 {
     return (QEMU_DOMAIN_TRACK_JOBS & JOB_MASK(job)) != 0;
 }

@@ -713,14 +681,14 @@ qemuDomainObjSetJobPhase(virDomainObj *obj,
         return;
 
     VIR_DEBUG("Setting '%s' phase to '%s'",
-              qemuDomainAsyncJobTypeToString(priv->job.asyncJob),
-              qemuDomainAsyncJobPhaseToString(priv->job.asyncJob, phase));
+              virDomainAsyncJobTypeToString(priv->job.asyncJob),
+              virDomainAsyncJobPhaseToString(priv->job.asyncJob, phase));
 
     if (priv->job.asyncOwner == 0) {
         priv->job.asyncOwnerAPI = g_strdup(virThreadJobGet());
     } else if (me != priv->job.asyncOwner) {
         VIR_WARN("'%s' async job is owned by thread %llu",
-                 qemuDomainAsyncJobTypeToString(priv->job.asyncJob),
+                 virDomainAsyncJobTypeToString(priv->job.asyncJob),
                  priv->job.asyncOwner);
     }

@@ -738,7 +706,7 @@ qemuDomainObjSetAsyncJobMask(virDomainObj *obj,
     if (!priv->job.asyncJob)
         return;
 
-    priv->job.mask = allowedJobs | JOB_MASK(QEMU_JOB_DESTROY);
+    priv->job.mask = allowedJobs | JOB_MASK(VIR_JOB_DESTROY);
 }
 
 void

@@ -746,7 +714,7 @@ qemuDomainObjDiscardAsyncJob(virDomainObj *obj)
 {
     qemuDomainObjPrivate *priv = obj->privateData;
 
-    if (priv->job.active == QEMU_JOB_ASYNC_NESTED)
+    if (priv->job.active == VIR_JOB_ASYNC_NESTED)
         qemuDomainObjResetJob(&priv->job);
     qemuDomainObjResetAsyncJob(&priv->job);
     qemuDomainSaveStatus(obj);

@@ -758,33 +726,33 @@ qemuDomainObjReleaseAsyncJob(virDomainObj *obj)
     qemuDomainObjPrivate *priv = obj->privateData;
 
     VIR_DEBUG("Releasing ownership of '%s' async job",
-              qemuDomainAsyncJobTypeToString(priv->job.asyncJob));
+              virDomainAsyncJobTypeToString(priv->job.asyncJob));
 
     if (priv->job.asyncOwner != virThreadSelfID()) {
         VIR_WARN("'%s' async job is owned by thread %llu",
-                 qemuDomainAsyncJobTypeToString(priv->job.asyncJob),
+                 virDomainAsyncJobTypeToString(priv->job.asyncJob),
                  priv->job.asyncOwner);
     }
     priv->job.asyncOwner = 0;
 }
 
 static bool
-qemuDomainNestedJobAllowed(qemuDomainJobObj *jobs, qemuDomainJob newJob)
+qemuDomainNestedJobAllowed(qemuDomainJobObj *jobs, virDomainJob newJob)
 {
     return !jobs->asyncJob ||
-           newJob == QEMU_JOB_NONE ||
+           newJob == VIR_JOB_NONE ||
            (jobs->mask & JOB_MASK(newJob)) != 0;
 }
 
 static bool
 qemuDomainObjCanSetJob(qemuDomainJobObj *job,
-                       qemuDomainJob newJob,
-                       qemuDomainAgentJob newAgentJob)
+                       virDomainJob newJob,
+                       virDomainAgentJob newAgentJob)
 {
-    return ((newJob == QEMU_JOB_NONE ||
-             job->active == QEMU_JOB_NONE) &&
-            (newAgentJob == QEMU_AGENT_JOB_NONE ||
-             job->agentActive == QEMU_AGENT_JOB_NONE));
+    return ((newJob == VIR_JOB_NONE ||
+             job->active == VIR_JOB_NONE) &&
+            (newAgentJob == VIR_AGENT_JOB_NONE ||
+             job->agentActive == VIR_AGENT_JOB_NONE));
 }
 
 /* Give up waiting for mutex after 30 seconds */

@@ -794,8 +762,8 @@ qemuDomainObjCanSetJob(qemuDomainJobObj *job,
 * qemuDomainObjBeginJobInternal:
 * @driver: qemu driver
 * @obj: domain object
-* @job: qemuDomainJob to start
-* @asyncJob: qemuDomainAsyncJob to start
+* @job: virDomainJob to start
+* @asyncJob: virDomainAsyncJob to start
 * @nowait: don't wait trying to acquire @job
 *
 * Acquires job for a domain object which must be locked before

@@ -815,16 +783,16 @@ qemuDomainObjCanSetJob(qemuDomainJobObj *job,
 static int ATTRIBUTE_NONNULL(1)
 qemuDomainObjBeginJobInternal(virQEMUDriver *driver,
                               virDomainObj *obj,
-                              qemuDomainJob job,
-                              qemuDomainAgentJob agentJob,
-                              qemuDomainAsyncJob asyncJob,
+                              virDomainJob job,
+                              virDomainAgentJob agentJob,
+                              virDomainAsyncJob asyncJob,
                               bool nowait)
 {
     qemuDomainObjPrivate *priv = obj->privateData;
     unsigned long long now;
     unsigned long long then;
-    bool nested = job == QEMU_JOB_ASYNC_NESTED;
-    bool async = job == QEMU_JOB_ASYNC;
+    bool nested = job == VIR_JOB_ASYNC_NESTED;
+    bool async = job == VIR_JOB_ASYNC;
     g_autoptr(virQEMUDriverConfig) cfg = virQEMUDriverGetConfig(driver);
     const char *blocker = NULL;
     const char *agentBlocker = NULL;

@@ -837,13 +805,13 @@ qemuDomainObjBeginJobInternal(virQEMUDriver *driver,
     VIR_DEBUG("Starting job: API=%s job=%s agentJob=%s asyncJob=%s "
               "(vm=%p name=%s, current job=%s agentJob=%s async=%s)",
               NULLSTR(currentAPI),
-              qemuDomainJobTypeToString(job),
-              qemuDomainAgentJobTypeToString(agentJob),
-              qemuDomainAsyncJobTypeToString(asyncJob),
+              virDomainJobTypeToString(job),
+              virDomainAgentJobTypeToString(agentJob),
+              virDomainAsyncJobTypeToString(asyncJob),
               obj, obj->def->name,
-              qemuDomainJobTypeToString(priv->job.active),
-              qemuDomainAgentJobTypeToString(priv->job.agentActive),
-              qemuDomainAsyncJobTypeToString(priv->job.asyncJob));
+              virDomainJobTypeToString(priv->job.active),
+              virDomainAgentJobTypeToString(priv->job.agentActive),
+              virDomainAsyncJobTypeToString(priv->job.asyncJob));
 
     if (virTimeMillisNow(&now) < 0)
         return -1;

@@ -852,7 +820,7 @@ qemuDomainObjBeginJobInternal(virQEMUDriver *driver,
     then = now + QEMU_JOB_WAIT_TIME;
 
  retry:
-    if ((!async && job != QEMU_JOB_DESTROY) &&
+    if ((!async && job != VIR_JOB_DESTROY) &&
         cfg->maxQueuedJobs &&
         priv->job.jobsQueued > cfg->maxQueuedJobs) {
         goto error;

@@ -886,10 +854,10 @@ qemuDomainObjBeginJobInternal(virQEMUDriver *driver,
     if (job) {
         qemuDomainObjResetJob(&priv->job);
 
-        if (job != QEMU_JOB_ASYNC) {
+        if (job != VIR_JOB_ASYNC) {
             VIR_DEBUG("Started job: %s (async=%s vm=%p name=%s)",
-                      qemuDomainJobTypeToString(job),
-                      qemuDomainAsyncJobTypeToString(priv->job.asyncJob),
+                      virDomainJobTypeToString(job),
+                      virDomainAsyncJobTypeToString(priv->job.asyncJob),
                       obj, obj->def->name);
             priv->job.active = job;
             priv->job.owner = virThreadSelfID();

@@ -897,7 +865,7 @@ qemuDomainObjBeginJobInternal(virQEMUDriver *driver,
             priv->job.started = now;
         } else {
             VIR_DEBUG("Started async job: %s (vm=%p name=%s)",
-                      qemuDomainAsyncJobTypeToString(asyncJob),
+                      virDomainAsyncJobTypeToString(asyncJob),
                       obj, obj->def->name);
             qemuDomainObjResetAsyncJob(&priv->job);
             priv->job.current = virDomainJobDataInit(&qemuJobDataPrivateDataCallbacks);

@@ -914,10 +882,10 @@ qemuDomainObjBeginJobInternal(virQEMUDriver *driver,
         qemuDomainObjResetAgentJob(&priv->job);
 
         VIR_DEBUG("Started agent job: %s (vm=%p name=%s job=%s async=%s)",
-                  qemuDomainAgentJobTypeToString(agentJob),
+                  virDomainAgentJobTypeToString(agentJob),
                   obj, obj->def->name,
-                  qemuDomainJobTypeToString(priv->job.active),
-                  qemuDomainAsyncJobTypeToString(priv->job.asyncJob));
+                  virDomainJobTypeToString(priv->job.active),
+                  virDomainAsyncJobTypeToString(priv->job.asyncJob));
         priv->job.agentActive = agentJob;
         priv->job.agentOwner = virThreadSelfID();
         priv->job.agentOwnerAPI = g_strdup(virThreadJobGet());

@@ -942,14 +910,14 @@ qemuDomainObjBeginJobInternal(virQEMUDriver *driver,
               "current job is (%s, %s, %s) "
               "owned by (%llu %s, %llu %s, %llu %s (flags=0x%lx)) "
               "for (%llus, %llus, %llus)",
-              qemuDomainJobTypeToString(job),
-              qemuDomainAgentJobTypeToString(agentJob),
-              qemuDomainAsyncJobTypeToString(asyncJob),
+              virDomainJobTypeToString(job),
+              virDomainAgentJobTypeToString(agentJob),
+              virDomainAsyncJobTypeToString(asyncJob),
               NULLSTR(currentAPI),
               obj->def->name,
-              qemuDomainJobTypeToString(priv->job.active),
-              qemuDomainAgentJobTypeToString(priv->job.agentActive),
-              qemuDomainAsyncJobTypeToString(priv->job.asyncJob),
+              virDomainJobTypeToString(priv->job.active),
+              virDomainAgentJobTypeToString(priv->job.agentActive),
+              virDomainAsyncJobTypeToString(priv->job.asyncJob),
               priv->job.owner, NULLSTR(priv->job.ownerAPI),
               priv->job.agentOwner, NULLSTR(priv->job.agentOwnerAPI),
               priv->job.asyncOwner, NULLSTR(priv->job.asyncOwnerAPI),

@@ -1032,11 +1000,11 @@ qemuDomainObjBeginJobInternal(virQEMUDriver *driver,
 */
 int qemuDomainObjBeginJob(virQEMUDriver *driver,
                           virDomainObj *obj,
-                          qemuDomainJob job)
+                          virDomainJob job)
 {
     if (qemuDomainObjBeginJobInternal(driver, obj, job,
-                                      QEMU_AGENT_JOB_NONE,
-                                      QEMU_ASYNC_JOB_NONE, false) < 0)
+                                      VIR_AGENT_JOB_NONE,
+                                      VIR_ASYNC_JOB_NONE, false) < 0)
         return -1;
     return 0;
 }

@@ -1051,23 +1019,23 @@ int qemuDomainObjBeginJob(virQEMUDriver *driver,
 int
 qemuDomainObjBeginAgentJob(virQEMUDriver *driver,
                            virDomainObj *obj,
-                           qemuDomainAgentJob agentJob)
+                           virDomainAgentJob agentJob)
 {
-    return qemuDomainObjBeginJobInternal(driver, obj, QEMU_JOB_NONE,
+    return qemuDomainObjBeginJobInternal(driver, obj, VIR_JOB_NONE,
                                          agentJob,
-                                         QEMU_ASYNC_JOB_NONE, false);
+                                         VIR_ASYNC_JOB_NONE, false);
 }
 
 int qemuDomainObjBeginAsyncJob(virQEMUDriver *driver,
                                virDomainObj *obj,
-                               qemuDomainAsyncJob asyncJob,
+                               virDomainAsyncJob asyncJob,
                                virDomainJobOperation operation,
                                unsigned long apiFlags)
 {
     qemuDomainObjPrivate *priv;
 
-    if (qemuDomainObjBeginJobInternal(driver, obj, QEMU_JOB_ASYNC,
-                                      QEMU_AGENT_JOB_NONE,
+    if (qemuDomainObjBeginJobInternal(driver, obj, VIR_JOB_ASYNC,
+                                      VIR_AGENT_JOB_NONE,
                                       asyncJob, false) < 0)
         return -1;

@@ -1080,7 +1048,7 @@ int qemuDomainObjBeginAsyncJob(virQEMUDriver *driver,
 int
 qemuDomainObjBeginNestedJob(virQEMUDriver *driver,
                             virDomainObj *obj,
-                            qemuDomainAsyncJob asyncJob)
+                            virDomainAsyncJob asyncJob)
 {
     qemuDomainObjPrivate *priv = obj->privateData;

@@ -1097,9 +1065,9 @@ qemuDomainObjBeginNestedJob(virQEMUDriver *driver,
     }
 
     return qemuDomainObjBeginJobInternal(driver, obj,
-                                         QEMU_JOB_ASYNC_NESTED,
-                                         QEMU_AGENT_JOB_NONE,
-                                         QEMU_ASYNC_JOB_NONE,
+                                         VIR_JOB_ASYNC_NESTED,
+                                         VIR_AGENT_JOB_NONE,
+                                         VIR_ASYNC_JOB_NONE,
                                          false);
 }

@@ -1108,7 +1076,7 @@ qemuDomainObjBeginNestedJob(virQEMUDriver *driver,
 *
 * @driver: qemu driver
 * @obj: domain object
-* @job: qemuDomainJob to start
+* @job: virDomainJob to start
 *
 * Acquires job for a domain object which must be locked before
 * calling. If there's already a job running it returns

@@ -1119,11 +1087,11 @@ qemuDomainObjBeginNestedJob(virQEMUDriver *driver,
 int
 qemuDomainObjBeginJobNowait(virQEMUDriver *driver,
                             virDomainObj *obj,
-                            qemuDomainJob job)
+                            virDomainJob job)
 {
     return qemuDomainObjBeginJobInternal(driver, obj, job,
-                                         QEMU_AGENT_JOB_NONE,
-                                         QEMU_ASYNC_JOB_NONE, true);
+                                         VIR_AGENT_JOB_NONE,
+                                         VIR_ASYNC_JOB_NONE, true);
 }
 
 /*

@@ -1136,13 +1104,13 @@ void
 qemuDomainObjEndJob(virDomainObj *obj)
 {
     qemuDomainObjPrivate *priv = obj->privateData;
-    qemuDomainJob job = priv->job.active;
+    virDomainJob job = priv->job.active;
 
     priv->job.jobsQueued--;
 
     VIR_DEBUG("Stopping job: %s (async=%s vm=%p name=%s)",
-              qemuDomainJobTypeToString(job),
-              qemuDomainAsyncJobTypeToString(priv->job.asyncJob),
+              virDomainJobTypeToString(job),
+              virDomainAsyncJobTypeToString(priv->job.asyncJob),
               obj, obj->def->name);
 
     qemuDomainObjResetJob(&priv->job);

@@ -1157,13 +1125,13 @@ void
 qemuDomainObjEndAgentJob(virDomainObj *obj)
 {
     qemuDomainObjPrivate *priv = obj->privateData;
-    qemuDomainAgentJob agentJob = priv->job.agentActive;
+    virDomainAgentJob agentJob = priv->job.agentActive;
 
     priv->job.jobsQueued--;
 
     VIR_DEBUG("Stopping agent job: %s (async=%s vm=%p name=%s)",
-              qemuDomainAgentJobTypeToString(agentJob),
-              qemuDomainAsyncJobTypeToString(priv->job.asyncJob),
+              virDomainAgentJobTypeToString(agentJob),
+              virDomainAsyncJobTypeToString(priv->job.asyncJob),
               obj, obj->def->name);
 
     qemuDomainObjResetAgentJob(&priv->job);

@@ -1180,7 +1148,7 @@ qemuDomainObjEndAsyncJob(virDomainObj *obj)
     priv->job.jobsQueued--;
 
     VIR_DEBUG("Stopping async job: %s (vm=%p name=%s)",
-              qemuDomainAsyncJobTypeToString(priv->job.asyncJob),
+              virDomainAsyncJobTypeToString(priv->job.asyncJob),
               obj, obj->def->name);
 
     qemuDomainObjResetAsyncJob(&priv->job);

@@ -1194,7 +1162,7 @@ qemuDomainObjAbortAsyncJob(virDomainObj *obj)
     qemuDomainObjPrivate *priv = obj->privateData;
 
     VIR_DEBUG("Requesting abort of async job: %s (vm=%p name=%s)",
-              qemuDomainAsyncJobTypeToString(priv->job.asyncJob),
+              virDomainAsyncJobTypeToString(priv->job.asyncJob),
               obj, obj->def->name);
 
     priv->job.abortJob = true;

@@ -1208,26 +1176,26 @@ qemuDomainObjPrivateXMLFormatJob(virBuffer *buf,
     qemuDomainObjPrivate *priv = vm->privateData;
     g_auto(virBuffer) attrBuf = VIR_BUFFER_INITIALIZER;
     g_auto(virBuffer) childBuf = VIR_BUFFER_INIT_CHILD(buf);
-    qemuDomainJob job = priv->job.active;
+    virDomainJob job = priv->job.active;
 
     if (!qemuDomainTrackJob(job))
-        job = QEMU_JOB_NONE;
+        job = VIR_JOB_NONE;
 
-    if (job == QEMU_JOB_NONE &&
-        priv->job.asyncJob == QEMU_ASYNC_JOB_NONE)
+    if (job == VIR_JOB_NONE &&
+        priv->job.asyncJob == VIR_ASYNC_JOB_NONE)
         return 0;
 
     virBufferAsprintf(&attrBuf, " type='%s' async='%s'",
-                      qemuDomainJobTypeToString(job),
-                      qemuDomainAsyncJobTypeToString(priv->job.asyncJob));
+                      virDomainJobTypeToString(job),
+                      virDomainAsyncJobTypeToString(priv->job.asyncJob));
 
     if (priv->job.phase) {
         virBufferAsprintf(&attrBuf, " phase='%s'",
-                          qemuDomainAsyncJobPhaseToString(priv->job.asyncJob,
+                          virDomainAsyncJobPhaseToString(priv->job.asyncJob,
                                                           priv->job.phase));
     }
 
-    if (priv->job.asyncJob != QEMU_ASYNC_JOB_NONE)
+    if (priv->job.asyncJob != VIR_ASYNC_JOB_NONE)
         virBufferAsprintf(&attrBuf, " flags='0x%lx'", priv->job.apiFlags);
 
     if (priv->job.cb &&

@@ -1255,7 +1223,7 @@ qemuDomainObjPrivateXMLParseJob(virDomainObj *vm,
     if ((tmp = virXPathString("string(@type)", ctxt))) {
         int type;
 
-        if ((type = qemuDomainJobTypeFromString(tmp)) < 0) {
+        if ((type = virDomainJobTypeFromString(tmp)) < 0) {
             virReportError(VIR_ERR_INTERNAL_ERROR,
                            _("Unknown job type %s"), tmp);
             return -1;

@@ -1267,7 +1235,7 @@ qemuDomainObjPrivateXMLParseJob(virDomainObj *vm,
     if ((tmp = virXPathString("string(@async)", ctxt))) {
         int async;
 
-        if ((async = qemuDomainAsyncJobTypeFromString(tmp)) < 0) {
+        if ((async = virDomainAsyncJobTypeFromString(tmp)) < 0) {
             virReportError(VIR_ERR_INTERNAL_ERROR,
                            _("Unknown async job type %s"), tmp);
             return -1;

@@ -1276,7 +1244,7 @@ qemuDomainObjPrivateXMLParseJob(virDomainObj *vm,
     priv->job.asyncJob = async;
 
     if ((tmp = virXPathString("string(@phase)", ctxt))) {
-        priv->job.phase = qemuDomainAsyncJobPhaseFromString(async, tmp);
+        priv->job.phase = virDomainAsyncJobPhaseFromString(async, tmp);
         if (priv->job.phase < 0) {
             virReportError(VIR_ERR_INTERNAL_ERROR,
                            _("Unknown job phase %s"), tmp);
@@ -24,61 +24,14 @@
 
 #define JOB_MASK(job) (job == 0 ? 0 : 1 << (job - 1))
 #define QEMU_JOB_DEFAULT_MASK \
-    (JOB_MASK(QEMU_JOB_QUERY) | \
-     JOB_MASK(QEMU_JOB_DESTROY) | \
-     JOB_MASK(QEMU_JOB_ABORT))
+    (JOB_MASK(VIR_JOB_QUERY) | \
+     JOB_MASK(VIR_JOB_DESTROY) | \
+     JOB_MASK(VIR_JOB_ABORT))
 
 /* Jobs which have to be tracked in domain state XML. */
 #define QEMU_DOMAIN_TRACK_JOBS \
-    (JOB_MASK(QEMU_JOB_DESTROY) | \
-     JOB_MASK(QEMU_JOB_ASYNC))
-
-/* Only 1 job is allowed at any time
- * A job includes *all* monitor commands, even those just querying
- * information, not merely actions */
-typedef enum {
-    QEMU_JOB_NONE = 0,      /* Always set to 0 for easy if (jobActive) conditions */
-    QEMU_JOB_QUERY,         /* Doesn't change any state */
-    QEMU_JOB_DESTROY,       /* Destroys the domain (cannot be masked out) */
-    QEMU_JOB_SUSPEND,       /* Suspends (stops vCPUs) the domain */
-    QEMU_JOB_MODIFY,        /* May change state */
-    QEMU_JOB_ABORT,         /* Abort current async job */
-    QEMU_JOB_MIGRATION_OP,  /* Operation influencing outgoing migration */
-
-    /* The following two items must always be the last items before JOB_LAST */
-    QEMU_JOB_ASYNC,         /* Asynchronous job */
-    QEMU_JOB_ASYNC_NESTED,  /* Normal job within an async job */
-
-    QEMU_JOB_LAST
-} qemuDomainJob;
-VIR_ENUM_DECL(qemuDomainJob);
-
-typedef enum {
-    QEMU_AGENT_JOB_NONE = 0,  /* No agent job. */
-    QEMU_AGENT_JOB_QUERY,     /* Does not change state of domain */
-    QEMU_AGENT_JOB_MODIFY,    /* May change state of domain */
-
-    QEMU_AGENT_JOB_LAST
-} qemuDomainAgentJob;
-VIR_ENUM_DECL(qemuDomainAgentJob);
-
-/* Async job consists of a series of jobs that may change state. Independent
- * jobs that do not change state (and possibly others if explicitly allowed by
- * current async job) are allowed to be run even if async job is active.
- */
-typedef enum {
-    QEMU_ASYNC_JOB_NONE = 0,
-    QEMU_ASYNC_JOB_MIGRATION_OUT,
-    QEMU_ASYNC_JOB_MIGRATION_IN,
-    QEMU_ASYNC_JOB_SAVE,
-    QEMU_ASYNC_JOB_DUMP,
-    QEMU_ASYNC_JOB_SNAPSHOT,
-    QEMU_ASYNC_JOB_START,
-    QEMU_ASYNC_JOB_BACKUP,
-
-    QEMU_ASYNC_JOB_LAST
-} qemuDomainAsyncJob;
-VIR_ENUM_DECL(qemuDomainAsyncJob);
+    (JOB_MASK(VIR_JOB_DESTROY) | \
+     JOB_MASK(VIR_JOB_ASYNC))
 
 
 typedef enum {
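Note: JOB_MASK(job) expands to (job == 0 ? 0 : 1 << (job - 1)), so each sync-job value gets its own bit and VIR_JOB_NONE maps to no bit. A worked expansion under the enum ordering from hypervisor/domain_job.h above (arithmetic illustration, not code from the commit):

    JOB_MASK(VIR_JOB_NONE)    == 0         /* value 0: no bit */
    JOB_MASK(VIR_JOB_QUERY)   == 1 << 0    /* 0x01 */
    JOB_MASK(VIR_JOB_DESTROY) == 1 << 1    /* 0x02 */
    JOB_MASK(VIR_JOB_ABORT)   == 1 << 4    /* 0x10 */

    QEMU_JOB_DEFAULT_MASK == 0x01 | 0x02 | 0x10 == 0x13

While an async job holds the domain, only jobs whose bits are set in the mask (query, destroy and abort by default, plus anything added via qemuDomainObjSetAsyncJobMask()) may run.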
@ -144,21 +97,21 @@ struct _qemuDomainJobObj {
|
|||
|
||||
int jobsQueued;
|
||||
|
||||
/* The following members are for QEMU_JOB_* */
|
||||
qemuDomainJob active; /* Currently running job */
|
||||
/* The following members are for VIR_JOB_* */
|
||||
virDomainJob active; /* Currently running job */
|
||||
unsigned long long owner; /* Thread id which set current job */
|
||||
char *ownerAPI; /* The API which owns the job */
|
||||
unsigned long long started; /* When the current job started */
|
||||
|
||||
/* The following members are for QEMU_AGENT_JOB_* */
|
||||
qemuDomainAgentJob agentActive; /* Currently running agent job */
|
||||
/* The following members are for VIR_AGENT_JOB_* */
|
||||
virDomainAgentJob agentActive; /* Currently running agent job */
|
||||
unsigned long long agentOwner; /* Thread id which set current agent job */
|
||||
char *agentOwnerAPI; /* The API which owns the agent job */
|
||||
unsigned long long agentStarted; /* When the current agent job started */
|
||||
|
||||
/* The following members are for QEMU_ASYNC_JOB_* */
|
||||
/* The following members are for VIR_ASYNC_JOB_* */
|
||||
virCond asyncCond; /* Use to coordinate with async jobs */
|
||||
qemuDomainAsyncJob asyncJob; /* Currently active async job */
|
||||
virDomainAsyncJob asyncJob; /* Currently active async job */
|
||||
unsigned long long asyncOwner; /* Thread which set current async job */
|
||||
char *asyncOwnerAPI; /* The API which owns the async job */
|
||||
unsigned long long asyncStarted; /* When the current async job started */
|
||||
|
@ -177,9 +130,9 @@ struct _qemuDomainJobObj {
|
|||
void qemuDomainJobSetStatsType(virDomainJobData *jobData,
|
||||
qemuDomainJobStatsType type);
|
||||
|
||||
const char *qemuDomainAsyncJobPhaseToString(qemuDomainAsyncJob job,
|
||||
const char *virDomainAsyncJobPhaseToString(virDomainAsyncJob job,
|
||||
int phase);
|
||||
int qemuDomainAsyncJobPhaseFromString(qemuDomainAsyncJob job,
|
||||
int virDomainAsyncJobPhaseFromString(virDomainAsyncJob job,
|
||||
const char *phase);
|
||||
|
||||
void qemuDomainEventEmitJobCompleted(virQEMUDriver *driver,
|
||||
|
@@ -187,25 +140,25 @@ void qemuDomainEventEmitJobCompleted(virQEMUDriver *driver,

 int qemuDomainObjBeginJob(virQEMUDriver *driver,
                           virDomainObj *obj,
-                          qemuDomainJob job)
+                          virDomainJob job)
     G_GNUC_WARN_UNUSED_RESULT;
 int qemuDomainObjBeginAgentJob(virQEMUDriver *driver,
                                virDomainObj *obj,
-                               qemuDomainAgentJob agentJob)
+                               virDomainAgentJob agentJob)
     G_GNUC_WARN_UNUSED_RESULT;
 int qemuDomainObjBeginAsyncJob(virQEMUDriver *driver,
                                virDomainObj *obj,
-                               qemuDomainAsyncJob asyncJob,
+                               virDomainAsyncJob asyncJob,
                                virDomainJobOperation operation,
                                unsigned long apiFlags)
     G_GNUC_WARN_UNUSED_RESULT;
 int qemuDomainObjBeginNestedJob(virQEMUDriver *driver,
                                 virDomainObj *obj,
-                                qemuDomainAsyncJob asyncJob)
+                                virDomainAsyncJob asyncJob)
     G_GNUC_WARN_UNUSED_RESULT;
 int qemuDomainObjBeginJobNowait(virQEMUDriver *driver,
                                 virDomainObj *obj,
-                                qemuDomainJob job)
+                                virDomainJob job)
     G_GNUC_WARN_UNUSED_RESULT;

 void qemuDomainObjEndJob(virDomainObj *obj);
@@ -235,7 +188,7 @@ int qemuDomainJobDataToParams(virDomainJobData *jobData,
     ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2)
     ATTRIBUTE_NONNULL(3) ATTRIBUTE_NONNULL(4);

-bool qemuDomainTrackJob(qemuDomainJob job);
+bool qemuDomainTrackJob(virDomainJob job);

 void qemuDomainObjClearJob(qemuDomainJobObj *job);
 G_DEFINE_AUTO_CLEANUP_CLEAR_FUNC(qemuDomainJobObj, qemuDomainObjClearJob);
File diff suppressed because it is too large
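Aside: the header hunks above only swap the QEMU_JOB_* arguments of the job bitmasks for their VIR_JOB_* equivalents; the bit layout itself is untouched. As a minimal, self-contained sketch of how such JOB_MASK-style masks behave — assuming the driver's historical one-bit-per-job encoding (job 0, "none", maps to no bit), which this excerpt does not show — consider:

    #include <stdio.h>

    /* Assumed definition: one bit per non-NONE job */
    #define JOB_MASK(job) ((job) == 0 ? 0 : 1 << ((job) - 1))

    /* Enum order as declared by this commit's hypervisor header */
    enum { VIR_JOB_NONE, VIR_JOB_QUERY, VIR_JOB_DESTROY, VIR_JOB_SUSPEND,
           VIR_JOB_MODIFY, VIR_JOB_ABORT, VIR_JOB_MIGRATION_OP,
           VIR_JOB_ASYNC, VIR_JOB_ASYNC_NESTED };

    int main(void)
    {
        /* Mirrors the mask rebuilt in the hunk above: jobs that have to
         * be tracked in the domain state XML for daemon restarts */
        unsigned long long track = JOB_MASK(VIR_JOB_DESTROY) |
                                   JOB_MASK(VIR_JOB_ASYNC);

        printf("destroy tracked: %d\n", !!(track & JOB_MASK(VIR_JOB_DESTROY)));
        printf("query tracked:   %d\n", !!(track & JOB_MASK(VIR_JOB_QUERY)));
        return 0;
    }

Because only the enum prefix changes, every mask expression in the driver needs nothing more than the mechanical rename performed throughout this commit.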
@@ -369,7 +369,7 @@ qemuDomainChangeMediaLegacy(virQEMUDriver *driver,
 int
 qemuHotplugAttachDBusVMState(virQEMUDriver *driver,
                              virDomainObj *vm,
-                             qemuDomainAsyncJob asyncJob)
+                             virDomainAsyncJob asyncJob)
 {
     qemuDomainObjPrivate *priv = vm->privateData;
     g_autoptr(virJSONValue) props = NULL;
@@ -414,7 +414,7 @@ qemuHotplugAttachDBusVMState(virQEMUDriver *driver,
 int
 qemuHotplugRemoveDBusVMState(virQEMUDriver *driver,
                              virDomainObj *vm,
-                             qemuDomainAsyncJob asyncJob)
+                             virDomainAsyncJob asyncJob)
 {
     qemuDomainObjPrivate *priv = vm->privateData;
     int ret;
@@ -452,7 +452,7 @@ static int
 qemuHotplugAttachManagedPR(virQEMUDriver *driver,
                            virDomainObj *vm,
                            virStorageSource *src,
-                           qemuDomainAsyncJob asyncJob)
+                           virDomainAsyncJob asyncJob)
 {
     qemuDomainObjPrivate *priv = vm->privateData;
     g_autoptr(virJSONValue) props = NULL;
@@ -502,7 +502,7 @@ qemuHotplugAttachManagedPR(virQEMUDriver *driver,
 static int
 qemuHotplugRemoveManagedPR(virQEMUDriver *driver,
                            virDomainObj *vm,
-                           qemuDomainAsyncJob asyncJob)
+                           virDomainAsyncJob asyncJob)
 {
     qemuDomainObjPrivate *priv = vm->privateData;
     virErrorPtr orig_err;
@@ -672,7 +672,7 @@ qemuDomainChangeEjectableMedia(virQEMUDriver *driver,
     if (qemuDomainStorageSourceChainAccessAllow(driver, vm, newsrc) < 0)
         goto cleanup;

-    if (qemuHotplugAttachManagedPR(driver, vm, newsrc, QEMU_ASYNC_JOB_NONE) < 0)
+    if (qemuHotplugAttachManagedPR(driver, vm, newsrc, VIR_ASYNC_JOB_NONE) < 0)
         goto cleanup;

     if (virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_BLOCKDEV))
@@ -700,7 +700,7 @@ qemuDomainChangeEjectableMedia(virQEMUDriver *driver,

     /* remove PR manager object if unneeded */
     if (managedpr)
-        ignore_value(qemuHotplugRemoveManagedPR(driver, vm, QEMU_ASYNC_JOB_NONE));
+        ignore_value(qemuHotplugRemoveManagedPR(driver, vm, VIR_ASYNC_JOB_NONE));

     /* revert old image do the disk definition */
     if (oldsrc)
@@ -714,7 +714,7 @@ static qemuSnapshotDiskContext *
 qemuDomainAttachDiskGenericTransient(virDomainObj *vm,
                                      virDomainDiskDef *disk,
                                      GHashTable *blockNamedNodeData,
-                                     qemuDomainAsyncJob asyncJob)
+                                     virDomainAsyncJob asyncJob)
 {
     g_autoptr(qemuSnapshotDiskContext) snapctxt = NULL;
     g_autoptr(virDomainSnapshotDiskDef) snapdiskdef = NULL;
@@ -741,7 +741,7 @@ int
 qemuDomainAttachDiskGeneric(virQEMUDriver *driver,
                             virDomainObj *vm,
                             virDomainDiskDef *disk,
-                            qemuDomainAsyncJob asyncJob)
+                            virDomainAsyncJob asyncJob)
 {
     g_autoptr(qemuBlockStorageSourceChainData) data = NULL;
     qemuDomainObjPrivate *priv = vm->privateData;
@@ -1089,10 +1089,10 @@ qemuDomainAttachDeviceDiskLiveInternal(virQEMUDriver *driver,
     if (qemuDomainPrepareDiskSource(disk, priv, cfg) < 0)
         goto cleanup;

-    if (qemuHotplugAttachManagedPR(driver, vm, disk->src, QEMU_ASYNC_JOB_NONE) < 0)
+    if (qemuHotplugAttachManagedPR(driver, vm, disk->src, VIR_ASYNC_JOB_NONE) < 0)
         goto cleanup;

-    ret = qemuDomainAttachDiskGeneric(driver, vm, disk, QEMU_ASYNC_JOB_NONE);
+    ret = qemuDomainAttachDiskGeneric(driver, vm, disk, VIR_ASYNC_JOB_NONE);

     virDomainAuditDisk(vm, NULL, disk->src, "attach", ret == 0);

@@ -1113,7 +1113,7 @@ qemuDomainAttachDeviceDiskLiveInternal(virQEMUDriver *driver,
         ignore_value(qemuDomainStorageSourceChainAccessRevoke(driver, vm, disk->src));

         if (virStorageSourceChainHasManagedPR(disk->src))
-            ignore_value(qemuHotplugRemoveManagedPR(driver, vm, QEMU_ASYNC_JOB_NONE));
+            ignore_value(qemuHotplugRemoveManagedPR(driver, vm, VIR_ASYNC_JOB_NONE));
     }
     qemuDomainSecretDiskDestroy(disk);

@@ -1774,7 +1774,7 @@ qemuDomainAttachHostPCIDevice(virQEMUDriver *driver,
 void
 qemuDomainDelTLSObjects(virQEMUDriver *driver,
                         virDomainObj *vm,
-                        qemuDomainAsyncJob asyncJob,
+                        virDomainAsyncJob asyncJob,
                         const char *secAlias,
                         const char *tlsAlias)
 {
@@ -1805,7 +1805,7 @@ qemuDomainDelTLSObjects(virQEMUDriver *driver,
 int
 qemuDomainAddTLSObjects(virQEMUDriver *driver,
                         virDomainObj *vm,
-                        qemuDomainAsyncJob asyncJob,
+                        virDomainAsyncJob asyncJob,
                         virJSONValue **secProps,
                         virJSONValue **tlsProps)
 {
@@ -1907,7 +1907,7 @@ qemuDomainAddChardevTLSObjects(virQEMUDriver *driver,

     dev->data.tcp.tlscreds = true;

-    if (qemuDomainAddTLSObjects(driver, vm, QEMU_ASYNC_JOB_NONE,
+    if (qemuDomainAddTLSObjects(driver, vm, VIR_ASYNC_JOB_NONE,
                                 &secProps, &tlsProps) < 0)
         return -1;

@@ -2013,7 +2013,7 @@ int qemuDomainAttachRedirdevDevice(virQEMUDriver *driver,
         ignore_value(qemuMonitorDetachCharDev(priv->mon, charAlias));
         qemuDomainObjExitMonitor(vm);
         virErrorRestore(&orig_err);
-        qemuDomainDelTLSObjects(driver, vm, QEMU_ASYNC_JOB_NONE,
+        qemuDomainDelTLSObjects(driver, vm, VIR_ASYNC_JOB_NONE,
                                 secAlias, tlsAlias);
         goto audit;
     }
@@ -2308,7 +2308,7 @@ qemuDomainAttachChrDevice(virQEMUDriver *driver,
         qemuDomainObjExitMonitor(vm);
         virErrorRestore(&orig_err);

-        qemuDomainDelTLSObjects(driver, vm, QEMU_ASYNC_JOB_NONE,
+        qemuDomainDelTLSObjects(driver, vm, VIR_ASYNC_JOB_NONE,
                                 secAlias, tlsAlias);
         goto audit;
     }
@@ -2414,7 +2414,7 @@ qemuDomainAttachRNGDevice(virQEMUDriver *driver,
         qemuDomainObjExitMonitor(vm);
         virErrorRestore(&orig_err);

-        qemuDomainDelTLSObjects(driver, vm, QEMU_ASYNC_JOB_NONE,
+        qemuDomainDelTLSObjects(driver, vm, VIR_ASYNC_JOB_NONE,
                                 secAlias, tlsAlias);
         goto audit;
     }
@@ -2510,14 +2510,14 @@ qemuDomainAttachMemory(virQEMUDriver *driver,
     virObjectEventStateQueue(driver->domainEventState, event);

     /* fix the balloon size */
-    ignore_value(qemuProcessRefreshBalloonState(driver, vm, QEMU_ASYNC_JOB_NONE));
+    ignore_value(qemuProcessRefreshBalloonState(driver, vm, VIR_ASYNC_JOB_NONE));

     /* mem is consumed by vm->def */
     mem = NULL;

     /* this step is best effort, removing the device would be so much trouble */
     ignore_value(qemuDomainUpdateMemoryDeviceInfo(driver, vm,
-                                                  QEMU_ASYNC_JOB_NONE));
+                                                  VIR_ASYNC_JOB_NONE));

     ret = 0;

@@ -4353,7 +4353,7 @@ qemuDomainChangeGraphics(virQEMUDriver *driver,
                                             VIR_DOMAIN_GRAPHICS_TYPE_VNC,
                                             &dev->data.vnc.auth,
                                             cfg->vncPassword,
-                                            QEMU_ASYNC_JOB_NONE) < 0)
+                                            VIR_ASYNC_JOB_NONE) < 0)
             return -1;

         /* Steal the new dev's char * reference */
@@ -4400,7 +4400,7 @@ qemuDomainChangeGraphics(virQEMUDriver *driver,
                                             VIR_DOMAIN_GRAPHICS_TYPE_SPICE,
                                             &dev->data.spice.auth,
                                             cfg->spicePassword,
-                                            QEMU_ASYNC_JOB_NONE) < 0)
+                                            VIR_ASYNC_JOB_NONE) < 0)
             return -1;

         /* Steal the new dev's char * reference */
@@ -4532,7 +4532,7 @@ qemuDomainRemoveDiskDevice(virQEMUDriver *driver,
     qemuDomainStorageSourceChainAccessRevoke(driver, vm, disk->src);

     if (virStorageSourceChainHasManagedPR(disk->src) &&
-        qemuHotplugRemoveManagedPR(driver, vm, QEMU_ASYNC_JOB_NONE) < 0)
+        qemuHotplugRemoveManagedPR(driver, vm, VIR_ASYNC_JOB_NONE) < 0)
         goto cleanup;

     if (disk->transient) {
@@ -4619,7 +4619,7 @@ qemuDomainRemoveMemoryDevice(virQEMUDriver *driver,
     virDomainMemoryDefFree(mem);

     /* fix the balloon size */
-    ignore_value(qemuProcessRefreshBalloonState(driver, vm, QEMU_ASYNC_JOB_NONE));
+    ignore_value(qemuProcessRefreshBalloonState(driver, vm, VIR_ASYNC_JOB_NONE));

     /* decrease the mlock limit after memory unplug if necessary */
     ignore_value(qemuDomainAdjustMaxMemLock(vm, false));

@@ -6296,7 +6296,7 @@ qemuDomainRemoveVcpu(virQEMUDriver *driver,
     virErrorPtr save_error = NULL;
     size_t i;

-    if (qemuDomainRefreshVcpuInfo(driver, vm, QEMU_ASYNC_JOB_NONE, false) < 0)
+    if (qemuDomainRefreshVcpuInfo(driver, vm, VIR_ASYNC_JOB_NONE, false) < 0)
         return -1;

     /* validation requires us to set the expected state prior to calling it */
@@ -6441,7 +6441,7 @@ qemuDomainHotplugAddVcpu(virQEMUDriver *driver,
     /* start outputting of the new XML element to allow keeping unpluggability */
     vm->def->individualvcpus = true;

-    if (qemuDomainRefreshVcpuInfo(driver, vm, QEMU_ASYNC_JOB_NONE, false) < 0)
+    if (qemuDomainRefreshVcpuInfo(driver, vm, VIR_ASYNC_JOB_NONE, false) < 0)
         return -1;

     /* validation requires us to set the expected state prior to calling it */

@@ -33,13 +33,13 @@ int qemuDomainChangeEjectableMedia(virQEMUDriver *driver,

 void qemuDomainDelTLSObjects(virQEMUDriver *driver,
                              virDomainObj *vm,
-                             qemuDomainAsyncJob asyncJob,
+                             virDomainAsyncJob asyncJob,
                              const char *secAlias,
                              const char *tlsAlias);

 int qemuDomainAddTLSObjects(virQEMUDriver *driver,
                             virDomainObj *vm,
-                            qemuDomainAsyncJob asyncJob,
+                            virDomainAsyncJob asyncJob,
                             virJSONValue **secProps,
                             virJSONValue **tlsProps);

@@ -61,7 +61,7 @@ int qemuDomainAttachDeviceDiskLive(virQEMUDriver *driver,
 int qemuDomainAttachDiskGeneric(virQEMUDriver *driver,
                                 virDomainObj *vm,
                                 virDomainDiskDef *disk,
-                                qemuDomainAsyncJob asyncJob);
+                                virDomainAsyncJob asyncJob);

 int qemuDomainAttachNetDevice(virQEMUDriver *driver,
                               virDomainObj *vm,
@@ -164,11 +164,11 @@ unsigned long long qemuDomainGetUnplugTimeout(virDomainObj *vm) G_GNUC_NO_INLINE

 int qemuHotplugAttachDBusVMState(virQEMUDriver *driver,
                                  virDomainObj *vm,
-                                 qemuDomainAsyncJob asyncJob);
+                                 virDomainAsyncJob asyncJob);

 int qemuHotplugRemoveDBusVMState(virQEMUDriver *driver,
                                  virDomainObj *vm,
-                                 qemuDomainAsyncJob asyncJob);
+                                 virDomainAsyncJob asyncJob);

 int qemuDomainChangeMemoryRequestedSize(virQEMUDriver *driver,
                                         virDomainObj *vm,

@@ -84,7 +84,7 @@ VIR_ENUM_IMPL(qemuMigrationJobPhase,
 static int
 qemuMigrationJobStart(virQEMUDriver *driver,
                       virDomainObj *vm,
-                      qemuDomainAsyncJob job,
+                      virDomainAsyncJob job,
                       unsigned long apiFlags)
     ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2) G_GNUC_WARN_UNUSED_RESULT;

@@ -104,7 +104,7 @@ qemuMigrationJobContinue(virDomainObj *obj)

 static bool
 qemuMigrationJobIsActive(virDomainObj *vm,
-                         qemuDomainAsyncJob job)
+                         virDomainAsyncJob job)
     ATTRIBUTE_NONNULL(1);

 static void
@@ -149,7 +149,7 @@ qemuMigrationSrcRestoreDomainState(virQEMUDriver *driver, virDomainObj *vm)
         /* we got here through some sort of failure; start the domain again */
         if (qemuProcessStartCPUs(driver, vm,
                                  VIR_DOMAIN_RUNNING_MIGRATION_CANCELED,
-                                 QEMU_ASYNC_JOB_MIGRATION_OUT) < 0) {
+                                 VIR_ASYNC_JOB_MIGRATION_OUT) < 0) {
             /* Hm, we already know we are in error here. We don't want to
              * overwrite the previous error, though, so we just throw something
              * to the logs and hope for the best */
@@ -501,7 +501,7 @@ qemuMigrationDstStartNBDServer(virQEMUDriver *driver,
     }

     if (qemuDomainObjEnterMonitorAsync(driver, vm,
-                                       QEMU_ASYNC_JOB_MIGRATION_IN) < 0)
+                                       VIR_ASYNC_JOB_MIGRATION_IN) < 0)
         goto cleanup;

     if (!server_started) {
@@ -542,7 +542,7 @@ qemuMigrationDstStopNBDServer(virQEMUDriver *driver,
         return 0;

     if (qemuDomainObjEnterMonitorAsync(driver, vm,
-                                       QEMU_ASYNC_JOB_MIGRATION_IN) < 0)
+                                       VIR_ASYNC_JOB_MIGRATION_IN) < 0)
         return -1;

     if (qemuMonitorNBDServerStop(priv->mon) < 0)
@@ -583,7 +583,7 @@ qemuMigrationNBDReportMirrorError(qemuBlockJobData *job,
  */
 static int
 qemuMigrationSrcNBDStorageCopyReady(virDomainObj *vm,
-                                    qemuDomainAsyncJob asyncJob)
+                                    virDomainAsyncJob asyncJob)
 {
     size_t i;
     size_t notReady = 0;
@@ -638,7 +638,7 @@ qemuMigrationSrcNBDStorageCopyReady(virDomainObj *vm,
  */
 static int
 qemuMigrationSrcNBDCopyCancelled(virDomainObj *vm,
-                                 qemuDomainAsyncJob asyncJob,
+                                 virDomainAsyncJob asyncJob,
                                  bool abortMigration)
 {
     size_t i;
@@ -722,7 +722,7 @@ qemuMigrationSrcNBDCopyCancelOne(virQEMUDriver *driver,
                                  virDomainDiskDef *disk,
                                  qemuBlockJobData *job,
                                  bool abortMigration,
-                                 qemuDomainAsyncJob asyncJob)
+                                 virDomainAsyncJob asyncJob)
 {
     qemuDomainObjPrivate *priv = vm->privateData;
     int rv;
@@ -772,7 +772,7 @@ static int
 qemuMigrationSrcNBDCopyCancel(virQEMUDriver *driver,
                               virDomainObj *vm,
                               bool abortMigration,
-                              qemuDomainAsyncJob asyncJob,
+                              virDomainAsyncJob asyncJob,
                               virConnectPtr dconn)
 {
     virErrorPtr err = NULL;
@@ -855,7 +855,7 @@ qemuMigrationSrcNBDCopyCancel(virQEMUDriver *driver,

 static int
 qemuMigrationSrcCancelRemoveTempBitmaps(virDomainObj *vm,
-                                        qemuDomainAsyncJob asyncJob)
+                                        virDomainAsyncJob asyncJob)
 {
     qemuDomainObjPrivate *priv = vm->privateData;
     virQEMUDriver *driver = priv->driver;
@@ -952,7 +952,7 @@ qemuMigrationSrcNBDStorageCopyBlockdev(virQEMUDriver *driver,
         return -1;

     if (qemuDomainObjEnterMonitorAsync(driver, vm,
-                                       QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
+                                       VIR_ASYNC_JOB_MIGRATION_OUT) < 0)
         return -1;

     mon_ret = qemuBlockStorageSourceAttachApply(qemuDomainGetMonitor(vm), data);
@@ -1001,7 +1001,7 @@ qemuMigrationSrcNBDStorageCopyDriveMirror(virQEMUDriver *driver,
     }

     if (qemuDomainObjEnterMonitorAsync(driver, vm,
-                                       QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
+                                       VIR_ASYNC_JOB_MIGRATION_OUT) < 0)
         return -1;

     mon_ret = qemuMonitorDriveMirror(qemuDomainGetMonitor(vm),
@@ -1199,14 +1199,14 @@ qemuMigrationSrcNBDStorageCopy(virQEMUDriver *driver,
         }
     }

-    while ((rv = qemuMigrationSrcNBDStorageCopyReady(vm, QEMU_ASYNC_JOB_MIGRATION_OUT)) != 1) {
+    while ((rv = qemuMigrationSrcNBDStorageCopyReady(vm, VIR_ASYNC_JOB_MIGRATION_OUT)) != 1) {
         if (rv < 0)
             return -1;

         if (priv->job.abortJob) {
             priv->job.current->status = VIR_DOMAIN_JOB_STATUS_CANCELED;
             virReportError(VIR_ERR_OPERATION_ABORTED, _("%s: %s"),
-                           qemuDomainAsyncJobTypeToString(priv->job.asyncJob),
+                           virDomainAsyncJobTypeToString(priv->job.asyncJob),
                            _("canceled by client"));
             return -1;
         }
@@ -1221,7 +1221,7 @@ qemuMigrationSrcNBDStorageCopy(virQEMUDriver *driver,
         return -1;
     }

-    qemuMigrationSrcFetchMirrorStats(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT,
+    qemuMigrationSrcFetchMirrorStats(driver, vm, VIR_ASYNC_JOB_MIGRATION_OUT,
                                      priv->job.current);
     return 0;
 }
@@ -1599,7 +1599,7 @@ qemuMigrationAnyPostcopyFailed(virQEMUDriver *driver,
     if (state == VIR_DOMAIN_RUNNING) {
         if (qemuProcessStopCPUs(driver, vm,
                                 VIR_DOMAIN_PAUSED_POSTCOPY_FAILED,
-                                QEMU_ASYNC_JOB_MIGRATION_IN) < 0)
+                                VIR_ASYNC_JOB_MIGRATION_IN) < 0)
             VIR_WARN("Unable to pause guest CPUs for %s", vm->def->name);
     } else {
         virDomainObjSetState(vm, VIR_DOMAIN_PAUSED,
@@ -1673,7 +1673,7 @@ qemuMigrationUpdateJobType(virDomainJobData *jobData)
 int
 qemuMigrationAnyFetchStats(virQEMUDriver *driver,
                            virDomainObj *vm,
-                           qemuDomainAsyncJob asyncJob,
+                           virDomainAsyncJob asyncJob,
                            virDomainJobData *jobData,
                            char **error)
 {
@@ -1703,23 +1703,23 @@ qemuMigrationJobName(virDomainObj *vm)
     qemuDomainObjPrivate *priv = vm->privateData;

     switch (priv->job.asyncJob) {
-    case QEMU_ASYNC_JOB_MIGRATION_OUT:
+    case VIR_ASYNC_JOB_MIGRATION_OUT:
         return _("migration out job");
-    case QEMU_ASYNC_JOB_SAVE:
+    case VIR_ASYNC_JOB_SAVE:
         return _("domain save job");
-    case QEMU_ASYNC_JOB_DUMP:
+    case VIR_ASYNC_JOB_DUMP:
         return _("domain core dump job");
-    case QEMU_ASYNC_JOB_NONE:
+    case VIR_ASYNC_JOB_NONE:
         return _("undefined");
-    case QEMU_ASYNC_JOB_MIGRATION_IN:
+    case VIR_ASYNC_JOB_MIGRATION_IN:
         return _("migration in job");
-    case QEMU_ASYNC_JOB_SNAPSHOT:
+    case VIR_ASYNC_JOB_SNAPSHOT:
         return _("snapshot job");
-    case QEMU_ASYNC_JOB_START:
+    case VIR_ASYNC_JOB_START:
         return _("start job");
-    case QEMU_ASYNC_JOB_BACKUP:
+    case VIR_ASYNC_JOB_BACKUP:
         return _("backup job");
-    case QEMU_ASYNC_JOB_LAST:
+    case VIR_ASYNC_JOB_LAST:
     default:
         return _("job");
     }
@@ -1729,7 +1729,7 @@ qemuMigrationJobName(virDomainObj *vm)
 static int
 qemuMigrationJobCheckStatus(virQEMUDriver *driver,
                             virDomainObj *vm,
-                            qemuDomainAsyncJob asyncJob)
+                            virDomainAsyncJob asyncJob)
 {
     qemuDomainObjPrivate *priv = vm->privateData;
     virDomainJobData *jobData = priv->job.current;
@@ -1793,7 +1793,7 @@ enum qemuMigrationCompletedFlags {
 static int
 qemuMigrationAnyCompleted(virQEMUDriver *driver,
                           virDomainObj *vm,
-                          qemuDomainAsyncJob asyncJob,
+                          virDomainAsyncJob asyncJob,
                           virConnectPtr dconn,
                           unsigned int flags)
 {
@@ -1884,7 +1884,7 @@ qemuMigrationAnyCompleted(virQEMUDriver *driver,
 static int
 qemuMigrationSrcWaitForCompletion(virQEMUDriver *driver,
                                   virDomainObj *vm,
-                                  qemuDomainAsyncJob asyncJob,
+                                  virDomainAsyncJob asyncJob,
                                   virConnectPtr dconn,
                                   unsigned int flags)
 {
@@ -1925,7 +1925,7 @@ qemuMigrationSrcWaitForCompletion(virQEMUDriver *driver,
         priv->job.completed = virDomainJobDataCopy(jobData);
         priv->job.completed->status = VIR_DOMAIN_JOB_STATUS_COMPLETED;

-        if (asyncJob != QEMU_ASYNC_JOB_MIGRATION_OUT &&
+        if (asyncJob != VIR_ASYNC_JOB_MIGRATION_OUT &&
             jobData->status == VIR_DOMAIN_JOB_STATUS_HYPERVISOR_COMPLETED)
             jobData->status = VIR_DOMAIN_JOB_STATUS_COMPLETED;

@@ -1936,7 +1936,7 @@ qemuMigrationSrcWaitForCompletion(virQEMUDriver *driver,
 static int
 qemuMigrationDstWaitForCompletion(virQEMUDriver *driver,
                                   virDomainObj *vm,
-                                  qemuDomainAsyncJob asyncJob,
+                                  virDomainAsyncJob asyncJob,
                                   bool postcopy)
 {
     qemuDomainObjPrivate *priv = vm->privateData;
@@ -2046,7 +2046,7 @@ qemuMigrationSrcGraphicsRelocate(virQEMUDriver *driver,
     }

     if (qemuDomainObjEnterMonitorAsync(driver, vm,
-                                       QEMU_ASYNC_JOB_MIGRATION_OUT) == 0) {
+                                       VIR_ASYNC_JOB_MIGRATION_OUT) == 0) {
         qemuDomainJobPrivate *jobPriv = priv->job.privateData;

         ret = qemuMonitorGraphicsRelocate(priv->mon, type, listenAddress,
@@ -2139,7 +2139,7 @@ int
 qemuMigrationDstRun(virQEMUDriver *driver,
                     virDomainObj *vm,
                     const char *uri,
-                    qemuDomainAsyncJob asyncJob)
+                    virDomainAsyncJob asyncJob)
 {
     qemuDomainObjPrivate *priv = vm->privateData;
     int rv;
@@ -2160,7 +2160,7 @@ qemuMigrationDstRun(virQEMUDriver *driver,
     if (rv < 0)
         return -1;

-    if (asyncJob == QEMU_ASYNC_JOB_MIGRATION_IN) {
+    if (asyncJob == VIR_ASYNC_JOB_MIGRATION_IN) {
         /* qemuMigrationDstWaitForCompletion is called from the Finish phase */
         return 0;
     }
@@ -2189,11 +2189,11 @@ qemuMigrationSrcCleanup(virDomainObj *vm,

     VIR_DEBUG("vm=%s, conn=%p, asyncJob=%s, phase=%s",
               vm->def->name, conn,
-              qemuDomainAsyncJobTypeToString(priv->job.asyncJob),
-              qemuDomainAsyncJobPhaseToString(priv->job.asyncJob,
+              virDomainAsyncJobTypeToString(priv->job.asyncJob),
+              virDomainAsyncJobPhaseToString(priv->job.asyncJob,
                                               priv->job.phase));

-    if (!qemuMigrationJobIsActive(vm, QEMU_ASYNC_JOB_MIGRATION_OUT))
+    if (!qemuMigrationJobIsActive(vm, VIR_ASYNC_JOB_MIGRATION_OUT))
         return;

     VIR_DEBUG("The connection which started outgoing migration of domain %s"
@@ -2210,7 +2210,7 @@ qemuMigrationSrcCleanup(virDomainObj *vm,
         VIR_WARN("Migration of domain %s finished but we don't know if the"
                  " domain was successfully started on destination or not",
                  vm->def->name);
-        qemuMigrationParamsReset(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT,
+        qemuMigrationParamsReset(driver, vm, VIR_ASYNC_JOB_MIGRATION_OUT,
                                  jobPriv->migParams, priv->job.apiFlags);
         /* clear the job and let higher levels decide what to do */
         qemuMigrationJobFinish(vm);
@@ -2344,11 +2344,11 @@ qemuMigrationSrcBeginPhase(virQEMUDriver *driver,
               cookieout, cookieoutlen, nmigrate_disks,
               migrate_disks, flags);

-    /* Only set the phase if we are inside QEMU_ASYNC_JOB_MIGRATION_OUT.
+    /* Only set the phase if we are inside VIR_ASYNC_JOB_MIGRATION_OUT.
      * Otherwise we will start the async job later in the perform phase losing
      * change protection.
      */
-    if (priv->job.asyncJob == QEMU_ASYNC_JOB_MIGRATION_OUT)
+    if (priv->job.asyncJob == VIR_ASYNC_JOB_MIGRATION_OUT)
         qemuMigrationJobSetPhase(vm, QEMU_MIGRATION_PHASE_BEGIN3);

     if (!qemuMigrationSrcIsAllowed(driver, vm, true, flags))
@@ -2505,7 +2505,7 @@ qemuMigrationSrcBegin(virConnectPtr conn,
     virQEMUDriver *driver = conn->privateData;
     g_autoptr(virQEMUDriverConfig) cfg = virQEMUDriverGetConfig(driver);
     char *xml = NULL;
-    qemuDomainAsyncJob asyncJob;
+    virDomainAsyncJob asyncJob;

     if (cfg->migrateTLSForce &&
         !(flags & VIR_MIGRATE_TUNNELLED) &&
@@ -2516,14 +2516,14 @@ qemuMigrationSrcBegin(virConnectPtr conn,
     }

     if ((flags & VIR_MIGRATE_CHANGE_PROTECTION)) {
-        if (qemuMigrationJobStart(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT,
+        if (qemuMigrationJobStart(driver, vm, VIR_ASYNC_JOB_MIGRATION_OUT,
                                   flags) < 0)
             goto cleanup;
-        asyncJob = QEMU_ASYNC_JOB_MIGRATION_OUT;
+        asyncJob = VIR_ASYNC_JOB_MIGRATION_OUT;
     } else {
-        if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+        if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_MODIFY) < 0)
             goto cleanup;
-        asyncJob = QEMU_ASYNC_JOB_NONE;
+        asyncJob = VIR_ASYNC_JOB_NONE;
     }

     qemuMigrationSrcStoreDomainState(vm);
@@ -2583,13 +2583,13 @@ qemuMigrationDstPrepareCleanup(virQEMUDriver *driver,
     VIR_DEBUG("driver=%p, vm=%s, job=%s, asyncJob=%s",
               driver,
               vm->def->name,
-              qemuDomainJobTypeToString(priv->job.active),
-              qemuDomainAsyncJobTypeToString(priv->job.asyncJob));
+              virDomainJobTypeToString(priv->job.active),
+              virDomainAsyncJobTypeToString(priv->job.asyncJob));

     virPortAllocatorRelease(priv->migrationPort);
     priv->migrationPort = 0;

-    if (!qemuMigrationJobIsActive(vm, QEMU_ASYNC_JOB_MIGRATION_IN))
+    if (!qemuMigrationJobIsActive(vm, VIR_ASYNC_JOB_MIGRATION_IN))
         return;
     qemuDomainObjDiscardAsyncJob(vm);
 }
@@ -2694,7 +2694,7 @@ qemuMigrationDstPrepareAnyBlockDirtyBitmaps(virDomainObj *vm,
     if (qemuMigrationCookieBlockDirtyBitmapsMatchDisks(vm->def, mig->blockDirtyBitmaps) < 0)
         return -1;

-    if (!(blockNamedNodeData = qemuBlockGetNamedNodeData(vm, QEMU_ASYNC_JOB_MIGRATION_IN)))
+    if (!(blockNamedNodeData = qemuBlockGetNamedNodeData(vm, VIR_ASYNC_JOB_MIGRATION_IN)))
         return -1;

     for (nextdisk = mig->blockDirtyBitmaps; nextdisk; nextdisk = nextdisk->next) {
@@ -2925,7 +2925,7 @@ qemuMigrationDstPrepareAny(virQEMUDriver *driver,
                                !!(flags & VIR_MIGRATE_NON_SHARED_INC)) < 0)
         goto cleanup;

-    if (qemuMigrationJobStart(driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN,
+    if (qemuMigrationJobStart(driver, vm, VIR_ASYNC_JOB_MIGRATION_IN,
                               flags) < 0)
         goto cleanup;
     qemuMigrationJobSetPhase(vm, QEMU_MIGRATION_PHASE_PREPARE);
@@ -2942,7 +2942,7 @@ qemuMigrationDstPrepareAny(virQEMUDriver *driver,

     startFlags = VIR_QEMU_PROCESS_START_AUTODESTROY;

-    if (qemuProcessInit(driver, vm, mig->cpu, QEMU_ASYNC_JOB_MIGRATION_IN,
+    if (qemuProcessInit(driver, vm, mig->cpu, VIR_ASYNC_JOB_MIGRATION_IN,
                         true, startFlags) < 0)
         goto stopjob;
     stopProcess = true;
@@ -2958,7 +2958,7 @@ qemuMigrationDstPrepareAny(virQEMUDriver *driver,
     if (qemuProcessPrepareHost(driver, vm, startFlags) < 0)
         goto stopjob;

-    rv = qemuProcessLaunch(dconn, driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN,
+    rv = qemuProcessLaunch(dconn, driver, vm, VIR_ASYNC_JOB_MIGRATION_IN,
                            incoming, NULL,
                            VIR_NETDEV_VPORT_PROFILE_OP_MIGRATE_IN_START,
                            startFlags);
@@ -2987,7 +2987,7 @@ qemuMigrationDstPrepareAny(virQEMUDriver *driver,
     if (qemuMigrationDstPrepareAnyBlockDirtyBitmaps(vm, mig, migParams, flags) < 0)
         goto stopjob;

-    if (qemuMigrationParamsCheck(driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN,
+    if (qemuMigrationParamsCheck(driver, vm, VIR_ASYNC_JOB_MIGRATION_IN,
                                  migParams, mig->caps->automatic) < 0)
         goto stopjob;

@@ -2995,7 +2995,7 @@ qemuMigrationDstPrepareAny(virQEMUDriver *driver,
      * set the migration TLS parameters */
     if (flags & VIR_MIGRATE_TLS) {
         if (qemuMigrationParamsEnableTLS(driver, vm, true,
-                                         QEMU_ASYNC_JOB_MIGRATION_IN,
+                                         VIR_ASYNC_JOB_MIGRATION_IN,
                                          &tlsAlias, NULL,
                                          migParams) < 0)
             goto stopjob;
@@ -3004,7 +3004,7 @@ qemuMigrationDstPrepareAny(virQEMUDriver *driver,
             goto stopjob;
     }

-    if (qemuMigrationParamsApply(driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN,
+    if (qemuMigrationParamsApply(driver, vm, VIR_ASYNC_JOB_MIGRATION_IN,
                                  migParams) < 0)
         goto stopjob;

@@ -3042,10 +3042,10 @@ qemuMigrationDstPrepareAny(virQEMUDriver *driver,

     if (incoming->deferredURI &&
         qemuMigrationDstRun(driver, vm, incoming->deferredURI,
-                            QEMU_ASYNC_JOB_MIGRATION_IN) < 0)
+                            VIR_ASYNC_JOB_MIGRATION_IN) < 0)
         goto stopjob;

-    if (qemuProcessFinishStartup(driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN,
+    if (qemuProcessFinishStartup(driver, vm, VIR_ASYNC_JOB_MIGRATION_IN,
                                  false, VIR_DOMAIN_PAUSED_MIGRATION) < 0)
         goto stopjob;

@@ -3110,7 +3110,7 @@ qemuMigrationDstPrepareAny(virQEMUDriver *driver,
     return ret;

 stopjob:
-    qemuMigrationParamsReset(driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN,
+    qemuMigrationParamsReset(driver, vm, VIR_ASYNC_JOB_MIGRATION_IN,
                              jobPriv->migParams, priv->job.apiFlags);

     if (stopProcess) {
@@ -3119,7 +3119,7 @@ qemuMigrationDstPrepareAny(virQEMUDriver *driver,
         stopFlags |= VIR_QEMU_PROCESS_STOP_NO_RELABEL;
         virDomainAuditStart(vm, "migrated", false);
         qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_FAILED,
-                        QEMU_ASYNC_JOB_MIGRATION_IN, stopFlags);
+                        VIR_ASYNC_JOB_MIGRATION_IN, stopFlags);
     }

     qemuMigrationJobFinish(vm);
@@ -3425,7 +3425,7 @@ qemuMigrationSrcConfirmPhase(virQEMUDriver *driver,
      */
     if (virDomainObjGetState(vm, &reason) == VIR_DOMAIN_PAUSED &&
         reason == VIR_DOMAIN_PAUSED_POSTCOPY &&
-        qemuMigrationAnyFetchStats(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT,
+        qemuMigrationAnyFetchStats(driver, vm, VIR_ASYNC_JOB_MIGRATION_OUT,
                                    jobData, NULL) < 0)
         VIR_WARN("Could not refresh migration statistics");

@@ -3448,7 +3448,7 @@ qemuMigrationSrcConfirmPhase(virQEMUDriver *driver,
         qemuMigrationSrcWaitForSpice(vm);

         qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_MIGRATED,
-                        QEMU_ASYNC_JOB_MIGRATION_OUT,
+                        VIR_ASYNC_JOB_MIGRATION_OUT,
                         VIR_QEMU_PROCESS_STOP_MIGRATED);
         virDomainAuditStop(vm, "migrated");

@@ -3465,7 +3465,7 @@ qemuMigrationSrcConfirmPhase(virQEMUDriver *driver,

         /* cancel any outstanding NBD jobs */
         qemuMigrationSrcNBDCopyCancel(driver, vm, false,
-                                      QEMU_ASYNC_JOB_MIGRATION_OUT, NULL);
+                                      VIR_ASYNC_JOB_MIGRATION_OUT, NULL);

         virErrorRestore(&orig_err);

@@ -3475,7 +3475,7 @@ qemuMigrationSrcConfirmPhase(virQEMUDriver *driver,
         else
             qemuMigrationSrcRestoreDomainState(driver, vm);

-        qemuMigrationParamsReset(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT,
+        qemuMigrationParamsReset(driver, vm, VIR_ASYNC_JOB_MIGRATION_OUT,
                                  jobPriv->migParams, priv->job.apiFlags);

         qemuDomainSaveStatus(vm);
@@ -3496,7 +3496,7 @@ qemuMigrationSrcConfirm(virQEMUDriver *driver,
     g_autoptr(virQEMUDriverConfig) cfg = virQEMUDriverGetConfig(driver);
     int ret = -1;

-    if (!qemuMigrationJobIsActive(vm, QEMU_ASYNC_JOB_MIGRATION_OUT))
+    if (!qemuMigrationJobIsActive(vm, VIR_ASYNC_JOB_MIGRATION_OUT))
         goto cleanup;

     if (cancelled)
@@ -3816,7 +3816,7 @@ static int
 qemuMigrationSrcContinue(virQEMUDriver *driver,
                          virDomainObj *vm,
                          qemuMonitorMigrationStatus status,
-                         qemuDomainAsyncJob asyncJob)
+                         virDomainAsyncJob asyncJob)
 {
     qemuDomainObjPrivate *priv = vm->privateData;
     int ret;
@@ -3841,10 +3841,10 @@ qemuMigrationSetDBusVMState(virQEMUDriver *driver,
     if (priv->dbusVMStateIds) {
         int rv;

-        if (qemuHotplugAttachDBusVMState(driver, vm, QEMU_ASYNC_JOB_NONE) < 0)
+        if (qemuHotplugAttachDBusVMState(driver, vm, VIR_ASYNC_JOB_NONE) < 0)
            return -1;

-        if (qemuDomainObjEnterMonitorAsync(driver, vm, QEMU_ASYNC_JOB_NONE) < 0)
+        if (qemuDomainObjEnterMonitorAsync(driver, vm, VIR_ASYNC_JOB_NONE) < 0)
            return -1;

         rv = qemuMonitorSetDBusVMStateIdList(priv->mon, priv->dbusVMStateIds);
@@ -3853,7 +3853,7 @@ qemuMigrationSetDBusVMState(virQEMUDriver *driver,

         return rv;
     } else {
-        if (qemuHotplugRemoveDBusVMState(driver, vm, QEMU_ASYNC_JOB_NONE) < 0)
+        if (qemuHotplugRemoveDBusVMState(driver, vm, VIR_ASYNC_JOB_NONE) < 0)
             return -1;
     }

@@ -3888,7 +3888,7 @@ qemuMigrationSrcRunPrepareBlockDirtyBitmapsMerge(virDomainObj *vm,
     GSList *nextdisk;
     int rc;

-    if (!(blockNamedNodeData = qemuBlockGetNamedNodeData(vm, QEMU_ASYNC_JOB_MIGRATION_OUT)))
+    if (!(blockNamedNodeData = qemuBlockGetNamedNodeData(vm, VIR_ASYNC_JOB_MIGRATION_OUT)))
         return -1;

     for (nextdisk = mig->blockDirtyBitmaps; nextdisk; nextdisk = nextdisk->next) {
@@ -3944,7 +3944,7 @@ qemuMigrationSrcRunPrepareBlockDirtyBitmapsMerge(virDomainObj *vm,
         }
     }

-    if (qemuDomainObjEnterMonitorAsync(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
+    if (qemuDomainObjEnterMonitorAsync(driver, vm, VIR_ASYNC_JOB_MIGRATION_OUT) < 0)
         return -1;

     rc = qemuMonitorTransaction(priv->mon, &actions);
@@ -4107,7 +4107,7 @@ qemuMigrationSrcRun(virQEMUDriver *driver,
         qemuMigrationSrcRunPrepareBlockDirtyBitmaps(vm, mig, migParams, flags) < 0)
         goto error;

-    if (qemuMigrationParamsCheck(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT,
+    if (qemuMigrationParamsCheck(driver, vm, VIR_ASYNC_JOB_MIGRATION_OUT,
                                  migParams, mig->caps->automatic) < 0)
         goto error;

@@ -4121,7 +4121,7 @@ qemuMigrationSrcRun(virQEMUDriver *driver,
             hostname = spec->dest.host.name;

         if (qemuMigrationParamsEnableTLS(driver, vm, false,
-                                         QEMU_ASYNC_JOB_MIGRATION_OUT,
+                                         VIR_ASYNC_JOB_MIGRATION_OUT,
                                          &tlsAlias, hostname,
                                          migParams) < 0)
             goto error;
@@ -4135,7 +4135,7 @@ qemuMigrationSrcRun(virQEMUDriver *driver,
                                          priv->migMaxBandwidth * 1024 * 1024) < 0)
         goto error;

-    if (qemuMigrationParamsApply(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT,
+    if (qemuMigrationParamsApply(driver, vm, VIR_ASYNC_JOB_MIGRATION_OUT,
                                  migParams) < 0)
         goto error;

@@ -4188,12 +4188,12 @@ qemuMigrationSrcRun(virQEMUDriver *driver,
     if (!(flags & VIR_MIGRATE_LIVE) &&
         virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING) {
         if (qemuProcessStopCPUs(driver, vm, VIR_DOMAIN_PAUSED_MIGRATION,
-                                QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
+                                VIR_ASYNC_JOB_MIGRATION_OUT) < 0)
             goto error;
     }

     if (qemuDomainObjEnterMonitorAsync(driver, vm,
-                                       QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
+                                       VIR_ASYNC_JOB_MIGRATION_OUT) < 0)
         goto error;

     if (priv->job.abortJob) {
@@ -4202,7 +4202,7 @@ qemuMigrationSrcRun(virQEMUDriver *driver,
          * priv->job.abortJob will not change */
         priv->job.current->status = VIR_DOMAIN_JOB_STATUS_CANCELED;
         virReportError(VIR_ERR_OPERATION_ABORTED, _("%s: %s"),
-                       qemuDomainAsyncJobTypeToString(priv->job.asyncJob),
+                       virDomainAsyncJobTypeToString(priv->job.asyncJob),
                        _("canceled by client"));
         goto exit_monitor;
     }
@@ -4284,7 +4284,7 @@ qemuMigrationSrcRun(virQEMUDriver *driver,
         waitFlags |= QEMU_MIGRATION_COMPLETED_POSTCOPY;

     rc = qemuMigrationSrcWaitForCompletion(driver, vm,
-                                           QEMU_ASYNC_JOB_MIGRATION_OUT,
+                                           VIR_ASYNC_JOB_MIGRATION_OUT,
                                            dconn, waitFlags);
     if (rc == -2)
         goto error;
@@ -4308,7 +4308,7 @@ qemuMigrationSrcRun(virQEMUDriver *driver,

     if (mig->nbd &&
         qemuMigrationSrcNBDCopyCancel(driver, vm, false,
-                                      QEMU_ASYNC_JOB_MIGRATION_OUT,
+                                      VIR_ASYNC_JOB_MIGRATION_OUT,
                                       dconn) < 0)
         goto error;

@@ -4319,13 +4319,13 @@ qemuMigrationSrcRun(virQEMUDriver *driver,
     if (priv->job.current->status == VIR_DOMAIN_JOB_STATUS_PAUSED) {
         if (qemuMigrationSrcContinue(driver, vm,
                                      QEMU_MONITOR_MIGRATION_STATUS_PRE_SWITCHOVER,
-                                     QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
+                                     VIR_ASYNC_JOB_MIGRATION_OUT) < 0)
             goto error;

         waitFlags ^= QEMU_MIGRATION_COMPLETED_PRE_SWITCHOVER;

         rc = qemuMigrationSrcWaitForCompletion(driver, vm,
-                                               QEMU_ASYNC_JOB_MIGRATION_OUT,
+                                               VIR_ASYNC_JOB_MIGRATION_OUT,
                                                dconn, waitFlags);
         if (rc == -2)
             goto error;
@@ -4380,7 +4380,7 @@ qemuMigrationSrcRun(virQEMUDriver *driver,
     if (cancel &&
         priv->job.current->status != VIR_DOMAIN_JOB_STATUS_HYPERVISOR_COMPLETED &&
         qemuDomainObjEnterMonitorAsync(driver, vm,
-                                       QEMU_ASYNC_JOB_MIGRATION_OUT) == 0) {
+                                       VIR_ASYNC_JOB_MIGRATION_OUT) == 0) {
         qemuMonitorMigrateCancel(priv->mon);
         qemuDomainObjExitMonitor(vm);
     }
@@ -4388,10 +4388,10 @@ qemuMigrationSrcRun(virQEMUDriver *driver,
     /* cancel any outstanding NBD jobs */
     if (mig && mig->nbd)
         qemuMigrationSrcNBDCopyCancel(driver, vm, true,
-                                      QEMU_ASYNC_JOB_MIGRATION_OUT,
+                                      VIR_ASYNC_JOB_MIGRATION_OUT,
                                       dconn);

-    qemuMigrationSrcCancelRemoveTempBitmaps(vm, QEMU_ASYNC_JOB_MIGRATION_OUT);
+    qemuMigrationSrcCancelRemoveTempBitmaps(vm, VIR_ASYNC_JOB_MIGRATION_OUT);

     if (priv->job.current->status != VIR_DOMAIN_JOB_STATUS_CANCELED)
         priv->job.current->status = VIR_DOMAIN_JOB_STATUS_FAILED;
@@ -5276,7 +5276,7 @@ qemuMigrationSrcPerformJob(virQEMUDriver *driver,
     qemuDomainObjPrivate *priv = vm->privateData;
     qemuDomainJobPrivate *jobPriv = priv->job.privateData;

-    if (qemuMigrationJobStart(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT,
+    if (qemuMigrationJobStart(driver, vm, VIR_ASYNC_JOB_MIGRATION_OUT,
                               flags) < 0)
         goto cleanup;

@@ -5316,7 +5316,7 @@ qemuMigrationSrcPerformJob(virQEMUDriver *driver,
      */
     if (!v3proto) {
         qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_MIGRATED,
-                        QEMU_ASYNC_JOB_MIGRATION_OUT,
+                        VIR_ASYNC_JOB_MIGRATION_OUT,
                         VIR_QEMU_PROCESS_STOP_MIGRATED);
         virDomainAuditStop(vm, "migrated");
         event = virDomainEventLifecycleNewFromObj(vm,
@@ -5332,7 +5332,7 @@ qemuMigrationSrcPerformJob(virQEMUDriver *driver,
      * here
      */
     if (!v3proto && ret < 0)
-        qemuMigrationParamsReset(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT,
+        qemuMigrationParamsReset(driver, vm, VIR_ASYNC_JOB_MIGRATION_OUT,
                                  jobPriv->migParams, priv->job.apiFlags);

     qemuMigrationSrcRestoreDomainState(driver, vm);
@@ -5380,10 +5380,10 @@ qemuMigrationSrcPerformPhase(virQEMUDriver *driver,

     /* If we didn't start the job in the begin phase, start it now. */
     if (!(flags & VIR_MIGRATE_CHANGE_PROTECTION)) {
-        if (qemuMigrationJobStart(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT,
+        if (qemuMigrationJobStart(driver, vm, VIR_ASYNC_JOB_MIGRATION_OUT,
                                   flags) < 0)
             return ret;
-    } else if (!qemuMigrationJobIsActive(vm, QEMU_ASYNC_JOB_MIGRATION_OUT)) {
+    } else if (!qemuMigrationJobIsActive(vm, VIR_ASYNC_JOB_MIGRATION_OUT)) {
         return ret;
     }

@@ -5409,7 +5409,7 @@ qemuMigrationSrcPerformPhase(virQEMUDriver *driver,

 endjob:
     if (ret < 0) {
-        qemuMigrationParamsReset(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT,
+        qemuMigrationParamsReset(driver, vm, VIR_ASYNC_JOB_MIGRATION_OUT,
                                  jobPriv->migParams, priv->job.apiFlags);
         qemuMigrationJobFinish(vm);
     } else {
@@ -5639,7 +5639,7 @@ qemuMigrationDstFinish(virQEMUDriver *driver,
     port = priv->migrationPort;
     priv->migrationPort = 0;

-    if (!qemuMigrationJobIsActive(vm, QEMU_ASYNC_JOB_MIGRATION_IN)) {
+    if (!qemuMigrationJobIsActive(vm, VIR_ASYNC_JOB_MIGRATION_IN)) {
         qemuMigrationDstErrorReport(driver, vm->def->name);
         goto cleanup;
     }
@@ -5675,7 +5675,7 @@ qemuMigrationDstFinish(virQEMUDriver *driver,
         /* Check for a possible error on the monitor in case Finish was called
          * earlier than monitor EOF handler got a chance to process the error
          */
-        qemuDomainCheckMonitor(driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN);
+        qemuDomainCheckMonitor(driver, vm, VIR_ASYNC_JOB_MIGRATION_IN);
         goto endjob;
     }

@@ -5696,7 +5696,7 @@ qemuMigrationDstFinish(virQEMUDriver *driver,
         goto endjob;

     if (qemuRefreshVirtioChannelState(driver, vm,
-                                      QEMU_ASYNC_JOB_MIGRATION_IN) < 0)
+                                      VIR_ASYNC_JOB_MIGRATION_IN) < 0)
         goto endjob;

     if (qemuConnectAgent(driver, vm) < 0)
@@ -5725,7 +5725,7 @@ qemuMigrationDstFinish(virQEMUDriver *driver,
      * before starting guest CPUs.
      */
     if (qemuMigrationDstWaitForCompletion(driver, vm,
-                                          QEMU_ASYNC_JOB_MIGRATION_IN,
+                                          VIR_ASYNC_JOB_MIGRATION_IN,
                                           !!(flags & VIR_MIGRATE_POSTCOPY)) < 0) {
         /* There's not much we can do for v2 protocol since the
          * original domain on the source host is already gone.
@@ -5736,7 +5736,7 @@ qemuMigrationDstFinish(virQEMUDriver *driver,

     /* Now that the state data was transferred we can refresh the actual state
      * of the devices */
-    if (qemuProcessRefreshState(driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN) < 0) {
+    if (qemuProcessRefreshState(driver, vm, VIR_ASYNC_JOB_MIGRATION_IN) < 0) {
         /* Similarly to the case above v2 protocol will not be able to recover
          * from this. Let's ignore this and perhaps stuff will not break. */
         if (v3proto)
@@ -5754,7 +5754,7 @@ qemuMigrationDstFinish(virQEMUDriver *driver,
         if (qemuProcessStartCPUs(driver, vm,
                                  inPostCopy ? VIR_DOMAIN_RUNNING_POSTCOPY
                                             : VIR_DOMAIN_RUNNING_MIGRATED,
-                                 QEMU_ASYNC_JOB_MIGRATION_IN) < 0) {
+                                 VIR_ASYNC_JOB_MIGRATION_IN) < 0) {
             if (virGetLastErrorCode() == VIR_ERR_OK)
                 virReportError(VIR_ERR_INTERNAL_ERROR,
                                "%s", _("resume operation failed"));
@@ -5793,7 +5793,7 @@ qemuMigrationDstFinish(virQEMUDriver *driver,

     if (inPostCopy) {
         if (qemuMigrationDstWaitForCompletion(driver, vm,
-                                              QEMU_ASYNC_JOB_MIGRATION_IN,
+                                              VIR_ASYNC_JOB_MIGRATION_IN,
                                               false) < 0) {
             goto endjob;
         }
@@ -5840,7 +5840,7 @@ qemuMigrationDstFinish(virQEMUDriver *driver,
         virDomainObjIsActive(vm)) {
         if (doKill) {
             qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_FAILED,
-                            QEMU_ASYNC_JOB_MIGRATION_IN,
+                            VIR_ASYNC_JOB_MIGRATION_IN,
                             VIR_QEMU_PROCESS_STOP_MIGRATED);
             virDomainAuditStop(vm, "failed");
             event = virDomainEventLifecycleNewFromObj(vm,
@@ -5873,7 +5873,7 @@ qemuMigrationDstFinish(virQEMUDriver *driver,
         g_clear_pointer(&priv->job.completed, virDomainJobDataFree);
     }

-    qemuMigrationParamsReset(driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN,
+    qemuMigrationParamsReset(driver, vm, VIR_ASYNC_JOB_MIGRATION_IN,
                              jobPriv->migParams, priv->job.apiFlags);

     qemuMigrationJobFinish(vm);
@@ -5903,7 +5903,7 @@ int
 qemuMigrationSrcToFile(virQEMUDriver *driver, virDomainObj *vm,
                        int fd,
                        virCommand *compressor,
-                       qemuDomainAsyncJob asyncJob)
+                       virDomainAsyncJob asyncJob)
 {
     qemuDomainObjPrivate *priv = vm->privateData;
     bool bwParam = virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATION_PARAM_BANDWIDTH);
@@ -6082,10 +6082,10 @@ qemuMigrationSrcCancel(virQEMUDriver *driver,

     if (storage &&
         qemuMigrationSrcNBDCopyCancel(driver, vm, true,
-                                      QEMU_ASYNC_JOB_NONE, NULL) < 0)
+                                      VIR_ASYNC_JOB_NONE, NULL) < 0)
         return -1;

-    if (qemuMigrationSrcCancelRemoveTempBitmaps(vm, QEMU_ASYNC_JOB_NONE) < 0)
+    if (qemuMigrationSrcCancelRemoveTempBitmaps(vm, VIR_ASYNC_JOB_NONE) < 0)
         return -1;

     return 0;
@@ -6095,21 +6095,21 @@ qemuMigrationSrcCancel(virQEMUDriver *driver,
 static int
 qemuMigrationJobStart(virQEMUDriver *driver,
                       virDomainObj *vm,
-                      qemuDomainAsyncJob job,
+                      virDomainAsyncJob job,
                       unsigned long apiFlags)
 {
     qemuDomainObjPrivate *priv = vm->privateData;
     virDomainJobOperation op;
     unsigned long long mask;

-    if (job == QEMU_ASYNC_JOB_MIGRATION_IN) {
+    if (job == VIR_ASYNC_JOB_MIGRATION_IN) {
         op = VIR_DOMAIN_JOB_OPERATION_MIGRATION_IN;
-        mask = QEMU_JOB_NONE;
+        mask = VIR_JOB_NONE;
     } else {
         op = VIR_DOMAIN_JOB_OPERATION_MIGRATION_OUT;
         mask = QEMU_JOB_DEFAULT_MASK |
-               JOB_MASK(QEMU_JOB_SUSPEND) |
-               JOB_MASK(QEMU_JOB_MIGRATION_OP);
+               JOB_MASK(VIR_JOB_SUSPEND) |
+               JOB_MASK(VIR_JOB_MIGRATION_OP);
     }

     if (qemuDomainObjBeginAsyncJob(driver, vm, job, op, apiFlags) < 0)
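For orientation, the mask chosen in the hunk above differs by migration direction: an incoming migration admits no concurrent sync jobs during Prepare, while an outgoing one also allows suspend and migration-op jobs beside the async job. A standalone sketch follows; the JOB_MASK encoding and the contents of QEMU_JOB_DEFAULT_MASK are assumptions, since neither appears in this excerpt:

    #include <stdio.h>

    #define JOB_MASK(job) ((job) == 0 ? 0 : 1 << ((job) - 1))

    enum { VIR_JOB_NONE, VIR_JOB_QUERY, VIR_JOB_DESTROY, VIR_JOB_SUSPEND,
           VIR_JOB_MODIFY, VIR_JOB_ABORT, VIR_JOB_MIGRATION_OP };

    /* Assumed contents of QEMU_JOB_DEFAULT_MASK (not shown in this diff) */
    #define QEMU_JOB_DEFAULT_MASK \
        (JOB_MASK(VIR_JOB_QUERY) | \
         JOB_MASK(VIR_JOB_DESTROY) | \
         JOB_MASK(VIR_JOB_ABORT))

    int main(void)
    {
        int migration_in = 0;  /* flip to 1 for the incoming direction */
        unsigned long long mask;

        if (migration_in)
            mask = VIR_JOB_NONE;                   /* Prepare: nothing runs beside it */
        else
            mask = QEMU_JOB_DEFAULT_MASK |
                   JOB_MASK(VIR_JOB_SUSPEND) |     /* pausing CPUs stays legal */
                   JOB_MASK(VIR_JOB_MIGRATION_OP); /* e.g. tuning the migration */

        printf("modify allowed:  %d\n", !!(mask & JOB_MASK(VIR_JOB_MODIFY)));
        printf("suspend allowed: %d\n", !!(mask & JOB_MASK(VIR_JOB_SUSPEND)));
        return 0;
    }

The design point the rename preserves: because the mask is direction-dependent, a VIR_JOB_MODIFY request against a migrating-out domain still has to queue rather than race the migration.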
@@ -6153,14 +6153,14 @@ qemuMigrationJobContinue(virDomainObj *vm)

 static bool
 qemuMigrationJobIsActive(virDomainObj *vm,
-                         qemuDomainAsyncJob job)
+                         virDomainAsyncJob job)
 {
     qemuDomainObjPrivate *priv = vm->privateData;

     if (priv->job.asyncJob != job) {
         const char *msg;

-        if (job == QEMU_ASYNC_JOB_MIGRATION_IN)
+        if (job == VIR_ASYNC_JOB_MIGRATION_IN)
             msg = _("domain '%s' is not processing incoming migration");
         else
             msg = _("domain '%s' is not being migrated");

@@ -6233,7 +6233,7 @@ qemuMigrationDstErrorReport(virQEMUDriver *driver,
 int
 qemuMigrationSrcFetchMirrorStats(virQEMUDriver *driver,
                                  virDomainObj *vm,
-                                 qemuDomainAsyncJob asyncJob,
+                                 virDomainAsyncJob asyncJob,
                                  virDomainJobData *jobData)
 {
     size_t i;

@@ -210,7 +210,7 @@ qemuMigrationSrcToFile(virQEMUDriver *driver,
                        virDomainObj *vm,
                        int fd,
                        virCommand *compressor,
-                       qemuDomainAsyncJob asyncJob)
+                       virDomainAsyncJob asyncJob)
     ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2) G_GNUC_WARN_UNUSED_RESULT;

 int
@@ -220,7 +220,7 @@ qemuMigrationSrcCancel(virQEMUDriver *driver,
 int
 qemuMigrationAnyFetchStats(virQEMUDriver *driver,
                            virDomainObj *vm,
-                           qemuDomainAsyncJob asyncJob,
+                           virDomainAsyncJob asyncJob,
                            virDomainJobData *jobData,
                            char **error);

@@ -248,7 +248,7 @@ int
 qemuMigrationDstRun(virQEMUDriver *driver,
                     virDomainObj *vm,
                     const char *uri,
-                    qemuDomainAsyncJob asyncJob);
+                    virDomainAsyncJob asyncJob);

 void
 qemuMigrationAnyPostcopyFailed(virQEMUDriver *driver,
@@ -257,5 +257,5 @@ qemuMigrationAnyPostcopyFailed(virQEMUDriver *driver,
 int
 qemuMigrationSrcFetchMirrorStats(virQEMUDriver *driver,
                                  virDomainObj *vm,
-                                 qemuDomainAsyncJob asyncJob,
+                                 virDomainAsyncJob asyncJob,
                                  virDomainJobData *jobData);

@@ -850,7 +850,7 @@ qemuMigrationParamsApply(virQEMUDriver *driver,
     if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0)
         return -1;

-    if (asyncJob == QEMU_ASYNC_JOB_NONE) {
+    if (asyncJob == VIR_ASYNC_JOB_NONE) {
         if (!virBitmapIsAllClear(migParams->caps)) {
             virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                            _("Migration capabilities can only be set by "
@@ -1165,7 +1165,7 @@ qemuMigrationParamsCheck(virQEMUDriver *driver,
     qemuMigrationParty party;
     size_t i;

-    if (asyncJob == QEMU_ASYNC_JOB_MIGRATION_OUT)
+    if (asyncJob == VIR_ASYNC_JOB_MIGRATION_OUT)
         party = QEMU_MIGRATION_SOURCE;
     else
         party = QEMU_MIGRATION_DESTINATION;

@@ -462,7 +462,7 @@ qemuProcessFakeReboot(void *opaque)

     VIR_DEBUG("vm=%p", vm);
     virObjectLock(vm);
-    if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+    if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_MODIFY) < 0)
         goto cleanup;

     if (!virDomainObjIsActive(vm)) {
@@ -484,7 +484,7 @@ qemuProcessFakeReboot(void *opaque)

     if (qemuProcessStartCPUs(driver, vm,
                              reason,
-                             QEMU_ASYNC_JOB_NONE) < 0) {
+                             VIR_ASYNC_JOB_NONE) < 0) {
         if (virGetLastErrorCode() == VIR_ERR_OK)
             virReportError(VIR_ERR_INTERNAL_ERROR,
                            "%s", _("resume operation failed"));
@@ -650,7 +650,7 @@ qemuProcessHandleStop(qemuMonitor *mon G_GNUC_UNUSED,
      * reveal it in domain state nor sent events */
     if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING &&
         !priv->pausedShutdown) {
-        if (priv->job.asyncJob == QEMU_ASYNC_JOB_MIGRATION_OUT) {
+        if (priv->job.asyncJob == VIR_ASYNC_JOB_MIGRATION_OUT) {
            if (priv->job.current->status == VIR_DOMAIN_JOB_STATUS_POSTCOPY)
                reason = VIR_DOMAIN_PAUSED_POSTCOPY;
            else
@@ -1525,7 +1525,7 @@ qemuProcessHandleSpiceMigrated(qemuMonitor *mon G_GNUC_UNUSED,

     priv = vm->privateData;
     jobPriv = priv->job.privateData;
-    if (priv->job.asyncJob != QEMU_ASYNC_JOB_MIGRATION_OUT) {
+    if (priv->job.asyncJob != VIR_ASYNC_JOB_MIGRATION_OUT) {
         VIR_DEBUG("got SPICE_MIGRATE_COMPLETED event without a migration job");
         goto cleanup;
     }
@@ -1557,7 +1557,7 @@ qemuProcessHandleMigrationStatus(qemuMonitor *mon G_GNUC_UNUSED,
               qemuMonitorMigrationStatusTypeToString(status));

     priv = vm->privateData;
-    if (priv->job.asyncJob == QEMU_ASYNC_JOB_NONE) {
+    if (priv->job.asyncJob == VIR_ASYNC_JOB_NONE) {
         VIR_DEBUG("got MIGRATION event without a migration job");
         goto cleanup;
     }
@@ -1568,7 +1568,7 @@ qemuProcessHandleMigrationStatus(qemuMonitor *mon G_GNUC_UNUSED,
     virDomainObjBroadcast(vm);

     if (status == QEMU_MONITOR_MIGRATION_STATUS_POSTCOPY &&
-        priv->job.asyncJob == QEMU_ASYNC_JOB_MIGRATION_OUT &&
+        priv->job.asyncJob == VIR_ASYNC_JOB_MIGRATION_OUT &&
         virDomainObjGetState(vm, &reason) == VIR_DOMAIN_PAUSED &&
         reason == VIR_DOMAIN_PAUSED_MIGRATION) {
         VIR_DEBUG("Correcting paused state reason for domain %s to %s",
@@ -1603,7 +1603,7 @@ qemuProcessHandleMigrationPass(qemuMonitor *mon G_GNUC_UNUSED,
               vm, vm->def->name, pass);

     priv = vm->privateData;
-    if (priv->job.asyncJob == QEMU_ASYNC_JOB_NONE) {
+    if (priv->job.asyncJob == VIR_ASYNC_JOB_NONE) {
         VIR_DEBUG("got MIGRATION_PASS event without a migration job");
         goto cleanup;
     }
@@ -1636,7 +1636,7 @@ qemuProcessHandleDumpCompleted(qemuMonitor *mon G_GNUC_UNUSED,
     priv = vm->privateData;
     jobPriv = priv->job.privateData;
     privJobCurrent = priv->job.current->privateData;
-    if (priv->job.asyncJob == QEMU_ASYNC_JOB_NONE) {
+    if (priv->job.asyncJob == VIR_ASYNC_JOB_NONE) {
         VIR_DEBUG("got DUMP_COMPLETED event without a dump_completed job");
         goto cleanup;
     }
@@ -1897,7 +1897,7 @@ qemuProcessMonitorLogFree(void *opaque)
 static int
 qemuProcessInitMonitor(virQEMUDriver *driver,
                        virDomainObj *vm,
-                       qemuDomainAsyncJob asyncJob)
+                       virDomainAsyncJob asyncJob)
 {
     int ret;

@@ -2190,7 +2190,7 @@ qemuProcessRefreshChannelVirtioState(virQEMUDriver *driver,
 int
 qemuRefreshVirtioChannelState(virQEMUDriver *driver,
                               virDomainObj *vm,
-                              qemuDomainAsyncJob asyncJob)
+                              virDomainAsyncJob asyncJob)
 {
     qemuDomainObjPrivate *priv = vm->privateData;
     g_autoptr(GHashTable) info = NULL;
@@ -2546,7 +2546,7 @@ qemuProcessInitCpuAffinity(virDomainObj *vm G_GNUC_UNUSED)
 static int
 qemuProcessSetLinkStates(virQEMUDriver *driver,
                          virDomainObj *vm,
-                         qemuDomainAsyncJob asyncJob)
+                         virDomainAsyncJob asyncJob)
 {
     qemuDomainObjPrivate *priv = vm->privateData;
     virDomainDef *def = vm->def;
@@ -3210,7 +3210,7 @@ qemuProcessPrepareMonitorChr(virDomainChrSourceDef *monConfig,
 int
 qemuProcessStartCPUs(virQEMUDriver *driver, virDomainObj *vm,
                      virDomainRunningReason reason,
-                     qemuDomainAsyncJob asyncJob)
+                     virDomainAsyncJob asyncJob)
 {
     int ret = -1;
     qemuDomainObjPrivate *priv = vm->privateData;
@@ -3261,7 +3261,7 @@ qemuProcessStartCPUs(virQEMUDriver *driver, virDomainObj *vm,
 int qemuProcessStopCPUs(virQEMUDriver *driver,
                         virDomainObj *vm,
                         virDomainPausedReason reason,
-                        qemuDomainAsyncJob asyncJob)
+                        virDomainAsyncJob asyncJob)
 {
     int ret = -1;
     qemuDomainObjPrivate *priv = vm->privateData;
@@ -3471,7 +3471,7 @@ qemuProcessRecoverMigrationIn(virQEMUDriver *driver,
                  vm->def->name);
         if (qemuProcessStartCPUs(driver, vm,
                                  VIR_DOMAIN_RUNNING_MIGRATED,
-                                 QEMU_ASYNC_JOB_NONE) < 0) {
+                                 VIR_ASYNC_JOB_NONE) < 0) {
             VIR_WARN("Could not resume domain %s", vm->def->name);
         }
         break;
@@ -3489,7 +3489,7 @@ qemuProcessRecoverMigrationIn(virQEMUDriver *driver,
         break;
     }

-    qemuMigrationParamsReset(driver, vm, QEMU_ASYNC_JOB_NONE,
+    qemuMigrationParamsReset(driver, vm, VIR_ASYNC_JOB_NONE,
                              jobPriv->migParams, job->apiFlags);
     return 0;
 }
@@ -3579,13 +3579,13 @@ qemuProcessRecoverMigrationOut(virQEMUDriver *driver,
               reason == VIR_DOMAIN_PAUSED_UNKNOWN)) {
             if (qemuProcessStartCPUs(driver, vm,
                                      VIR_DOMAIN_RUNNING_MIGRATION_CANCELED,
-                                     QEMU_ASYNC_JOB_NONE) < 0) {
+                                     VIR_ASYNC_JOB_NONE) < 0) {
                 VIR_WARN("Could not resume domain %s", vm->def->name);
             }
         }
     }

-    qemuMigrationParamsReset(driver, vm, QEMU_ASYNC_JOB_NONE,
+    qemuMigrationParamsReset(driver, vm, VIR_ASYNC_JOB_NONE,
                              jobPriv->migParams, job->apiFlags);
     return 0;
 }
@@ -3604,21 +3604,21 @@ qemuProcessRecoverJob(virQEMUDriver *driver,
     state = virDomainObjGetState(vm, &reason);

     switch (job->asyncJob) {
-    case QEMU_ASYNC_JOB_MIGRATION_OUT:
+    case VIR_ASYNC_JOB_MIGRATION_OUT:
         if (qemuProcessRecoverMigrationOut(driver, vm, job,
                                            state, reason, stopFlags) < 0)
             return -1;
         break;

-    case QEMU_ASYNC_JOB_MIGRATION_IN:
+    case VIR_ASYNC_JOB_MIGRATION_IN:
         if (qemuProcessRecoverMigrationIn(driver, vm, job,
                                           state, reason) < 0)
             return -1;
         break;

-    case QEMU_ASYNC_JOB_SAVE:
-    case QEMU_ASYNC_JOB_DUMP:
-    case QEMU_ASYNC_JOB_SNAPSHOT:
+    case VIR_ASYNC_JOB_SAVE:
+    case VIR_ASYNC_JOB_DUMP:
+    case VIR_ASYNC_JOB_SNAPSHOT:
         qemuDomainObjEnterMonitor(driver, vm);
         ignore_value(qemuMonitorMigrateCancel(priv->mon));
         qemuDomainObjExitMonitor(vm);
@@ -3627,39 +3627,39 @@ qemuProcessRecoverJob(virQEMUDriver *driver,
          * recovering an async job, this function is run at startup
          * and must resume things using sync monitor connections. */
         if (state == VIR_DOMAIN_PAUSED &&
-            ((job->asyncJob == QEMU_ASYNC_JOB_DUMP &&
+            ((job->asyncJob == VIR_ASYNC_JOB_DUMP &&
               reason == VIR_DOMAIN_PAUSED_DUMP) ||
-             (job->asyncJob == QEMU_ASYNC_JOB_SAVE &&
+             (job->asyncJob == VIR_ASYNC_JOB_SAVE &&
               reason == VIR_DOMAIN_PAUSED_SAVE) ||
-             (job->asyncJob == QEMU_ASYNC_JOB_SNAPSHOT &&
+             (job->asyncJob == VIR_ASYNC_JOB_SNAPSHOT &&
               (reason == VIR_DOMAIN_PAUSED_SNAPSHOT ||
                reason == VIR_DOMAIN_PAUSED_MIGRATION)) ||
              reason == VIR_DOMAIN_PAUSED_UNKNOWN)) {
             if (qemuProcessStartCPUs(driver, vm,
                                      VIR_DOMAIN_RUNNING_SAVE_CANCELED,
-                                     QEMU_ASYNC_JOB_NONE) < 0) {
+                                     VIR_ASYNC_JOB_NONE) < 0) {
                 VIR_WARN("Could not resume domain '%s' after migration to file",
                          vm->def->name);
             }
         }
         break;

-    case QEMU_ASYNC_JOB_START:
+    case VIR_ASYNC_JOB_START:
         /* Already handled in VIR_DOMAIN_PAUSED_STARTING_UP check. */
         break;

-    case QEMU_ASYNC_JOB_BACKUP:
+    case VIR_ASYNC_JOB_BACKUP:
         ignore_value(virTimeMillisNow(&now));

         /* Restore the config of the async job which is not persisted */
         priv->job.jobsQueued++;
-        priv->job.asyncJob = QEMU_ASYNC_JOB_BACKUP;
+        priv->job.asyncJob = VIR_ASYNC_JOB_BACKUP;
         priv->job.asyncOwnerAPI = g_strdup(virThreadJobGet());
         priv->job.asyncStarted = now;

         qemuDomainObjSetAsyncJobMask(vm, (QEMU_JOB_DEFAULT_MASK |
-                                          JOB_MASK(QEMU_JOB_SUSPEND) |
-                                          JOB_MASK(QEMU_JOB_MODIFY)));
+                                          JOB_MASK(VIR_JOB_SUSPEND) |
+                                          JOB_MASK(VIR_JOB_MODIFY)));

         /* We reset the job parameters for backup so that the job will look
          * active. This is possible because we are able to recover the state
@@ -3673,8 +3673,8 @@ qemuProcessRecoverJob(virQEMUDriver *driver,
         priv->job.current->started = now;
         break;

-    case QEMU_ASYNC_JOB_NONE:
-    case QEMU_ASYNC_JOB_LAST:
+    case VIR_ASYNC_JOB_NONE:
+    case VIR_ASYNC_JOB_LAST:
         break;
     }

@@ -3686,32 +3686,32 @@ qemuProcessRecoverJob(virQEMUDriver *driver,
      * for the job to be properly tracked in domain state XML.
      */
     switch (job->active) {
-    case QEMU_JOB_QUERY:
+    case VIR_JOB_QUERY:
         /* harmless */
         break;

-    case QEMU_JOB_DESTROY:
+    case VIR_JOB_DESTROY:
         VIR_DEBUG("Domain %s should have already been destroyed",
                   vm->def->name);
         return -1;

-    case QEMU_JOB_SUSPEND:
+    case VIR_JOB_SUSPEND:
         /* mostly harmless */
         break;

-    case QEMU_JOB_MODIFY:
+    case VIR_JOB_MODIFY:
         /* XXX depending on the command we may be in an inconsistent state and
          * we should probably fall back to "monitor error" state and refuse to
          */
         break;

-    case QEMU_JOB_MIGRATION_OP:
-    case QEMU_JOB_ABORT:
-    case QEMU_JOB_ASYNC:
-    case QEMU_JOB_ASYNC_NESTED:
+    case VIR_JOB_MIGRATION_OP:
+    case VIR_JOB_ABORT:
+    case VIR_JOB_ASYNC:
+    case VIR_JOB_ASYNC_NESTED:
         /* async job was already handled above */
-    case QEMU_JOB_NONE:
-    case QEMU_JOB_LAST:
+    case VIR_JOB_NONE:
+    case VIR_JOB_LAST:
         break;
     }

@@ -3727,7 +3727,7 @@ qemuProcessUpdateDevices(virQEMUDriver *driver,
     g_auto(GStrv) old = g_steal_pointer(&priv->qemuDevices);
     GStrv tmp;

-    if (qemuDomainUpdateDeviceList(driver, vm, QEMU_ASYNC_JOB_NONE) < 0)
+    if (qemuDomainUpdateDeviceList(driver, vm, VIR_ASYNC_JOB_NONE) < 0)
         return -1;

     if (!old)

@@ -4250,7 +4250,7 @@ qemuProcessGetVCPUQOMPath(virDomainObj *vm)
 static int
 qemuProcessFetchGuestCPU(virQEMUDriver *driver,
                          virDomainObj *vm,
-                         qemuDomainAsyncJob asyncJob,
+                         virDomainAsyncJob asyncJob,
                          virCPUData **enabled,
                          virCPUData **disabled)
 {

@@ -4358,7 +4358,7 @@ qemuProcessUpdateLiveGuestCPU(virDomainObj *vm,
 static int
 qemuProcessUpdateAndVerifyCPU(virQEMUDriver *driver,
                               virDomainObj *vm,
-                              qemuDomainAsyncJob asyncJob)
+                              virDomainAsyncJob asyncJob)
 {
     g_autoptr(virCPUData) cpu = NULL;
     g_autoptr(virCPUData) disabled = NULL;

@@ -4379,7 +4379,7 @@ qemuProcessUpdateAndVerifyCPU(virQEMUDriver *driver,
 static int
 qemuProcessFetchCPUDefinitions(virQEMUDriver *driver,
                                virDomainObj *vm,
-                               qemuDomainAsyncJob asyncJob,
+                               virDomainAsyncJob asyncJob,
                                virDomainCapsCPUModels **cpuModels)
 {
     qemuDomainObjPrivate *priv = vm->privateData;

@@ -4403,7 +4403,7 @@ qemuProcessFetchCPUDefinitions(virQEMUDriver *driver,
 static int
 qemuProcessUpdateCPU(virQEMUDriver *driver,
                      virDomainObj *vm,
-                     qemuDomainAsyncJob asyncJob)
+                     virDomainAsyncJob asyncJob)
 {
     g_autoptr(virCPUData) cpu = NULL;
     g_autoptr(virCPUData) disabled = NULL;
@@ -4613,9 +4613,9 @@ qemuProcessIncomingDefNew(virQEMUCaps *qemuCaps,


 /*
- * This function starts a new QEMU_ASYNC_JOB_START async job. The user is
+ * This function starts a new VIR_ASYNC_JOB_START async job. The user is
  * responsible for calling qemuProcessEndJob to stop this job and for passing
- * QEMU_ASYNC_JOB_START as @asyncJob argument to any function requiring this
+ * VIR_ASYNC_JOB_START as @asyncJob argument to any function requiring this
  * parameter between qemuProcessBeginJob and qemuProcessEndJob.
  */
 int

@@ -4624,11 +4624,11 @@ qemuProcessBeginJob(virQEMUDriver *driver,
                     virDomainJobOperation operation,
                     unsigned long apiFlags)
 {
-    if (qemuDomainObjBeginAsyncJob(driver, vm, QEMU_ASYNC_JOB_START,
+    if (qemuDomainObjBeginAsyncJob(driver, vm, VIR_ASYNC_JOB_START,
                                    operation, apiFlags) < 0)
         return -1;

-    qemuDomainObjSetAsyncJobMask(vm, QEMU_JOB_NONE);
+    qemuDomainObjSetAsyncJobMask(vm, VIR_JOB_NONE);
     return 0;
 }

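A minimal sketch of the calling pattern the comment above describes, with argument values chosen for illustration (the qemuProcessStart arguments mirror a call site later in this diff; the qemuProcessEndJob signature is assumed):

/* Sketch: wrap a domain startup in the VIR_ASYNC_JOB_START async job. */
if (qemuProcessBeginJob(driver, vm, VIR_DOMAIN_JOB_OPERATION_START, 0) < 0)
    return -1;

rc = qemuProcessStart(conn, driver, vm, NULL, VIR_ASYNC_JOB_START,
                      NULL, -1, NULL, NULL,
                      VIR_NETDEV_VPORT_PROFILE_OP_CREATE, 0);

qemuProcessEndJob(vm); /* assumed signature; some releases also take driver */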
@@ -5083,7 +5083,7 @@ qemuProcessSetupRawIO(virDomainObj *vm,
 static int
 qemuProcessSetupBalloon(virQEMUDriver *driver,
                         virDomainObj *vm,
-                        qemuDomainAsyncJob asyncJob)
+                        virDomainAsyncJob asyncJob)
 {
     unsigned long long balloon = vm->def->mem.cur_balloon;
     qemuDomainObjPrivate *priv = vm->privateData;

@@ -5561,7 +5561,7 @@ int
 qemuProcessInit(virQEMUDriver *driver,
                 virDomainObj *vm,
                 virCPUDef *updatedCPU,
-                qemuDomainAsyncJob asyncJob,
+                virDomainAsyncJob asyncJob,
                 bool migration,
                 unsigned int flags)
 {

@@ -5952,7 +5952,7 @@ qemuProcessVcpusSortOrder(const void *a,
 static int
 qemuProcessSetupHotpluggableVcpus(virQEMUDriver *driver,
                                   virDomainObj *vm,
-                                  qemuDomainAsyncJob asyncJob)
+                                  virDomainAsyncJob asyncJob)
 {
     unsigned int maxvcpus = virDomainDefGetVcpusMax(vm->def);
     qemuDomainObjPrivate *priv = vm->privateData;

@@ -7124,7 +7124,7 @@ qemuProcessGenID(virDomainObj *vm,
 static int
 qemuProcessSetupDiskThrottlingBlockdev(virQEMUDriver *driver,
                                        virDomainObj *vm,
-                                       qemuDomainAsyncJob asyncJob)
+                                       virDomainAsyncJob asyncJob)
 {
     qemuDomainObjPrivate *priv = vm->privateData;
     size_t i;

@@ -7201,7 +7201,7 @@ qemuProcessEnablePerf(virDomainObj *vm)

 static int
 qemuProcessSetupDisksTransientSnapshot(virDomainObj *vm,
-                                       qemuDomainAsyncJob asyncJob)
+                                       virDomainAsyncJob asyncJob)
 {
     g_autoptr(qemuSnapshotDiskContext) snapctxt = NULL;
     g_autoptr(GHashTable) blockNamedNodeData = NULL;

@@ -7252,7 +7252,7 @@ qemuProcessSetupDisksTransientSnapshot(virDomainObj *vm,

 static int
 qemuProcessSetupDisksTransientHotplug(virDomainObj *vm,
-                                      qemuDomainAsyncJob asyncJob)
+                                      virDomainAsyncJob asyncJob)
 {
     qemuDomainObjPrivate *priv = vm->privateData;
     bool hasHotpluggedDisk = false;

@@ -7292,7 +7292,7 @@ qemuProcessSetupDisksTransientHotplug(virDomainObj *vm,

 static int
 qemuProcessSetupDisksTransient(virDomainObj *vm,
-                               qemuDomainAsyncJob asyncJob)
+                               virDomainAsyncJob asyncJob)
 {
     qemuDomainObjPrivate *priv = vm->privateData;

@@ -7311,7 +7311,7 @@ qemuProcessSetupDisksTransient(virDomainObj *vm,

 static int
 qemuProcessSetupLifecycleActions(virDomainObj *vm,
-                                 qemuDomainAsyncJob asyncJob)
+                                 virDomainAsyncJob asyncJob)
 {
     qemuDomainObjPrivate *priv = vm->privateData;
     int rc;

@@ -7358,7 +7358,7 @@ int
 qemuProcessLaunch(virConnectPtr conn,
                   virQEMUDriver *driver,
                   virDomainObj *vm,
-                  qemuDomainAsyncJob asyncJob,
+                  virDomainAsyncJob asyncJob,
                   qemuProcessIncomingDef *incoming,
                   virDomainMomentObj *snapshot,
                   virNetDevVPortProfileOp vmop,

@@ -7721,7 +7721,7 @@ qemuProcessLaunch(virConnectPtr conn,
 int
 qemuProcessRefreshState(virQEMUDriver *driver,
                         virDomainObj *vm,
-                        qemuDomainAsyncJob asyncJob)
+                        virDomainAsyncJob asyncJob)
 {
     qemuDomainObjPrivate *priv = vm->privateData;

@@ -7756,7 +7756,7 @@ qemuProcessRefreshState(virQEMUDriver *driver,
 int
 qemuProcessFinishStartup(virQEMUDriver *driver,
                          virDomainObj *vm,
-                         qemuDomainAsyncJob asyncJob,
+                         virDomainAsyncJob asyncJob,
                          bool startCPUs,
                          virDomainPausedReason pausedReason)
 {

@@ -7794,7 +7794,7 @@ qemuProcessStart(virConnectPtr conn,
                  virQEMUDriver *driver,
                  virDomainObj *vm,
                  virCPUDef *updatedCPU,
-                 qemuDomainAsyncJob asyncJob,
+                 virDomainAsyncJob asyncJob,
                  const char *migrateFrom,
                  int migrateFd,
                  const char *migratePath,

@@ -7814,7 +7814,7 @@ qemuProcessStart(virConnectPtr conn,
               "migrateFrom=%s migrateFd=%d migratePath=%s "
               "snapshot=%p vmop=%d flags=0x%x",
               conn, driver, vm, vm->def->name, vm->def->id,
-              qemuDomainAsyncJobTypeToString(asyncJob),
+              virDomainAsyncJobTypeToString(asyncJob),
               NULLSTR(migrateFrom), migrateFd, NULLSTR(migratePath),
               snapshot, vmop, flags);

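The renamed helper used in this debug message comes from libvirt's enum machinery: VIR_ENUM_DECL(virDomainAsyncJob) declares TypeToString/TypeFromString converters and VIR_ENUM_IMPL supplies the string table. A small usage sketch:

/* Sketch: converting between virDomainAsyncJob values and their names. */
const char *name = virDomainAsyncJobTypeToString(VIR_ASYNC_JOB_MIGRATION_IN);
int job = virDomainAsyncJobTypeFromString("migration in");
/* The FromString variant returns -1 for strings not in the table. */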
@@ -7922,7 +7922,7 @@ qemuProcessCreatePretendCmdPrepare(virQEMUDriver *driver,
     if (!migrateURI)
         flags |= VIR_QEMU_PROCESS_START_NEW;

-    if (qemuProcessInit(driver, vm, NULL, QEMU_ASYNC_JOB_NONE,
+    if (qemuProcessInit(driver, vm, NULL, VIR_ASYNC_JOB_NONE,
                         !!migrateURI, flags) < 0)
         return -1;

@@ -7993,7 +7993,7 @@ qemuProcessKill(virDomainObj *vm, unsigned int flags)
 int
 qemuProcessBeginStopJob(virQEMUDriver *driver,
                         virDomainObj *vm,
-                        qemuDomainJob job,
+                        virDomainJob job,
                         bool forceKill)
 {
     qemuDomainObjPrivate *priv = vm->privateData;

@@ -8026,7 +8026,7 @@ qemuProcessBeginStopJob(virQEMUDriver *driver,
 void qemuProcessStop(virQEMUDriver *driver,
                      virDomainObj *vm,
                      virDomainShutoffReason reason,
-                     qemuDomainAsyncJob asyncJob,
+                     virDomainAsyncJob asyncJob,
                      unsigned int flags)
 {
     int ret;

@@ -8045,21 +8045,21 @@ void qemuProcessStop(virQEMUDriver *driver,
               vm, vm->def->name, vm->def->id,
               (long long)vm->pid,
               virDomainShutoffReasonTypeToString(reason),
-              qemuDomainAsyncJobTypeToString(asyncJob),
+              virDomainAsyncJobTypeToString(asyncJob),
               flags);

     /* This method is routinely used in clean up paths. Disable error
      * reporting so we don't squash a legit error. */
     virErrorPreserveLast(&orig_err);

-    if (asyncJob != QEMU_ASYNC_JOB_NONE) {
+    if (asyncJob != VIR_ASYNC_JOB_NONE) {
         if (qemuDomainObjBeginNestedJob(driver, vm, asyncJob) < 0)
             goto cleanup;
-    } else if (priv->job.asyncJob != QEMU_ASYNC_JOB_NONE &&
+    } else if (priv->job.asyncJob != VIR_ASYNC_JOB_NONE &&
                priv->job.asyncOwner == virThreadSelfID() &&
-               priv->job.active != QEMU_JOB_ASYNC_NESTED) {
+               priv->job.active != VIR_JOB_ASYNC_NESTED) {
         VIR_WARN("qemuProcessStop called without a nested job (async=%s)",
-                 qemuDomainAsyncJobTypeToString(asyncJob));
+                 virDomainAsyncJobTypeToString(asyncJob));
     }

     if (!virDomainObjIsActive(vm)) {
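For readers unfamiliar with the nested-job idiom checked above: code that already owns an async job still takes a short-lived VIR_JOB_ASYNC_NESTED job around each monitor interaction, normally via the Enter/Exit monitor helpers. A minimal sketch built from calls visible elsewhere in this diff:

/* Sketch: a monitor call made from inside an async job. The Enter helper
 * begins a nested job tied to asyncJob; the Exit helper ends it. */
if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0)
    return -1;
ignore_value(qemuMonitorMigrateCancel(priv->mon));
qemuDomainObjExitMonitor(vm);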
@@ -8368,7 +8368,7 @@ void qemuProcessStop(virQEMUDriver *driver,
     virDomainObjRemoveTransientDef(vm);

 endjob:
-    if (asyncJob != QEMU_ASYNC_JOB_NONE)
+    if (asyncJob != VIR_ASYNC_JOB_NONE)
         qemuDomainObjEndJob(vm);

 cleanup:

@@ -8388,7 +8388,7 @@ qemuProcessAutoDestroy(virDomainObj *dom,

     VIR_DEBUG("vm=%s, conn=%p", dom->def->name, conn);

-    if (priv->job.asyncJob == QEMU_ASYNC_JOB_MIGRATION_IN)
+    if (priv->job.asyncJob == VIR_ASYNC_JOB_MIGRATION_IN)
         stopFlags |= VIR_QEMU_PROCESS_STOP_MIGRATED;

     if (priv->job.asyncJob) {

@@ -8399,11 +8399,11 @@ qemuProcessAutoDestroy(virDomainObj *dom,

     VIR_DEBUG("Killing domain");

-    if (qemuProcessBeginStopJob(driver, dom, QEMU_JOB_DESTROY, true) < 0)
+    if (qemuProcessBeginStopJob(driver, dom, VIR_JOB_DESTROY, true) < 0)
         return;

     qemuProcessStop(driver, dom, VIR_DOMAIN_SHUTOFF_DESTROYED,
-                    QEMU_ASYNC_JOB_NONE, stopFlags);
+                    VIR_ASYNC_JOB_NONE, stopFlags);

     virDomainAuditStop(dom, "destroyed");
     event = virDomainEventLifecycleNewFromObj(dom,

@@ -8447,7 +8447,7 @@ bool qemuProcessAutoDestroyActive(virQEMUDriver *driver,
 int
 qemuProcessRefreshDisks(virQEMUDriver *driver,
                         virDomainObj *vm,
-                        qemuDomainAsyncJob asyncJob)
+                        virDomainAsyncJob asyncJob)
 {
     qemuDomainObjPrivate *priv = vm->privateData;
     bool blockdev = virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_BLOCKDEV);

@@ -8498,7 +8498,7 @@ qemuProcessRefreshDisks(virQEMUDriver *driver,
 static int
 qemuProcessRefreshCPUMigratability(virQEMUDriver *driver,
                                    virDomainObj *vm,
-                                   qemuDomainAsyncJob asyncJob)
+                                   virDomainAsyncJob asyncJob)
 {
     qemuDomainObjPrivate *priv = vm->privateData;
     virDomainDef *def = vm->def;

@@ -8559,7 +8559,7 @@ qemuProcessRefreshCPU(virQEMUDriver *driver,
     if (!vm->def->cpu)
         return 0;

-    if (qemuProcessRefreshCPUMigratability(driver, vm, QEMU_ASYNC_JOB_NONE) < 0)
+    if (qemuProcessRefreshCPUMigratability(driver, vm, VIR_ASYNC_JOB_NONE) < 0)
         return -1;

     if (!(host = virQEMUDriverGetHostCPU(driver))) {

@@ -8594,7 +8594,7 @@ qemuProcessRefreshCPU(virQEMUDriver *driver,
         if (virCPUUpdate(vm->def->os.arch, vm->def->cpu, cpu) < 0)
             return -1;

-        if (qemuProcessUpdateCPU(driver, vm, QEMU_ASYNC_JOB_NONE) < 0)
+        if (qemuProcessUpdateCPU(driver, vm, VIR_ASYNC_JOB_NONE) < 0)
             return -1;
     } else if (!virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_QUERY_CPU_MODEL_EXPANSION)) {
         /* We only try to fix CPUs when the libvirt/QEMU combo used to start

@@ -8755,12 +8755,12 @@ qemuProcessReconnect(void *opaque)
     priv = obj->privateData;

     qemuDomainObjRestoreJob(obj, &oldjob);
-    if (oldjob.asyncJob == QEMU_ASYNC_JOB_MIGRATION_IN)
+    if (oldjob.asyncJob == VIR_ASYNC_JOB_MIGRATION_IN)
         stopFlags |= VIR_QEMU_PROCESS_STOP_MIGRATED;
-    if (oldjob.asyncJob == QEMU_ASYNC_JOB_BACKUP && priv->backup)
+    if (oldjob.asyncJob == VIR_ASYNC_JOB_BACKUP && priv->backup)
         priv->backup->apiFlags = oldjob.apiFlags;

-    if (qemuDomainObjBeginJob(driver, obj, QEMU_JOB_MODIFY) < 0)
+    if (qemuDomainObjBeginJob(driver, obj, VIR_JOB_MODIFY) < 0)
         goto error;
     jobStarted = true;

@@ -8792,7 +8792,7 @@ qemuProcessReconnect(void *opaque)
     tryMonReconn = true;

     /* XXX check PID liveliness & EXE path */
-    if (qemuConnectMonitor(driver, obj, QEMU_ASYNC_JOB_NONE, retry, NULL) < 0)
+    if (qemuConnectMonitor(driver, obj, VIR_ASYNC_JOB_NONE, retry, NULL) < 0)
         goto error;

     priv->machineName = qemuDomainGetMachineName(obj);

@@ -8887,7 +8887,7 @@ qemuProcessReconnect(void *opaque)
     ignore_value(qemuSecurityCheckAllLabel(driver->securityManager,
                                            obj->def));

-    if (qemuDomainRefreshVcpuInfo(driver, obj, QEMU_ASYNC_JOB_NONE, true) < 0)
+    if (qemuDomainRefreshVcpuInfo(driver, obj, VIR_ASYNC_JOB_NONE, true) < 0)
         goto error;

     qemuDomainVcpuPersistOrder(obj->def);

@@ -8895,10 +8895,10 @@ qemuProcessReconnect(void *opaque)
     if (qemuProcessRefreshCPU(driver, obj) < 0)
         goto error;

-    if (qemuDomainUpdateMemoryDeviceInfo(driver, obj, QEMU_ASYNC_JOB_NONE) < 0)
+    if (qemuDomainUpdateMemoryDeviceInfo(driver, obj, VIR_ASYNC_JOB_NONE) < 0)
         goto error;

-    if (qemuProcessDetectIOThreadPIDs(driver, obj, QEMU_ASYNC_JOB_NONE) < 0)
+    if (qemuProcessDetectIOThreadPIDs(driver, obj, VIR_ASYNC_JOB_NONE) < 0)
         goto error;

     if (qemuSecurityReserveLabel(driver->securityManager, obj->def, obj->pid) < 0)

@@ -8908,7 +8908,7 @@ qemuProcessReconnect(void *opaque)

     qemuProcessFiltersInstantiate(obj->def);

-    if (qemuProcessRefreshDisks(driver, obj, QEMU_ASYNC_JOB_NONE) < 0)
+    if (qemuProcessRefreshDisks(driver, obj, VIR_ASYNC_JOB_NONE) < 0)
         goto error;

     /* At this point we've already checked that the startup of the VM was

@@ -8922,16 +8922,16 @@ qemuProcessReconnect(void *opaque)
     }

     if (!virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_BLOCKDEV) &&
-        qemuBlockNodeNamesDetect(driver, obj, QEMU_ASYNC_JOB_NONE) < 0)
+        qemuBlockNodeNamesDetect(driver, obj, VIR_ASYNC_JOB_NONE) < 0)
         goto error;

-    if (qemuRefreshVirtioChannelState(driver, obj, QEMU_ASYNC_JOB_NONE) < 0)
+    if (qemuRefreshVirtioChannelState(driver, obj, VIR_ASYNC_JOB_NONE) < 0)
         goto error;

     /* If querying of guest's RTC failed, report error, but do not kill the domain. */
     qemuRefreshRTC(driver, obj);

-    if (qemuProcessRefreshBalloonState(driver, obj, QEMU_ASYNC_JOB_NONE) < 0)
+    if (qemuProcessRefreshBalloonState(driver, obj, VIR_ASYNC_JOB_NONE) < 0)
         goto error;

     if (qemuProcessRecoverJob(driver, obj, &oldjob, &stopFlags) < 0)

@@ -9030,7 +9030,7 @@ qemuProcessReconnect(void *opaque)
          * thread didn't have a chance to start playing with the domain yet
          * (it's all we can do anyway).
          */
-        qemuProcessStop(driver, obj, state, QEMU_ASYNC_JOB_NONE, stopFlags);
+        qemuProcessStop(driver, obj, state, VIR_ASYNC_JOB_NONE, stopFlags);
     }
     goto cleanup;
 }

@@ -9072,7 +9072,7 @@ qemuProcessReconnectHelper(virDomainObj *obj,
      * object.
      */
     qemuProcessStop(src->driver, obj, VIR_DOMAIN_SHUTOFF_FAILED,
-                    QEMU_ASYNC_JOB_NONE, 0);
+                    VIR_ASYNC_JOB_NONE, 0);
     qemuDomainRemoveInactiveJobLocked(src->driver, obj);

     virDomainObjEndAPI(&obj);

@@ -32,11 +32,11 @@ int qemuProcessPrepareMonitorChr(virDomainChrSourceDef *monConfig,
 int qemuProcessStartCPUs(virQEMUDriver *driver,
                          virDomainObj *vm,
                          virDomainRunningReason reason,
-                         qemuDomainAsyncJob asyncJob);
+                         virDomainAsyncJob asyncJob);
 int qemuProcessStopCPUs(virQEMUDriver *driver,
                         virDomainObj *vm,
                         virDomainPausedReason reason,
-                        qemuDomainAsyncJob asyncJob);
+                        virDomainAsyncJob asyncJob);

 int qemuProcessBuildDestroyMemoryPaths(virQEMUDriver *driver,
                                        virDomainObj *vm,

@@ -85,7 +85,7 @@ int qemuProcessStart(virConnectPtr conn,
                      virQEMUDriver *driver,
                      virDomainObj *vm,
                      virCPUDef *updatedCPU,
-                     qemuDomainAsyncJob asyncJob,
+                     virDomainAsyncJob asyncJob,
                      const char *migrateFrom,
                      int stdin_fd,
                      const char *stdin_path,

@@ -107,7 +107,7 @@ virCommand *qemuProcessCreatePretendCmdBuild(virQEMUDriver *driver,
 int qemuProcessInit(virQEMUDriver *driver,
                     virDomainObj *vm,
                     virCPUDef *updatedCPU,
-                    qemuDomainAsyncJob asyncJob,
+                    virDomainAsyncJob asyncJob,
                     bool migration,
                     unsigned int flags);

@@ -132,7 +132,7 @@ int qemuProcessPrepareHost(virQEMUDriver *driver,
 int qemuProcessLaunch(virConnectPtr conn,
                       virQEMUDriver *driver,
                       virDomainObj *vm,
-                      qemuDomainAsyncJob asyncJob,
+                      virDomainAsyncJob asyncJob,
                       qemuProcessIncomingDef *incoming,
                       virDomainMomentObj *snapshot,
                       virNetDevVPortProfileOp vmop,

@@ -140,13 +140,13 @@ int qemuProcessLaunch(virConnectPtr conn,

 int qemuProcessFinishStartup(virQEMUDriver *driver,
                              virDomainObj *vm,
-                             qemuDomainAsyncJob asyncJob,
+                             virDomainAsyncJob asyncJob,
                              bool startCPUs,
                              virDomainPausedReason pausedReason);

 int qemuProcessRefreshState(virQEMUDriver *driver,
                             virDomainObj *vm,
-                            qemuDomainAsyncJob asyncJob);
+                            virDomainAsyncJob asyncJob);

 typedef enum {
     VIR_QEMU_PROCESS_STOP_MIGRATED = 1 << 0,

@@ -155,12 +155,12 @@ typedef enum {

 int qemuProcessBeginStopJob(virQEMUDriver *driver,
                             virDomainObj *vm,
-                            qemuDomainJob job,
+                            virDomainJob job,
                             bool forceKill);
 void qemuProcessStop(virQEMUDriver *driver,
                      virDomainObj *vm,
                      virDomainShutoffReason reason,
-                     qemuDomainAsyncJob asyncJob,
+                     virDomainAsyncJob asyncJob,
                      unsigned int flags);

 typedef enum {

@@ -200,7 +200,7 @@ int qemuProcessSetupIOThread(virDomainObj *vm,

 int qemuRefreshVirtioChannelState(virQEMUDriver *driver,
                                   virDomainObj *vm,
-                                  qemuDomainAsyncJob asyncJob);
+                                  virDomainAsyncJob asyncJob);

 int qemuProcessRefreshBalloonState(virQEMUDriver *driver,
                                    virDomainObj *vm,

@@ -208,7 +208,7 @@ int qemuProcessRefreshBalloonState(virQEMUDriver *driver,

 int qemuProcessRefreshDisks(virQEMUDriver *driver,
                             virDomainObj *vm,
-                            qemuDomainAsyncJob asyncJob);
+                            virDomainAsyncJob asyncJob);

 int qemuProcessStartManagedPRDaemon(virDomainObj *vm) G_GNUC_NO_INLINE;

@@ -259,7 +259,7 @@ qemuSaveImageCreate(virQEMUDriver *driver,
                     virQEMUSaveData *data,
                     virCommand *compressor,
                     unsigned int flags,
-                    qemuDomainAsyncJob asyncJob)
+                    virDomainAsyncJob asyncJob)
 {
     g_autoptr(virQEMUDriverConfig) cfg = virQEMUDriverGetConfig(driver);
     bool needUnlink = false;

@@ -578,7 +578,7 @@ qemuSaveImageStartVM(virConnectPtr conn,
                      const char *path,
                      bool start_paused,
                      bool reset_nvram,
-                     qemuDomainAsyncJob asyncJob)
+                     virDomainAsyncJob asyncJob)
 {
     qemuDomainObjPrivate *priv = vm->privateData;
     int ret = -1;

@@ -68,7 +68,7 @@ qemuSaveImageStartVM(virConnectPtr conn,
                      const char *path,
                      bool start_paused,
                      bool reset_nvram,
-                     qemuDomainAsyncJob asyncJob)
+                     virDomainAsyncJob asyncJob)
     ATTRIBUTE_NONNULL(4) ATTRIBUTE_NONNULL(5) ATTRIBUTE_NONNULL(6);

 int

@@ -97,7 +97,7 @@ qemuSaveImageCreate(virQEMUDriver *driver,
                     virQEMUSaveData *data,
                     virCommand *compressor,
                     unsigned int flags,
-                    qemuDomainAsyncJob asyncJob);
+                    virDomainAsyncJob asyncJob);

 int
 virQEMUSaveDataWrite(virQEMUSaveData *data,

@@ -304,7 +304,7 @@ qemuSnapshotCreateActiveInternal(virQEMUDriver *driver,
      * domain. Thus we stop and start CPUs ourselves.
      */
     if (qemuProcessStopCPUs(driver, vm, VIR_DOMAIN_PAUSED_SAVE,
-                            QEMU_ASYNC_JOB_SNAPSHOT) < 0)
+                            VIR_ASYNC_JOB_SNAPSHOT) < 0)
         goto cleanup;

     resume = true;

@@ -316,7 +316,7 @@ qemuSnapshotCreateActiveInternal(virQEMUDriver *driver,
     }

     if (qemuDomainObjEnterMonitorAsync(driver, vm,
-                                       QEMU_ASYNC_JOB_SNAPSHOT) < 0) {
+                                       VIR_ASYNC_JOB_SNAPSHOT) < 0) {
         resume = false;
         goto cleanup;
     }

@@ -333,7 +333,7 @@ qemuSnapshotCreateActiveInternal(virQEMUDriver *driver,
         event = virDomainEventLifecycleNewFromObj(vm, VIR_DOMAIN_EVENT_STOPPED,
                                                   VIR_DOMAIN_EVENT_STOPPED_FROM_SNAPSHOT);
         qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_FROM_SNAPSHOT,
-                        QEMU_ASYNC_JOB_SNAPSHOT, 0);
+                        VIR_ASYNC_JOB_SNAPSHOT, 0);
         virDomainAuditStop(vm, "from-snapshot");
         resume = false;
     }

@@ -342,7 +342,7 @@ qemuSnapshotCreateActiveInternal(virQEMUDriver *driver,
     if (resume && virDomainObjIsActive(vm) &&
         qemuProcessStartCPUs(driver, vm,
                              VIR_DOMAIN_RUNNING_UNPAUSED,
-                             QEMU_ASYNC_JOB_SNAPSHOT) < 0) {
+                             VIR_ASYNC_JOB_SNAPSHOT) < 0) {
         event = virDomainEventLifecycleNewFromObj(vm,
                          VIR_DOMAIN_EVENT_SUSPENDED,
                          VIR_DOMAIN_EVENT_SUSPENDED_API_ERROR);

@@ -863,7 +863,7 @@ static void
 qemuSnapshotDiskCleanup(qemuSnapshotDiskData *data,
                         size_t ndata,
                         virDomainObj *vm,
-                        qemuDomainAsyncJob asyncJob)
+                        virDomainAsyncJob asyncJob)
 {
     qemuDomainObjPrivate *priv = vm->privateData;
     virQEMUDriver *driver = priv->driver;

@@ -922,7 +922,7 @@ struct _qemuSnapshotDiskContext {

     /* needed for automatic cleanup of 'dd' */
     virDomainObj *vm;
-    qemuDomainAsyncJob asyncJob;
+    virDomainAsyncJob asyncJob;
 };

 typedef struct _qemuSnapshotDiskContext qemuSnapshotDiskContext;

@@ -931,7 +931,7 @@ typedef struct _qemuSnapshotDiskContext qemuSnapshotDiskContext;
 qemuSnapshotDiskContext *
 qemuSnapshotDiskContextNew(size_t ndisks,
                            virDomainObj *vm,
-                           qemuDomainAsyncJob asyncJob)
+                           virDomainAsyncJob asyncJob)
 {
     qemuDomainObjPrivate *priv = vm->privateData;
     virQEMUDriver *driver = priv->driver;

@@ -1008,7 +1008,7 @@ qemuSnapshotDiskPrepareOneBlockdev(virQEMUDriver *driver,
                                    virQEMUDriverConfig *cfg,
                                    bool reuse,
                                    GHashTable *blockNamedNodeData,
-                                   qemuDomainAsyncJob asyncJob)
+                                   virDomainAsyncJob asyncJob)
 {
     qemuDomainObjPrivate *priv = vm->privateData;
     g_autoptr(virStorageSource) terminator = NULL;

@@ -1165,7 +1165,7 @@ qemuSnapshotDiskPrepareActiveExternal(virDomainObj *vm,
                                       virDomainMomentObj *snap,
                                       bool reuse,
                                       GHashTable *blockNamedNodeData,
-                                      qemuDomainAsyncJob asyncJob)
+                                      virDomainAsyncJob asyncJob)
 {
     g_autoptr(qemuSnapshotDiskContext) snapctxt = NULL;
     size_t i;

@@ -1319,7 +1319,7 @@ qemuSnapshotCreateActiveExternalDisks(virDomainObj *vm,
                                       virDomainMomentObj *snap,
                                       GHashTable *blockNamedNodeData,
                                       unsigned int flags,
-                                      qemuDomainAsyncJob asyncJob)
+                                      virDomainAsyncJob asyncJob)
 {
     bool reuse = (flags & VIR_DOMAIN_SNAPSHOT_CREATE_REUSE_EXT) != 0;
     g_autoptr(qemuSnapshotDiskContext) snapctxt = NULL;
@@ -1371,7 +1371,7 @@ qemuSnapshotCreateActiveExternal(virQEMUDriver *driver,
     if (flags & VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE) {
         int frozen;

-        if (qemuDomainObjBeginAgentJob(driver, vm, QEMU_AGENT_JOB_MODIFY) < 0)
+        if (qemuDomainObjBeginAgentJob(driver, vm, VIR_AGENT_JOB_MODIFY) < 0)
             goto cleanup;

         if (virDomainObjCheckActive(vm) < 0) {
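Agent jobs are serialized independently of monitor jobs, so guest-agent calls such as the filesystem freeze here do not block monitor access. A minimal sketch of the begin/end pairing; the qemuSnapshotFSFreeze arguments are assumed:

/* Sketch: serialize guest-agent access around a filesystem freeze. */
if (qemuDomainObjBeginAgentJob(driver, vm, VIR_AGENT_JOB_MODIFY) < 0)
    goto cleanup;
frozen = qemuSnapshotFSFreeze(vm, NULL, 0); /* mountpoints list assumed */
qemuDomainObjEndAgentJob(vm);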
@@ -1405,7 +1405,7 @@ qemuSnapshotCreateActiveExternal(virQEMUDriver *driver,
      * when the user wants to manually snapshot some disks */
     if (((memory || has_manual) && !(flags & VIR_DOMAIN_SNAPSHOT_CREATE_LIVE))) {
         if (qemuProcessStopCPUs(driver, vm, VIR_DOMAIN_PAUSED_SNAPSHOT,
-                                QEMU_ASYNC_JOB_SNAPSHOT) < 0)
+                                VIR_ASYNC_JOB_SNAPSHOT) < 0)
             goto cleanup;

         if (!virDomainObjIsActive(vm)) {

@@ -1420,7 +1420,7 @@ qemuSnapshotCreateActiveExternal(virQEMUDriver *driver,
      * migration step as qemu deactivates bitmaps after migration so the result
      * would be wrong */
     if (virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_BLOCKDEV) &&
-        !(blockNamedNodeData = qemuBlockGetNamedNodeData(vm, QEMU_ASYNC_JOB_SNAPSHOT)))
+        !(blockNamedNodeData = qemuBlockGetNamedNodeData(vm, VIR_ASYNC_JOB_SNAPSHOT)))
         goto cleanup;

     /* do the memory snapshot if necessary */

@@ -1434,8 +1434,8 @@ qemuSnapshotCreateActiveExternal(virQEMUDriver *driver,

     /* allow the migration job to be cancelled or the domain to be paused */
     qemuDomainObjSetAsyncJobMask(vm, (QEMU_JOB_DEFAULT_MASK |
-                                      JOB_MASK(QEMU_JOB_SUSPEND) |
-                                      JOB_MASK(QEMU_JOB_MIGRATION_OP)));
+                                      JOB_MASK(VIR_JOB_SUSPEND) |
+                                      JOB_MASK(VIR_JOB_MIGRATION_OP)));

     if ((compressed = qemuSaveImageGetCompressionProgram(cfg->snapshotImageFormat,
                                                          &compressor,

@@ -1458,7 +1458,7 @@ qemuSnapshotCreateActiveExternal(virQEMUDriver *driver,

     if ((ret = qemuSaveImageCreate(driver, vm, snapdef->memorysnapshotfile,
                                    data, compressor, 0,
-                                   QEMU_ASYNC_JOB_SNAPSHOT)) < 0)
+                                   VIR_ASYNC_JOB_SNAPSHOT)) < 0)
         goto cleanup;

     /* the memory image was created, remove it on errors */

@@ -1473,7 +1473,7 @@ qemuSnapshotCreateActiveExternal(virQEMUDriver *driver,

     if ((ret = qemuSnapshotCreateActiveExternalDisks(vm, snap,
                                                      blockNamedNodeData, flags,
-                                                     QEMU_ASYNC_JOB_SNAPSHOT)) < 0)
+                                                     VIR_ASYNC_JOB_SNAPSHOT)) < 0)
         goto cleanup;

     /* the snapshot is complete now */

@@ -1481,7 +1481,7 @@ qemuSnapshotCreateActiveExternal(virQEMUDriver *driver,
         event = virDomainEventLifecycleNewFromObj(vm, VIR_DOMAIN_EVENT_STOPPED,
                                                   VIR_DOMAIN_EVENT_STOPPED_FROM_SNAPSHOT);
         qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_FROM_SNAPSHOT,
-                        QEMU_ASYNC_JOB_SNAPSHOT, 0);
+                        VIR_ASYNC_JOB_SNAPSHOT, 0);
         virDomainAuditStop(vm, "from-snapshot");
         resume = false;
         thaw = false;

@@ -1503,7 +1503,7 @@ qemuSnapshotCreateActiveExternal(virQEMUDriver *driver,
     if (resume && virDomainObjIsActive(vm) &&
         qemuProcessStartCPUs(driver, vm,
                              VIR_DOMAIN_RUNNING_UNPAUSED,
-                             QEMU_ASYNC_JOB_SNAPSHOT) < 0) {
+                             VIR_ASYNC_JOB_SNAPSHOT) < 0) {
         event = virDomainEventLifecycleNewFromObj(vm,
                          VIR_DOMAIN_EVENT_SUSPENDED,
                          VIR_DOMAIN_EVENT_SUSPENDED_API_ERROR);

@@ -1517,7 +1517,7 @@ qemuSnapshotCreateActiveExternal(virQEMUDriver *driver,
     }

     if (thaw &&
-        qemuDomainObjBeginAgentJob(driver, vm, QEMU_AGENT_JOB_MODIFY) >= 0 &&
+        qemuDomainObjBeginAgentJob(driver, vm, VIR_AGENT_JOB_MODIFY) >= 0 &&
         virDomainObjIsActive(vm)) {
         /* report error only on an otherwise successful snapshot */
         if (qemuSnapshotFSThaw(vm, ret == 0) < 0)

@@ -1889,11 +1889,11 @@ qemuSnapshotCreateXML(virDomainPtr domain,
      * a regular job, so we need to set the job mask to disallow query as
      * 'savevm' blocks the monitor. External snapshot will then modify the
      * job mask appropriately. */
-    if (qemuDomainObjBeginAsyncJob(driver, vm, QEMU_ASYNC_JOB_SNAPSHOT,
+    if (qemuDomainObjBeginAsyncJob(driver, vm, VIR_ASYNC_JOB_SNAPSHOT,
                                    VIR_DOMAIN_JOB_OPERATION_SNAPSHOT, flags) < 0)
         return NULL;

-    qemuDomainObjSetAsyncJobMask(vm, QEMU_JOB_NONE);
+    qemuDomainObjSetAsyncJobMask(vm, VIR_JOB_NONE);

     if (flags & VIR_DOMAIN_SNAPSHOT_CREATE_REDEFINE) {
         snapshot = qemuSnapshotRedefine(vm, domain, def, driver, cfg, flags);
@@ -2067,7 +2067,7 @@ qemuSnapshotRevertActive(virDomainObj *vm,
         /* Transitions 5, 6, 8, 9 */
         qemuProcessStop(driver, vm,
                         VIR_DOMAIN_SHUTOFF_FROM_SNAPSHOT,
-                        QEMU_ASYNC_JOB_START, 0);
+                        VIR_ASYNC_JOB_START, 0);
         virDomainAuditStop(vm, "from-snapshot");
         detail = VIR_DOMAIN_EVENT_STOPPED_FROM_SNAPSHOT;
         event = virDomainEventLifecycleNewFromObj(vm,

@@ -2092,7 +2092,7 @@ qemuSnapshotRevertActive(virDomainObj *vm,

         rc = qemuProcessStart(snapshot->domain->conn, driver, vm,
                               cookie ? cookie->cpu : NULL,
-                              QEMU_ASYNC_JOB_START, NULL, -1, NULL, snap,
+                              VIR_ASYNC_JOB_START, NULL, -1, NULL, snap,
                               VIR_NETDEV_VPORT_PROFILE_OP_CREATE,
                               start_flags);
         virDomainAuditStart(vm, "from-snapshot", rc >= 0);

@@ -2125,7 +2125,7 @@ qemuSnapshotRevertActive(virDomainObj *vm,
         }
         rc = qemuProcessStartCPUs(driver, vm,
                                   VIR_DOMAIN_RUNNING_FROM_SNAPSHOT,
-                                  QEMU_ASYNC_JOB_START);
+                                  VIR_ASYNC_JOB_START);
         if (rc < 0)
             return -1;
     }

@@ -2188,7 +2188,7 @@ qemuSnapshotRevertInactive(virDomainObj *vm,
     if (virDomainObjIsActive(vm)) {
         /* Transitions 4, 7 */
         qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_FROM_SNAPSHOT,
-                        QEMU_ASYNC_JOB_START, 0);
+                        VIR_ASYNC_JOB_START, 0);
         virDomainAuditStop(vm, "from-snapshot");
         detail = VIR_DOMAIN_EVENT_STOPPED_FROM_SNAPSHOT;
         event = virDomainEventLifecycleNewFromObj(vm,

@@ -2215,7 +2215,7 @@ qemuSnapshotRevertInactive(virDomainObj *vm,
     start_flags |= paused ? VIR_QEMU_PROCESS_START_PAUSED : 0;

     rc = qemuProcessStart(snapshot->domain->conn, driver, vm, NULL,
-                          QEMU_ASYNC_JOB_START, NULL, -1, NULL, NULL,
+                          VIR_ASYNC_JOB_START, NULL, -1, NULL, NULL,
                           VIR_NETDEV_VPORT_PROFILE_OP_CREATE,
                           start_flags);
     virDomainAuditStart(vm, "from-snapshot", rc >= 0);

@@ -2394,7 +2394,7 @@ qemuSnapshotDelete(virDomainObj *vm,
                   VIR_DOMAIN_SNAPSHOT_DELETE_METADATA_ONLY |
                   VIR_DOMAIN_SNAPSHOT_DELETE_CHILDREN_ONLY, -1);

-    if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+    if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_MODIFY) < 0)
         return -1;

     if (!(snap = qemuSnapObjFromSnapshot(vm, snapshot)))

@@ -61,7 +61,7 @@ typedef struct _qemuSnapshotDiskContext qemuSnapshotDiskContext;
 qemuSnapshotDiskContext *
 qemuSnapshotDiskContextNew(size_t ndisks,
                            virDomainObj *vm,
-                           qemuDomainAsyncJob asyncJob);
+                           virDomainAsyncJob asyncJob);

 void
 qemuSnapshotDiskContextCleanup(qemuSnapshotDiskContext *snapctxt);