qemu: Refactor qemuDomainGetBlockInfo

Change the code so that it queries the monitor when the VM is alive.
Peter Krempa 2015-06-23 18:22:54 +02:00
parent 0d130a011c
commit 15fa84acbb
1 changed file with 53 additions and 37 deletions

@@ -11800,10 +11800,12 @@ qemuDomainGetBlockInfo(virDomainPtr dom,
     virQEMUDriverPtr driver = dom->conn->privateData;
     virDomainObjPtr vm;
     int ret = -1;
-    virDomainDiskDefPtr disk = NULL;
-    virStorageSourcePtr src;
-    bool activeFail = false;
+    virDomainDiskDefPtr disk;
     virQEMUDriverConfigPtr cfg = NULL;
+    int rc;
+    virHashTablePtr stats = NULL;
+    qemuBlockStats *entry;
+    char *alias;
 
     virCheckFlags(0, -1);
 
@@ -11815,11 +11817,6 @@ qemuDomainGetBlockInfo(virDomainPtr dom,
     if (virDomainGetBlockInfoEnsureACL(dom->conn, vm->def) < 0)
         goto cleanup;
 
-    /* Technically, we only need a job if we are going to query the
-     * monitor, which is only for active domains that are using
-     * non-raw block devices.  But it is easier to share code if we
-     * always grab a job; furthermore, grabbing the job ensures that
-     * hot-plug won't change disk behind our backs. */
     if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0)
         goto cleanup;
 
@@ -11829,53 +11826,72 @@ qemuDomainGetBlockInfo(virDomainPtr dom,
         goto endjob;
     }
 
-    src = disk->src;
-    if (virStorageSourceIsEmpty(src)) {
+    if (virStorageSourceIsEmpty(disk->src)) {
         virReportError(VIR_ERR_INVALID_ARG,
                        _("disk '%s' does not currently have a source assigned"),
                        path);
         goto endjob;
     }
 
-    if ((ret = qemuStorageLimitsRefresh(driver, cfg, vm, src)) < 0)
+    /* for inactive domains we have to peek into the files */
+    if (!virDomainObjIsActive(vm)) {
+        if ((qemuStorageLimitsRefresh(driver, cfg, vm, disk->src)) < 0)
+            goto endjob;
+
+        info->capacity = disk->src->capacity;
+        info->allocation = disk->src->allocation;
+        info->physical = disk->src->physical;
+
+        ret = 0;
+        goto endjob;
+    }
+
+    if (!disk->info.alias ||
+        !(alias = qemuDomainStorageAlias(disk->info.alias, 0))) {
+        virReportError(VIR_ERR_INTERNAL_ERROR,
+                       _("missing disk device alias name for %s"), disk->dst);
+        goto endjob;
+    }
+
+    qemuDomainObjEnterMonitor(driver, vm);
+    rc = qemuMonitorGetAllBlockStatsInfo(qemuDomainGetMonitor(vm),
+                                         &stats, false);
+    if (rc >= 0)
+        rc = qemuMonitorBlockStatsUpdateCapacity(qemuDomainGetMonitor(vm),
+                                                 stats, false);
+
+    if (qemuDomainObjExitMonitor(driver, vm) < 0 || rc < 0)
         goto endjob;
 
-    if (!src->allocation) {
-        qemuDomainObjPrivatePtr priv = vm->privateData;
+    if (!(entry = virHashLookup(stats, alias))) {
+        virReportError(VIR_ERR_INTERNAL_ERROR,
+                       _("failed to gather stats for disk '%s'"), disk->dst);
+        goto endjob;
+    }
 
-        /* If the guest is not running, then success/failure return
-         * depends on whether domain is persistent
-         */
-        if (!virDomainObjIsActive(vm)) {
-            activeFail = true;
+    if (!entry->wr_highest_offset_valid) {
+        if (virStorageSourceGetActualType(disk->src) == VIR_STORAGE_TYPE_BLOCK &&
+            disk->src->format != VIR_STORAGE_FILE_RAW) {
+            virReportError(VIR_ERR_INTERNAL_ERROR,
+                           _("failed to query the maximum written offset of "
+                             "block device '%s'"), disk->dst);
             goto endjob;
         }
 
-        qemuDomainObjEnterMonitor(driver, vm);
-        ret = qemuMonitorGetBlockExtent(priv->mon,
-                                        disk->info.alias,
-                                        &src->allocation);
-        if (qemuDomainObjExitMonitor(driver, vm) < 0)
-            ret = -1;
+        info->allocation = entry->physical;
+    } else {
+        info->allocation = entry->wr_highest_offset;
     }
 
-    if (ret == 0) {
-        info->capacity = src->capacity;
-        info->allocation = src->allocation;
-        info->physical = src->physical;
-    }
+    info->capacity = entry->capacity;
+    info->physical = entry->physical;
 
+    ret = 0;
  endjob:
     qemuDomainObjEndJob(driver, vm);
 
  cleanup:
-    /* If we failed to get data from a domain because it's inactive and
-     * it's not a persistent domain, then force failure.
-     */
-    if (activeFail && vm && !vm->persistent) {
-        virReportError(VIR_ERR_OPERATION_INVALID, "%s",
-                       _("domain is not running"));
-        ret = -1;
-    }
+    virHashFree(stats);
     virDomainObjEndAPI(&vm);
     virObjectUnref(cfg);
     return ret;
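
For context, a minimal caller-side sketch (not part of this commit) of the public API this driver code backs. virDomainGetBlockInfo fills the capacity/allocation/physical fields that, after this refactor, come from the monitor's bulk block stats for a running domain and from the image file for an inactive one. The connection URI, domain name "demo", and disk target "vda" below are illustrative assumptions.

/* Illustrative sketch only: URI, domain name and disk target are assumptions. */
#include <stdio.h>
#include <stdlib.h>
#include <libvirt/libvirt.h>

int main(void)
{
    virConnectPtr conn = virConnectOpenReadOnly("qemu:///system");
    virDomainPtr dom = NULL;
    virDomainBlockInfo info;
    int ret = EXIT_FAILURE;

    if (!conn)
        return EXIT_FAILURE;

    if (!(dom = virDomainLookupByName(conn, "demo"))) /* hypothetical domain */
        goto cleanup;

    /* For a running domain this path now queries the qemu monitor;
     * for an inactive one libvirt inspects the image file instead. */
    if (virDomainGetBlockInfo(dom, "vda", &info, 0) == 0) {
        printf("capacity=%llu allocation=%llu physical=%llu\n",
               info.capacity, info.allocation, info.physical);
        ret = EXIT_SUCCESS;
    }

 cleanup:
    if (dom)
        virDomainFree(dom);
    virConnectClose(conn);
    return ret;
}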