qemu: Refactor qemuDomainGetBlockInfo

Change the code so that it queries the monitor when the VM is alive.
This commit is contained in:
Peter Krempa 2015-06-23 18:22:54 +02:00
parent 0d130a011c
commit 15fa84acbb
1 changed file with 53 additions and 37 deletions

View File

@ -11800,10 +11800,12 @@ qemuDomainGetBlockInfo(virDomainPtr dom,
virQEMUDriverPtr driver = dom->conn->privateData; virQEMUDriverPtr driver = dom->conn->privateData;
virDomainObjPtr vm; virDomainObjPtr vm;
int ret = -1; int ret = -1;
virDomainDiskDefPtr disk = NULL; virDomainDiskDefPtr disk;
virStorageSourcePtr src;
bool activeFail = false;
virQEMUDriverConfigPtr cfg = NULL; virQEMUDriverConfigPtr cfg = NULL;
int rc;
virHashTablePtr stats = NULL;
qemuBlockStats *entry;
char *alias;
virCheckFlags(0, -1); virCheckFlags(0, -1);
@ -11815,11 +11817,6 @@ qemuDomainGetBlockInfo(virDomainPtr dom,
if (virDomainGetBlockInfoEnsureACL(dom->conn, vm->def) < 0) if (virDomainGetBlockInfoEnsureACL(dom->conn, vm->def) < 0)
goto cleanup; goto cleanup;
/* Technically, we only need a job if we are going to query the
* monitor, which is only for active domains that are using
* non-raw block devices. But it is easier to share code if we
* always grab a job; furthermore, grabbing the job ensures that
* hot-plug won't change disk behind our backs. */
if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0) if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0)
goto cleanup; goto cleanup;
@ -11829,53 +11826,72 @@ qemuDomainGetBlockInfo(virDomainPtr dom,
goto endjob; goto endjob;
} }
src = disk->src; if (virStorageSourceIsEmpty(disk->src)) {
if (virStorageSourceIsEmpty(src)) {
virReportError(VIR_ERR_INVALID_ARG, virReportError(VIR_ERR_INVALID_ARG,
_("disk '%s' does not currently have a source assigned"), _("disk '%s' does not currently have a source assigned"),
path); path);
goto endjob; goto endjob;
} }
if ((ret = qemuStorageLimitsRefresh(driver, cfg, vm, src)) < 0) /* for inactive domains we have to peek into the files */
if (!virDomainObjIsActive(vm)) {
if ((qemuStorageLimitsRefresh(driver, cfg, vm, disk->src)) < 0)
goto endjob; goto endjob;
if (!src->allocation) { info->capacity = disk->src->capacity;
qemuDomainObjPrivatePtr priv = vm->privateData; info->allocation = disk->src->allocation;
info->physical = disk->src->physical;
/* If the guest is not running, then success/failure return ret = 0;
* depends on whether domain is persistent goto endjob;
*/ }
if (!virDomainObjIsActive(vm)) {
activeFail = true; if (!disk->info.alias ||
!(alias = qemuDomainStorageAlias(disk->info.alias, 0))) {
virReportError(VIR_ERR_INTERNAL_ERROR,
_("missing disk device alias name for %s"), disk->dst);
goto endjob; goto endjob;
} }
qemuDomainObjEnterMonitor(driver, vm); qemuDomainObjEnterMonitor(driver, vm);
ret = qemuMonitorGetBlockExtent(priv->mon, rc = qemuMonitorGetAllBlockStatsInfo(qemuDomainGetMonitor(vm),
disk->info.alias, &stats, false);
&src->allocation); if (rc >= 0)
if (qemuDomainObjExitMonitor(driver, vm) < 0) rc = qemuMonitorBlockStatsUpdateCapacity(qemuDomainGetMonitor(vm),
ret = -1; stats, false);
if (qemuDomainObjExitMonitor(driver, vm) < 0 || rc < 0)
goto endjob;
if (!(entry = virHashLookup(stats, alias))) {
virReportError(VIR_ERR_INTERNAL_ERROR,
_("failed to gather stats for disk '%s'"), disk->dst);
goto endjob;
} }
if (ret == 0) { if (!entry->wr_highest_offset_valid) {
info->capacity = src->capacity; if (virStorageSourceGetActualType(disk->src) == VIR_STORAGE_TYPE_BLOCK &&
info->allocation = src->allocation; disk->src->format != VIR_STORAGE_FILE_RAW) {
info->physical = src->physical; virReportError(VIR_ERR_INTERNAL_ERROR,
_("failed to query the maximum written offset of "
"block device '%s'"), disk->dst);
goto endjob;
} }
info->allocation = entry->physical;
} else {
info->allocation = entry->wr_highest_offset;
}
info->capacity = entry->capacity;
info->physical = entry->physical;
ret = 0;
endjob: endjob:
qemuDomainObjEndJob(driver, vm); qemuDomainObjEndJob(driver, vm);
cleanup: cleanup:
/* If we failed to get data from a domain because it's inactive and virHashFree(stats);
* it's not a persistent domain, then force failure.
*/
if (activeFail && vm && !vm->persistent) {
virReportError(VIR_ERR_OPERATION_INVALID, "%s",
_("domain is not running"));
ret = -1;
}
virDomainObjEndAPI(&vm); virDomainObjEndAPI(&vm);
virObjectUnref(cfg); virObjectUnref(cfg);
return ret; return ret;