From eb084a733b79520e6762cca643d105594c5256db Mon Sep 17 00:00:00 2001
From: Jiri Denemark
Date: Fri, 27 Nov 2015 12:30:09 +0100
Subject: [PATCH] qemu: Report more migration statistics

memory_dirty_rate corresponds to dirty-pages-rate in QEMU and
memory_iteration is what QEMU reports in dirty-sync-count.

Signed-off-by: Jiri Denemark
---
 include/libvirt/libvirt-domain.h | 19 +++++++++++++++++++
 src/qemu/qemu_domain.c           |  8 ++++++++
 src/qemu/qemu_migration.c        | 12 ++++++++++++
 src/qemu/qemu_monitor.h          |  2 ++
 src/qemu/qemu_monitor_json.c     |  4 ++++
 tools/virsh-domain.c             | 16 ++++++++++++++++
 6 files changed, 61 insertions(+)

diff --git a/include/libvirt/libvirt-domain.h b/include/libvirt/libvirt-domain.h
index a1ea6a5d07..d26faa5db6 100644
--- a/include/libvirt/libvirt-domain.h
+++ b/include/libvirt/libvirt-domain.h
@@ -2724,6 +2724,25 @@ int virDomainAbortJob(virDomainPtr dom);
  */
 # define VIR_DOMAIN_JOB_MEMORY_BPS "memory_bps"

+/** VIR_DOMAIN_JOB_MEMORY_DIRTY_RATE:
+ *
+ * virDomainGetJobStats field: number of memory pages dirtied by the guest
+ * per second, as VIR_TYPED_PARAM_ULLONG. This statistic makes sense only
+ * when live migration is running.
+ */
+# define VIR_DOMAIN_JOB_MEMORY_DIRTY_RATE "memory_dirty_rate"
+
+/**
+ * VIR_DOMAIN_JOB_MEMORY_ITERATION:
+ *
+ * virDomainGetJobStats field: current iteration over the domain's memory
+ * during live migration, as VIR_TYPED_PARAM_ULLONG. This is set to zero
+ * when memory starts to be transferred and the value is increased by one
+ * every time a new iteration is started to transfer memory pages dirtied
+ * since the last iteration.
+ */
+# define VIR_DOMAIN_JOB_MEMORY_ITERATION "memory_iteration"
+
 /**
  * VIR_DOMAIN_JOB_DISK_TOTAL:
  *
diff --git a/src/qemu/qemu_domain.c b/src/qemu/qemu_domain.c
index e76e76b9cb..080f508eb9 100644
--- a/src/qemu/qemu_domain.c
+++ b/src/qemu/qemu_domain.c
@@ -383,6 +383,14 @@ qemuDomainJobInfoToParams(qemuDomainJobInfoPtr jobInfo,
             goto error;
     }

+    if (virTypedParamsAddULLong(&par, &npar, &maxpar,
+                                VIR_DOMAIN_JOB_MEMORY_DIRTY_RATE,
+                                stats->ram_dirty_rate) < 0 ||
+        virTypedParamsAddULLong(&par, &npar, &maxpar,
+                                VIR_DOMAIN_JOB_MEMORY_ITERATION,
+                                stats->ram_iteration) < 0)
+        goto error;
+
     if (virTypedParamsAddULLong(&par, &npar, &maxpar,
                                 VIR_DOMAIN_JOB_DISK_TOTAL,
                                 stats->disk_total) < 0 ||
diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c
index cd4ed94e6d..290e6205ae 100644
--- a/src/qemu/qemu_migration.c
+++ b/src/qemu/qemu_migration.c
@@ -750,6 +750,13 @@ qemuMigrationCookieStatisticsXMLFormat(virBufferPtr buf,
                           stats->ram_normal_bytes);
     }

+    virBufferAsprintf(buf, "<%1$s>%2$llu</%1$s>\n",
+                      VIR_DOMAIN_JOB_MEMORY_DIRTY_RATE,
+                      stats->ram_dirty_rate);
+    virBufferAsprintf(buf, "<%1$s>%2$llu</%1$s>\n",
+                      VIR_DOMAIN_JOB_MEMORY_ITERATION,
+                      stats->ram_iteration);
+
     virBufferAsprintf(buf, "<%1$s>%2$llu</%1$s>\n",
                       VIR_DOMAIN_JOB_DISK_TOTAL,
                       stats->disk_total);
@@ -1100,6 +1107,11 @@ qemuMigrationCookieStatisticsXMLParse(xmlXPathContextPtr ctxt)
     virXPathULongLong("string(./" VIR_DOMAIN_JOB_MEMORY_NORMAL_BYTES "[1])",
                       ctxt, &stats->ram_normal_bytes);

+    virXPathULongLong("string(./" VIR_DOMAIN_JOB_MEMORY_DIRTY_RATE "[1])",
+                      ctxt, &stats->ram_dirty_rate);
+    virXPathULongLong("string(./" VIR_DOMAIN_JOB_MEMORY_ITERATION "[1])",
+                      ctxt, &stats->ram_iteration);
+
     virXPathULongLong("string(./" VIR_DOMAIN_JOB_DISK_TOTAL "[1])",
                       ctxt, &stats->disk_total);
     virXPathULongLong("string(./" VIR_DOMAIN_JOB_DISK_PROCESSED "[1])",
diff --git a/src/qemu/qemu_monitor.h b/src/qemu/qemu_monitor.h
index 4f1c8d37e6..4193ad2fa3 100644
--- a/src/qemu/qemu_monitor.h
+++ b/src/qemu/qemu_monitor.h
@@ -493,6 +493,8 @@ struct _qemuMonitorMigrationStats {
     unsigned long long ram_duplicate;
     unsigned long long ram_normal;
     unsigned long long ram_normal_bytes;
+    unsigned long long ram_dirty_rate;
+    unsigned long long ram_iteration;

     unsigned long long disk_transferred;
     unsigned long long disk_remaining;
diff --git a/src/qemu/qemu_monitor_json.c b/src/qemu/qemu_monitor_json.c
index 50d05b4424..077be3abfb 100644
--- a/src/qemu/qemu_monitor_json.c
+++ b/src/qemu/qemu_monitor_json.c
@@ -2520,6 +2520,10 @@ qemuMonitorJSONGetMigrationStatsReply(virJSONValuePtr reply,
                                                       &stats->ram_normal));
         ignore_value(virJSONValueObjectGetNumberUlong(ram, "normal-bytes",
                                                       &stats->ram_normal_bytes));
+        ignore_value(virJSONValueObjectGetNumberUlong(ram, "dirty-pages-rate",
+                                                      &stats->ram_dirty_rate));
+        ignore_value(virJSONValueObjectGetNumberUlong(ram, "dirty-sync-count",
+                                                      &stats->ram_iteration));

         disk = virJSONValueObjectGetObject(ret, "disk");
         if (disk) {
diff --git a/tools/virsh-domain.c b/tools/virsh-domain.c
index 3594c6f2f9..7c65bf4886 100644
--- a/tools/virsh-domain.c
+++ b/tools/virsh-domain.c
@@ -6045,6 +6045,22 @@ cmdDomjobinfo(vshControl *ctl, const vshCmd *cmd)
             vshPrint(ctl, "%-17s %-.3lf %s/s\n",
                      _("Memory bandwidth:"), val, unit);
         }
+
+        if ((rc = virTypedParamsGetULLong(params, nparams,
+                                          VIR_DOMAIN_JOB_MEMORY_DIRTY_RATE,
+                                          &value)) < 0) {
+            goto save_error;
+        } else if (rc) {
+            vshPrint(ctl, "%-17s %-12llu pages/s\n", _("Dirty rate:"), value);
+        }
+
+        if ((rc = virTypedParamsGetULLong(params, nparams,
+                                          VIR_DOMAIN_JOB_MEMORY_ITERATION,
+                                          &value)) < 0) {
+            goto save_error;
+        } else if (rc) {
+            vshPrint(ctl, "%-17s %-12llu\n", _("Iteration:"), value);
+        }
     }

     if (info.fileTotal || info.fileRemaining || info.fileProcessed) {
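
For reference, a minimal caller-side sketch of how the two new fields can be
read through the public API, mirroring what the virsh-domain.c hunk above
does. The connection URI ("qemu:///system") and the domain name
("migrating-guest") are placeholders for this illustration, not anything
mandated by the patch; the fields are simply absent when the server or QEMU
does not report them.

/* Illustrative only: read the job statistics of a domain that is being
 * live migrated and print the two fields added by this patch.  The URI
 * and the domain name below are placeholders. */
#include <stdio.h>
#include <stdlib.h>
#include <libvirt/libvirt.h>

int main(void)
{
    virConnectPtr conn = NULL;
    virDomainPtr dom = NULL;
    virTypedParameterPtr params = NULL;
    int nparams = 0;
    int type = VIR_DOMAIN_JOB_NONE;
    unsigned long long dirty_rate = 0;  /* pages dirtied per second */
    unsigned long long iteration = 0;   /* current memory iteration */
    int ret = EXIT_FAILURE;

    if (!(conn = virConnectOpenReadOnly("qemu:///system")))
        goto cleanup;

    if (!(dom = virDomainLookupByName(conn, "migrating-guest")))
        goto cleanup;

    /* Fetch all job statistics as typed parameters. */
    if (virDomainGetJobStats(dom, &type, &params, &nparams, 0) < 0)
        goto cleanup;

    if (type == VIR_DOMAIN_JOB_NONE) {
        printf("no job is currently running\n");
        ret = EXIT_SUCCESS;
        goto cleanup;
    }

    /* The getters return 1 when the field is present and 0 when it is
     * not reported, so older servers are handled gracefully. */
    if (virTypedParamsGetULLong(params, nparams,
                                VIR_DOMAIN_JOB_MEMORY_DIRTY_RATE,
                                &dirty_rate) == 1)
        printf("Dirty rate: %llu pages/s\n", dirty_rate);

    if (virTypedParamsGetULLong(params, nparams,
                                VIR_DOMAIN_JOB_MEMORY_ITERATION,
                                &iteration) == 1)
        printf("Iteration:  %llu\n", iteration);

    ret = EXIT_SUCCESS;

 cleanup:
    virTypedParamsFree(params, nparams);
    if (dom)
        virDomainFree(dom);
    if (conn)
        virConnectClose(conn);
    return ret;
}

Building the sketch only needs the libvirt client library, e.g.
cc job-stats.c $(pkg-config --cflags --libs libvirt).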