mirror of https://gitee.com/openkylin/libvirt.git
qemu: Add monitor support for CPU halted state
Extended qemuMonitorCPUInfo with a halted flag, and extracted the halted flag for both the text and JSON monitors. Signed-off-by: Viktor Mihajlovski <mihajlov@linux.vnet.ibm.com> Signed-off-by: Boris Fiuczynski <fiuczy@linux.vnet.ibm.com>
This commit is contained in:
parent
ab9202e431
commit
cc5e695bde
|
@ -1677,6 +1677,7 @@ qemuMonitorCPUInfoClear(qemuMonitorCPUInfoPtr cpus,
|
||||||
cpus[i].thread_id = -1;
|
cpus[i].thread_id = -1;
|
||||||
cpus[i].vcpus = 0;
|
cpus[i].vcpus = 0;
|
||||||
cpus[i].tid = 0;
|
cpus[i].tid = 0;
|
||||||
|
cpus[i].halted = false;
|
||||||
|
|
||||||
VIR_FREE(cpus[i].qom_path);
|
VIR_FREE(cpus[i].qom_path);
|
||||||
VIR_FREE(cpus[i].alias);
|
VIR_FREE(cpus[i].alias);
|
||||||
|
@ -1725,8 +1726,10 @@ qemuMonitorGetCPUInfoLegacy(struct qemuMonitorQueryCpusEntry *cpuentries,
|
||||||
size_t i;
|
size_t i;
|
||||||
|
|
||||||
for (i = 0; i < maxvcpus; i++) {
|
for (i = 0; i < maxvcpus; i++) {
|
||||||
if (i < ncpuentries)
|
if (i < ncpuentries) {
|
||||||
vcpus[i].tid = cpuentries[i].tid;
|
vcpus[i].tid = cpuentries[i].tid;
|
||||||
|
vcpus[i].halted = cpuentries[i].halted;
|
||||||
|
}
|
||||||
|
|
||||||
/* for legacy hotplug to work we need to fake the vcpu count added by
|
/* for legacy hotplug to work we need to fake the vcpu count added by
|
||||||
* enabling a given vcpu */
|
* enabling a given vcpu */
|
||||||
|
@ -1864,6 +1867,7 @@ qemuMonitorGetCPUInfoHotplug(struct qemuMonitorQueryHotpluggableCpusEntry *hotpl
|
||||||
}
|
}
|
||||||
|
|
||||||
vcpus[anyvcpu].tid = cpuentries[j].tid;
|
vcpus[anyvcpu].tid = cpuentries[j].tid;
|
||||||
|
vcpus[anyvcpu].halted = cpuentries[j].halted;
|
||||||
}
|
}
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
|
|
|
@ -394,6 +394,7 @@ int qemuMonitorSystemPowerdown(qemuMonitorPtr mon);
|
||||||
struct qemuMonitorQueryCpusEntry {
|
struct qemuMonitorQueryCpusEntry {
|
||||||
pid_t tid;
|
pid_t tid;
|
||||||
char *qom_path;
|
char *qom_path;
|
||||||
|
bool halted;
|
||||||
};
|
};
|
||||||
void qemuMonitorQueryCpusFree(struct qemuMonitorQueryCpusEntry *entries,
|
void qemuMonitorQueryCpusFree(struct qemuMonitorQueryCpusEntry *entries,
|
||||||
size_t nentries);
|
size_t nentries);
|
||||||
|
@ -441,6 +442,8 @@ struct _qemuMonitorCPUInfo {
|
||||||
|
|
||||||
/* internal for use in the matching code */
|
/* internal for use in the matching code */
|
||||||
char *qom_path;
|
char *qom_path;
|
||||||
|
|
||||||
|
bool halted;
|
||||||
};
|
};
|
||||||
typedef struct _qemuMonitorCPUInfo qemuMonitorCPUInfo;
|
typedef struct _qemuMonitorCPUInfo qemuMonitorCPUInfo;
|
||||||
typedef qemuMonitorCPUInfo *qemuMonitorCPUInfoPtr;
|
typedef qemuMonitorCPUInfo *qemuMonitorCPUInfoPtr;
|
||||||
|
|
|
@ -1349,6 +1349,7 @@ qemuMonitorJSONExtractCPUInfo(virJSONValuePtr data,
|
||||||
for (i = 0; i < ncpus; i++) {
|
for (i = 0; i < ncpus; i++) {
|
||||||
virJSONValuePtr entry = virJSONValueArrayGet(data, i);
|
virJSONValuePtr entry = virJSONValueArrayGet(data, i);
|
||||||
int thread = 0;
|
int thread = 0;
|
||||||
|
bool halted = false;
|
||||||
const char *qom_path;
|
const char *qom_path;
|
||||||
if (!entry) {
|
if (!entry) {
|
||||||
ret = -2;
|
ret = -2;
|
||||||
|
@ -1358,9 +1359,11 @@ qemuMonitorJSONExtractCPUInfo(virJSONValuePtr data,
|
||||||
/* Some older qemu versions don't report the thread_id so treat this as
|
/* Some older qemu versions don't report the thread_id so treat this as
|
||||||
* non-fatal, simply returning no data */
|
* non-fatal, simply returning no data */
|
||||||
ignore_value(virJSONValueObjectGetNumberInt(entry, "thread_id", &thread));
|
ignore_value(virJSONValueObjectGetNumberInt(entry, "thread_id", &thread));
|
||||||
|
ignore_value(virJSONValueObjectGetBoolean(entry, "halted", &halted));
|
||||||
qom_path = virJSONValueObjectGetString(entry, "qom_path");
|
qom_path = virJSONValueObjectGetString(entry, "qom_path");
|
||||||
|
|
||||||
cpus[i].tid = thread;
|
cpus[i].tid = thread;
|
||||||
|
cpus[i].halted = halted;
|
||||||
if (VIR_STRDUP(cpus[i].qom_path, qom_path) < 0)
|
if (VIR_STRDUP(cpus[i].qom_path, qom_path) < 0)
|
||||||
goto cleanup;
|
goto cleanup;
|
||||||
}
|
}
|
||||||
|
|
|
@ -521,7 +521,7 @@ qemuMonitorTextQueryCPUs(qemuMonitorPtr mon,
|
||||||
* (qemu) info cpus
|
* (qemu) info cpus
|
||||||
* * CPU #0: pc=0x00000000000f0c4a thread_id=30019
|
* * CPU #0: pc=0x00000000000f0c4a thread_id=30019
|
||||||
* CPU #1: pc=0x00000000fffffff0 thread_id=30020
|
* CPU #1: pc=0x00000000fffffff0 thread_id=30020
|
||||||
* CPU #2: pc=0x00000000fffffff0 thread_id=30021
|
* CPU #2: pc=0x00000000fffffff0 (halted) thread_id=30021
|
||||||
*
|
*
|
||||||
*/
|
*/
|
||||||
line = qemucpus;
|
line = qemucpus;
|
||||||
|
@ -541,6 +541,12 @@ qemuMonitorTextQueryCPUs(qemuMonitorPtr mon,
|
||||||
|
|
||||||
cpu.tid = tid;
|
cpu.tid = tid;
|
||||||
|
|
||||||
|
/* Extract halted indicator */
|
||||||
|
if ((offset = strstr(line, "(halted)")) != NULL)
|
||||||
|
cpu.halted = true;
|
||||||
|
else
|
||||||
|
cpu.halted = false;
|
||||||
|
|
||||||
if (VIR_APPEND_ELEMENT_COPY(cpus, ncpus, cpu) < 0) {
|
if (VIR_APPEND_ELEMENT_COPY(cpus, ncpus, cpu) < 0) {
|
||||||
ret = -1;
|
ret = -1;
|
||||||
goto cleanup;
|
goto cleanup;
|
||||||
|
|
|
@ -1332,10 +1332,10 @@ testQemuMonitorJSONqemuMonitorJSONQueryCPUs(const void *data)
|
||||||
int ret = -1;
|
int ret = -1;
|
||||||
struct qemuMonitorQueryCpusEntry *cpudata = NULL;
|
struct qemuMonitorQueryCpusEntry *cpudata = NULL;
|
||||||
struct qemuMonitorQueryCpusEntry expect[] = {
|
struct qemuMonitorQueryCpusEntry expect[] = {
|
||||||
{17622, (char *) "/machine/unattached/device[0]"},
|
{17622, (char *) "/machine/unattached/device[0]", true},
|
||||||
{17624, (char *) "/machine/unattached/device[1]"},
|
{17624, (char *) "/machine/unattached/device[1]", true},
|
||||||
{17626, (char *) "/machine/unattached/device[2]"},
|
{17626, (char *) "/machine/unattached/device[2]", true},
|
||||||
{17628, NULL},
|
{17628, NULL, true},
|
||||||
};
|
};
|
||||||
size_t ncpudata = 0;
|
size_t ncpudata = 0;
|
||||||
size_t i;
|
size_t i;
|
||||||
|
|
Loading…
Reference in New Issue