mirror of https://gitee.com/openkylin/qemu.git

commit 2033cc6efa

Merge remote-tracking branch 'remotes/bonzini/tags/for-upstream' into staging

* Make checkpatch say 'qemu' instead of 'kernel' (Aleksandar)
* Fix PSE guests with emulated NPT (Alexander B. #1)
* Fix leak (Alexander B. #2)
* HVF fixes (Roman, Cameron)
* New Sapphire Rapids CPUID bits (Cathy)
* cpus.c and softmmu/ cleanups (Claudio)
* TAP driver tweaks (Daniel, Havard)
* object-add bugfix and testcases (Eric A.)
* Fix Coverity MIN_CONST and MAX_CONST (Eric B.)
* "info lapic" improvement (Jan)
* SSE fixes (Joseph)
* "-msg guest-name" option (Mario)
* support for AMD nested live migration (myself)
* Small i386 TCG fixes (myself)
* improved error reporting for Xen (myself)
* fix "-cpu host -overcommit cpu-pm=on" (myself)
* Add accel/Kconfig (Philippe)
* iscsi sense handling fixes (Yongji)
* Misc bugfixes

# gpg: Signature made Sat 11 Jul 2020 00:33:41 BST
# gpg:                using RSA key F13338574B662389866C7682BFFBD25F78C7AE83
# gpg:                issuer "pbonzini@redhat.com"
# gpg: Good signature from "Paolo Bonzini <bonzini@gnu.org>" [full]
# gpg:                 aka "Paolo Bonzini <pbonzini@redhat.com>" [full]
# Primary key fingerprint: 46F5 9FBD 57D6 12E7 BFD4 E2F7 7E15 100C CD36 69B1
#      Subkey fingerprint: F133 3857 4B66 2389 866C 7682 BFFB D25F 78C7 AE83

* remotes/bonzini/tags/for-upstream: (47 commits)
  linux-headers: update again to 5.8
  apic: Report current_count via 'info lapic'
  scripts: improve message when TAP based tests fail
  target/i386: Enable TSX Suspend Load Address Tracking feature
  target/i386: Add SERIALIZE cpu feature
  softmmu/vl: Remove the check for colons in -accel parameters
  cpu-throttle: new module, extracted from cpus.c
  softmmu: move softmmu only files from root
  pc: fix leak in pc_system_flash_cleanup_unused
  cpus: Move CPU code from exec.c to cpus-common.c
  target/i386: Correct the warning message of Intel PT
  checkpatch: Change occurences of 'kernel' to 'qemu' in user messages
  iscsi: return -EIO when sense fields are meaningless
  iscsi: handle check condition status in retry loop
  target/i386: sev: fail query-sev-capabilities if QEMU cannot use SEV
  target/i386: sev: provide proper error reporting for query-sev-capabilities
  KVM: x86: believe what KVM says about WAITPKG
  target/i386: implement undocumented "smsw r32" behavior
  target/i386: remove gen_io_end
  Makefile: simplify MINIKCONF rules
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
@@ -0,0 +1,4 @@
source Kconfig.host
source backends/Kconfig
source accel/Kconfig
source hw/Kconfig

@@ -2,9 +2,6 @@
# down to Kconfig. See also MINIKCONF_ARGS in the Makefile:
# these two need to be kept in sync.

config KVM
    bool

config LINUX
    bool

@@ -31,10 +28,6 @@ config VHOST_KERNEL
    bool
    select VHOST

config XEN
    bool
    select FSDEV_9P if VIRTFS

config VIRTFS
    bool
MAINTAINERS | 29

@@ -115,7 +115,7 @@ Overall TCG CPUs
M: Richard Henderson <rth@twiddle.net>
R: Paolo Bonzini <pbonzini@redhat.com>
S: Maintained
F: cpus.c
F: softmmu/cpus.c
F: cpus-common.c
F: exec.c
F: accel/tcg/
@@ -362,7 +362,7 @@ Overall KVM CPUs
M: Paolo Bonzini <pbonzini@redhat.com>
L: kvm@vger.kernel.org
S: Supported
F: */kvm.*
F: */*/kvm*
F: accel/kvm/
F: accel/stubs/kvm-stub.c
F: include/hw/kvm/
@@ -416,8 +416,21 @@ S: Supported
F: target/i386/kvm.c
F: scripts/kvm/vmxcap

Guest CPU Cores (other accelerators)
------------------------------------
Overall
M: Richard Henderson <rth@twiddle.net>
R: Paolo Bonzini <pbonzini@redhat.com>
S: Maintained
F: include/sysemu/accel.h
F: accel/accel.c
F: accel/Makefile.objs
F: accel/stubs/Makefile.objs

X86 HVF CPUs
M: Cameron Esfahani <dirty@apple.com>
M: Roman Bolshakov <r.bolshakov@yadro.com>
W: https://wiki.qemu.org/Features/HVF
S: Maintained
F: accel/stubs/hvf-stub.c
F: target/i386/hvf/
@@ -465,6 +478,7 @@ M: Colin Xu <colin.xu@intel.com>
L: haxm-team@intel.com
W: https://github.com/intel/haxm/issues
S: Maintained
F: accel/stubs/hax-stub.c
F: include/sysemu/hax.h
F: target/i386/hax-*

@@ -1710,7 +1724,7 @@ M: David Hildenbrand <david@redhat.com>
S: Maintained
F: hw/virtio/virtio-balloon*.c
F: include/hw/virtio/virtio-balloon.h
F: balloon.c
F: softmmu/balloon.c
F: include/sysemu/balloon.h

virtio-9p
@@ -2189,12 +2203,12 @@ Memory API
M: Paolo Bonzini <pbonzini@redhat.com>
S: Supported
F: include/exec/ioport.h
F: ioport.c
F: include/exec/memop.h
F: include/exec/memory.h
F: include/exec/ram_addr.h
F: include/exec/ramblock.h
F: memory.c
F: softmmu/ioport.c
F: softmmu/memory.c
F: include/exec/memory-internal.h
F: exec.c
F: scripts/coccinelle/memory-region-housekeeping.cocci
@@ -2226,13 +2240,14 @@ F: ui/cocoa.m
Main loop
M: Paolo Bonzini <pbonzini@redhat.com>
S: Maintained
F: cpus.c
F: include/qemu/main-loop.h
F: include/sysemu/runstate.h
F: util/main-loop.c
F: util/qemu-timer.c
F: softmmu/vl.c
F: softmmu/main.c
F: softmmu/cpus.c
F: softmmu/cpu-throttle.c
F: qapi/run-state.json

Human Monitor (HMP)
@@ -2387,7 +2402,7 @@ M: Thomas Huth <thuth@redhat.com>
M: Laurent Vivier <lvivier@redhat.com>
R: Paolo Bonzini <pbonzini@redhat.com>
S: Maintained
F: qtest.c
F: softmmu/qtest.c
F: accel/qtest.c
F: tests/qtest/
X: tests/qtest/bios-tables-test-allowed-diff.h
Makefile | 12

@@ -404,7 +404,8 @@ endif
# This has to be kept in sync with Kconfig.host.
MINIKCONF_ARGS = \
    $(CONFIG_MINIKCONF_MODE) \
    $@ $*/config-devices.mak.d $< $(MINIKCONF_INPUTS) \
    $@ $*/config-devices.mak.d $< $(SRC_PATH)/Kconfig \
    CONFIG_TCG=$(CONFIG_TCG) \
    CONFIG_KVM=$(CONFIG_KVM) \
    CONFIG_SPICE=$(CONFIG_SPICE) \
    CONFIG_IVSHMEM=$(CONFIG_IVSHMEM) \
@@ -418,12 +419,11 @@ MINIKCONF_ARGS = \
    CONFIG_LINUX=$(CONFIG_LINUX) \
    CONFIG_PVRDMA=$(CONFIG_PVRDMA)

MINIKCONF_INPUTS = $(SRC_PATH)/Kconfig.host $(SRC_PATH)/backends/Kconfig $(SRC_PATH)/hw/Kconfig
MINIKCONF_DEPS = $(MINIKCONF_INPUTS) $(wildcard $(SRC_PATH)/hw/*/Kconfig)
MINIKCONF = $(PYTHON) $(SRC_PATH)/scripts/minikconf.py \
MINIKCONF = $(PYTHON) $(SRC_PATH)/scripts/minikconf.py

$(SUBDIR_DEVICES_MAK): %/config-devices.mak: default-configs/%.mak $(MINIKCONF_DEPS) $(BUILD_DIR)/config-host.mak
        $(call quiet-command, $(MINIKCONF) $(MINIKCONF_ARGS) > $@.tmp, "GEN", "$@.tmp")
$(SUBDIR_DEVICES_MAK): %/config-devices.mak: default-configs/%.mak $(SRC_PATH)/Kconfig $(BUILD_DIR)/config-host.mak
        $(call quiet-command, $(MINIKCONF) $(MINIKCONF_ARGS) \
            > $@.tmp, "GEN", "$@.tmp")
        $(call quiet-command, if test -f $@; then \
            if cmp -s $@.old $@; then \
                mv $@.tmp $@; \
@@ -152,16 +152,13 @@ endif #CONFIG_BSD_USER
#########################################################
# System emulator target
ifdef CONFIG_SOFTMMU
obj-y += arch_init.o cpus.o gdbstub.o balloon.o ioport.o
obj-y += qtest.o
obj-y += softmmu/
obj-y += gdbstub.o
obj-y += dump/
obj-y += hw/
obj-y += monitor/
obj-y += qapi/
obj-y += memory.o
obj-y += memory_mapping.o
obj-y += migration/ram.o
obj-y += softmmu/
LIBS := $(libs_softmmu) $(LIBS)

# Hardware support
@@ -0,0 +1,9 @@
config TCG
    bool

config KVM
    bool

config XEN
    bool
    select FSDEV_9P if VIRTFS
@@ -22,3 +22,10 @@ void tb_flush(CPUState *cpu)
void tlb_set_dirty(CPUState *cpu, target_ulong vaddr)
{
}

void *probe_access(CPUArchState *env, target_ulong addr, int size,
                   MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
{
    /* Handled by hardware accelerator. */
    g_assert_not_reached();
}
@@ -241,9 +241,11 @@ iscsi_co_generic_cb(struct iscsi_context *iscsi, int status,

    iTask->status = status;
    iTask->do_retry = 0;
    iTask->err_code = 0;
    iTask->task = task;

    if (status != SCSI_STATUS_GOOD) {
        iTask->err_code = -EIO;
        if (iTask->retries++ < ISCSI_CMD_RETRIES) {
            if (status == SCSI_STATUS_BUSY ||
                status == SCSI_STATUS_TIMEOUT ||
@@ -266,16 +268,16 @@ iscsi_co_generic_cb(struct iscsi_context *iscsi, int status,
                timer_mod(&iTask->retry_timer,
                          qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + retry_time);
                iTask->do_retry = 1;
            }
        } else if (status == SCSI_STATUS_CHECK_CONDITION) {
            int error = iscsi_translate_sense(&task->sense);
            if (error == EAGAIN) {
                error_report("iSCSI CheckCondition: %s",
                             iscsi_get_error(iscsi));
                iTask->do_retry = 1;
            } else {
                iTask->err_code = -error;
                iTask->err_str = g_strdup(iscsi_get_error(iscsi));
        } else if (status == SCSI_STATUS_CHECK_CONDITION) {
            int error = iscsi_translate_sense(&task->sense);
            if (error == EAGAIN) {
                error_report("iSCSI CheckCondition: %s",
                             iscsi_get_error(iscsi));
                iTask->do_retry = 1;
            } else {
                iTask->err_code = -error;
                iTask->err_str = g_strdup(iscsi_get_error(iscsi));
            }
        }
    }
}
@@ -72,6 +72,8 @@ static int cpu_get_free_index(void)
    return max_cpu_index;
}

CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);

void cpu_list_add(CPUState *cpu)
{
    QEMU_LOCK_GUARD(&qemu_cpu_list_lock);
@@ -96,6 +98,22 @@ void cpu_list_remove(CPUState *cpu)
    cpu->cpu_index = UNASSIGNED_CPU_INDEX;
}

CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }

    return NULL;
}

/* current CPU in the current thread. It is only valid inside cpu_exec() */
__thread CPUState *current_cpu;

struct qemu_work_item {
    QSIMPLEQ_ENTRY(qemu_work_item) node;
    run_on_cpu_func func;
|
22
exec.c
22
exec.c
|
@ -98,12 +98,6 @@ AddressSpace address_space_memory;
|
|||
static MemoryRegion io_mem_unassigned;
|
||||
#endif
|
||||
|
||||
CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
|
||||
|
||||
/* current CPU in the current thread. It is only valid inside
|
||||
cpu_exec() */
|
||||
__thread CPUState *current_cpu;
|
||||
|
||||
uintptr_t qemu_host_page_size;
|
||||
intptr_t qemu_host_page_mask;
|
||||
|
||||
|
@ -832,22 +826,6 @@ const VMStateDescription vmstate_cpu_common = {
|
|||
}
|
||||
};
|
||||
|
||||
#endif
|
||||
|
||||
CPUState *qemu_get_cpu(int index)
|
||||
{
|
||||
CPUState *cpu;
|
||||
|
||||
CPU_FOREACH(cpu) {
|
||||
if (cpu->cpu_index == index) {
|
||||
return cpu;
|
||||
}
|
||||
}
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
#if !defined(CONFIG_USER_ONLY)
|
||||
void cpu_address_space_init(CPUState *cpu, int asidx,
|
||||
const char *prefix, MemoryRegion *mr)
|
||||
{
|
||||
|
|
|
@@ -50,6 +50,11 @@ static void machine_none_machine_init(MachineClass *mc)
    mc->max_cpus = 1;
    mc->default_ram_size = 0;
    mc->default_ram_id = "ram";
    mc->no_serial = 1;
    mc->no_parallel = 1;
    mc->no_floppy = 1;
    mc->no_cdrom = 1;
    mc->no_sdcard = 1;
}

DEFINE_MACHINE("none", machine_none_machine_init)
@@ -93,6 +93,11 @@ static PFlashCFI01 *pc_pflash_create(PCMachineState *pcms,
    object_property_add_child(OBJECT(pcms), name, OBJECT(dev));
    object_property_add_alias(OBJECT(pcms), alias_prop_name,
                              OBJECT(dev), "drive");
    /*
     * The returned reference is tied to the child property and
     * will be removed with object_unparent.
     */
    object_unref(OBJECT(dev));
    return PFLASH_CFI01(dev);
}
@@ -615,24 +615,6 @@ int apic_accept_pic_intr(DeviceState *dev)
    return 0;
}

static uint32_t apic_get_current_count(APICCommonState *s)
{
    int64_t d;
    uint32_t val;
    d = (qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - s->initial_count_load_time) >>
        s->count_shift;
    if (s->lvt[APIC_LVT_TIMER] & APIC_LVT_TIMER_PERIODIC) {
        /* periodic */
        val = s->initial_count - (d % ((uint64_t)s->initial_count + 1));
    } else {
        if (d >= s->initial_count)
            val = 0;
        else
            val = s->initial_count - d;
    }
    return val;
}

static void apic_timer_update(APICCommonState *s, int64_t current_time)
{
    if (apic_next_timer(s, current_time)) {
@@ -189,6 +189,25 @@ bool apic_next_timer(APICCommonState *s, int64_t current_time)
    return true;
}

uint32_t apic_get_current_count(APICCommonState *s)
{
    int64_t d;
    uint32_t val;
    d = (qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - s->initial_count_load_time) >>
        s->count_shift;
    if (s->lvt[APIC_LVT_TIMER] & APIC_LVT_TIMER_PERIODIC) {
        /* periodic */
        val = s->initial_count - (d % ((uint64_t)s->initial_count + 1));
    } else {
        if (d >= s->initial_count) {
            val = 0;
        } else {
            val = s->initial_count - d;
        }
    }
    return val;
}

void apic_init_reset(DeviceState *dev)
{
    APICCommonState *s;
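For reference, the periodic branch above wraps the shifted elapsed time d modulo initial_count + 1. With purely illustrative numbers (not from the commit), initial_count = 100 and d = 250 gives val = 100 - (250 % 101) = 100 - 48 = 52; in one-shot mode the same d would already have clamped to 0 because d >= initial_count. This is the value that "info lapic" now reports as current_count.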
@@ -822,43 +822,6 @@ bool cpu_exists(int64_t id);
 */
CPUState *cpu_by_arch_id(int64_t id);

/**
 * cpu_throttle_set:
 * @new_throttle_pct: Percent of sleep time. Valid range is 1 to 99.
 *
 * Throttles all vcpus by forcing them to sleep for the given percentage of
 * time. A throttle_percentage of 25 corresponds to a 75% duty cycle roughly.
 * (example: 10ms sleep for every 30ms awake).
 *
 * cpu_throttle_set can be called as needed to adjust new_throttle_pct.
 * Once the throttling starts, it will remain in effect until cpu_throttle_stop
 * is called.
 */
void cpu_throttle_set(int new_throttle_pct);

/**
 * cpu_throttle_stop:
 *
 * Stops the vcpu throttling started by cpu_throttle_set.
 */
void cpu_throttle_stop(void);

/**
 * cpu_throttle_active:
 *
 * Returns: %true if the vcpus are currently being throttled, %false otherwise.
 */
bool cpu_throttle_active(void);

/**
 * cpu_throttle_get_percentage:
 *
 * Returns the vcpu throttle percentage. See cpu_throttle_set for details.
 *
 * Returns: The throttle percentage in range 1 to 99.
 */
int cpu_throttle_get_percentage(void);

#ifndef CONFIG_USER_ONLY

typedef void (*CPUInterruptHandler)(CPUState *, int);
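The block deleted here is the cpu_throttle_* API, which this series moves into the new include/sysemu/cpu-throttle.h and softmmu/cpu-throttle.c shown later in the diff. A minimal usage sketch based only on the doc comments above; the function name example_converge and the 25% figure are illustrative assumptions, not code from the commit:

    #include "sysemu/cpu-throttle.h"

    /* Illustrative sketch: throttle vcpus to roughly a 75% duty cycle (25% sleep). */
    static void example_converge(void)
    {
        cpu_throttle_set(25);          /* valid range is 1 to 99 */
        if (cpu_throttle_active()) {
            /* ... let the workload (e.g. a converging migration) catch up ... */
        }
        cpu_throttle_stop();           /* throttling stays in effect until this call */
    }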
@@ -211,6 +211,7 @@ void vapic_report_tpr_access(DeviceState *dev, CPUState *cpu, target_ulong ip,
                             TPRAccess access);

int apic_get_ppr(APICCommonState *s);
uint32_t apic_get_current_count(APICCommonState *s);

static inline void apic_set_bit(uint32_t *tab, int index)
{
@@ -75,5 +75,7 @@ void error_init(const char *argv0);
const char *error_get_progname(void);

extern bool error_with_timestamp;
extern bool error_with_guestname;
extern const char *error_guest_name;

#endif
@@ -303,6 +303,11 @@ void qemu_mutex_unlock_iothread(void);
 */
void qemu_cond_wait_iothread(QemuCond *cond);

/*
 * qemu_cond_timedwait_iothread: like the previous, but with timeout
 */
void qemu_cond_timedwait_iothread(QemuCond *cond, int ms);

/* internal interfaces */

void qemu_fd_register(int fd);
@@ -250,7 +250,8 @@ extern int daemon(int, int);
 * Note that neither form is usable as an #if condition; if you truly
 * need to write conditional code that depends on a minimum or maximum
 * determined by the pre-processor instead of the compiler, you'll
 * have to open-code it.
 * have to open-code it.  Sadly, Coverity is severely confused by the
 * constant variants, so we have to dumb things down there.
 */
#undef MIN
#define MIN(a, b)                                       \
@@ -258,22 +259,28 @@ extern int daemon(int, int);
        typeof(1 ? (a) : (b)) _a = (a), _b = (b);       \
        _a < _b ? _a : _b;                              \
    })
#define MIN_CONST(a, b)                                         \
    __builtin_choose_expr(                                      \
        __builtin_constant_p(a) && __builtin_constant_p(b),     \
        (a) < (b) ? (a) : (b),                                  \
        ((void)0))
#undef MAX
#define MAX(a, b)                                       \
    ({                                                  \
        typeof(1 ? (a) : (b)) _a = (a), _b = (b);       \
        _a > _b ? _a : _b;                              \
    })
#define MAX_CONST(a, b)                                         \

#ifdef __COVERITY__
# define MIN_CONST(a, b) ((a) < (b) ? (a) : (b))
# define MAX_CONST(a, b) ((a) > (b) ? (a) : (b))
#else
# define MIN_CONST(a, b)                                        \
    __builtin_choose_expr(                                      \
        __builtin_constant_p(a) && __builtin_constant_p(b),     \
        (a) < (b) ? (a) : (b),                                  \
        ((void)0))
# define MAX_CONST(a, b)                                        \
    __builtin_choose_expr(                                      \
        __builtin_constant_p(a) && __builtin_constant_p(b),     \
        (a) > (b) ? (a) : (b),                                  \
        ((void)0))
#endif

/*
 * Minimum function that returns zero only if both values are zero.
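As background for the hunk above: MIN()/MAX() are GCC statement expressions and therefore not integer constant expressions, so MIN_CONST()/MAX_CONST() exist for contexts that require a compile-time constant; the new __COVERITY__ branch simply drops the __builtin_choose_expr guard that confuses the scanner. A hedged illustration, where A_LIMIT, B_LIMIT and the scratch buffer are invented for this sketch and are not QEMU code:

    /* Illustrative only -- these limits are not from QEMU. */
    #define A_LIMIT 64
    #define B_LIMIT 96

    /* OK: both arguments are compile-time constants, usable as an array size. */
    static char scratch[MAX_CONST(A_LIMIT, B_LIMIT)];

    /* Not OK (outside Coverity builds): with non-constant arguments the
     * __builtin_choose_expr fallback evaluates to (void)0, so the line below
     * would fail to compile -- use MAX() for runtime values instead. */
    /* size_t n = MAX_CONST(runtime_a, runtime_b); */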
@@ -1047,7 +1047,7 @@ Object *object_ref(Object *obj);
void object_unref(Object *obj);

/**
 * object_property_add:
 * object_property_try_add:
 * @obj: the object to add a property to
 * @name: the name of the property. This can contain any character except for
 *        a forward slash. In general, you should use hyphens '-' instead of
@@ -1064,10 +1064,23 @@ void object_unref(Object *obj);
 *   meant to allow a property to free its opaque upon object
 *   destruction. This may be NULL.
 * @opaque: an opaque pointer to pass to the callbacks for the property
 * @errp: pointer to error object
 *
 * Returns: The #ObjectProperty; this can be used to set the @resolve
 * callback for child and link properties.
 */
ObjectProperty *object_property_try_add(Object *obj, const char *name,
                                        const char *type,
                                        ObjectPropertyAccessor *get,
                                        ObjectPropertyAccessor *set,
                                        ObjectPropertyRelease *release,
                                        void *opaque, Error **errp);

/**
 * object_property_add:
 * Same as object_property_try_add() with @errp hardcoded to
 * &error_abort.
 */
ObjectProperty *object_property_add(Object *obj, const char *name,
                                    const char *type,
                                    ObjectPropertyAccessor *get,
@@ -1518,10 +1531,11 @@ Object *object_resolve_path_type(const char *path, const char *typename,
Object *object_resolve_path_component(Object *parent, const char *part);

/**
 * object_property_add_child:
 * object_property_try_add_child:
 * @obj: the object to add a property to
 * @name: the name of the property
 * @child: the child object
 * @errp: pointer to error object
 *
 * Child properties form the composition tree. All objects need to be a child
 * of another object. Objects can only be a child of one object.
@@ -1535,6 +1549,14 @@ Object *object_resolve_path_component(Object *parent, const char *part);
 *
 * Returns: The newly added property on success, or %NULL on failure.
 */
ObjectProperty *object_property_try_add_child(Object *obj, const char *name,
                                              Object *child, Error **errp);

/**
 * object_property_add_child:
 * Same as object_property_try_add_child() with @errp hardcoded to
 * &error_abort
 */
ObjectProperty *object_property_add_child(Object *obj, const char *name,
                                          Object *child);
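For context on the hunks above: the new "try" variants report failure through @errp and a NULL return instead of aborting, which is what lets object-add turn a duplicate id into an ordinary error; qom/object_interfaces.c further down in this diff switches to exactly this pattern. A minimal sketch of the calling convention, where obj, id and errp stand in for a user-created object, its name and the caller's error pointer (not code from the commit):

    Error *local_err = NULL;

    object_property_try_add_child(object_get_objects_root(), id, obj, &local_err);
    if (local_err) {
        /* e.g. an object with the same id already exists: report, don't abort */
        error_propagate(errp, local_err);
        object_unref(obj);
        return;
    }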
@ -0,0 +1,68 @@
|
|||
/*
|
||||
* Copyright (c) 2012 SUSE LINUX Products GmbH
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, see
|
||||
* <http://www.gnu.org/licenses/gpl-2.0.html>
|
||||
*/
|
||||
|
||||
#ifndef SYSEMU_CPU_THROTTLE_H
|
||||
#define SYSEMU_CPU_THROTTLE_H
|
||||
|
||||
#include "qemu/timer.h"
|
||||
|
||||
/**
|
||||
* cpu_throttle_init:
|
||||
*
|
||||
* Initialize the CPU throttling API.
|
||||
*/
|
||||
void cpu_throttle_init(void);
|
||||
|
||||
/**
|
||||
* cpu_throttle_set:
|
||||
* @new_throttle_pct: Percent of sleep time. Valid range is 1 to 99.
|
||||
*
|
||||
* Throttles all vcpus by forcing them to sleep for the given percentage of
|
||||
* time. A throttle_percentage of 25 corresponds to a 75% duty cycle roughly.
|
||||
* (example: 10ms sleep for every 30ms awake).
|
||||
*
|
||||
* cpu_throttle_set can be called as needed to adjust new_throttle_pct.
|
||||
* Once the throttling starts, it will remain in effect until cpu_throttle_stop
|
||||
* is called.
|
||||
*/
|
||||
void cpu_throttle_set(int new_throttle_pct);
|
||||
|
||||
/**
|
||||
* cpu_throttle_stop:
|
||||
*
|
||||
* Stops the vcpu throttling started by cpu_throttle_set.
|
||||
*/
|
||||
void cpu_throttle_stop(void);
|
||||
|
||||
/**
|
||||
* cpu_throttle_active:
|
||||
*
|
||||
* Returns: %true if the vcpus are currently being throttled, %false otherwise.
|
||||
*/
|
||||
bool cpu_throttle_active(void);
|
||||
|
||||
/**
|
||||
* cpu_throttle_get_percentage:
|
||||
*
|
||||
* Returns the vcpu throttle percentage. See cpu_throttle_set for details.
|
||||
*
|
||||
* Returns: The throttle percentage in range 1 to 99.
|
||||
*/
|
||||
int cpu_throttle_get_percentage(void);
|
||||
|
||||
#endif /* SYSEMU_CPU_THROTTLE_H */
|
|
@ -28,8 +28,8 @@ int hvf_vcpu_exec(CPUState *);
|
|||
void hvf_cpu_synchronize_state(CPUState *);
|
||||
void hvf_cpu_synchronize_post_reset(CPUState *);
|
||||
void hvf_cpu_synchronize_post_init(CPUState *);
|
||||
void hvf_cpu_synchronize_pre_loadvm(CPUState *);
|
||||
void hvf_vcpu_destroy(CPUState *);
|
||||
void hvf_reset_vcpu(CPUState *);
|
||||
|
||||
#define TYPE_HVF_ACCEL ACCEL_CLASS_NAME("hvf")
|
||||
|
||||
|
|
|
@ -14,6 +14,7 @@
|
|||
#include "hw/core/cpu.h"
|
||||
#include "sysemu/hax.h"
|
||||
#include "sysemu/kvm.h"
|
||||
#include "sysemu/hvf.h"
|
||||
#include "sysemu/whpx.h"
|
||||
|
||||
static inline void cpu_synchronize_state(CPUState *cpu)
|
||||
|
@ -24,6 +25,9 @@ static inline void cpu_synchronize_state(CPUState *cpu)
|
|||
if (hax_enabled()) {
|
||||
hax_cpu_synchronize_state(cpu);
|
||||
}
|
||||
if (hvf_enabled()) {
|
||||
hvf_cpu_synchronize_state(cpu);
|
||||
}
|
||||
if (whpx_enabled()) {
|
||||
whpx_cpu_synchronize_state(cpu);
|
||||
}
|
||||
|
@ -37,6 +41,9 @@ static inline void cpu_synchronize_post_reset(CPUState *cpu)
|
|||
if (hax_enabled()) {
|
||||
hax_cpu_synchronize_post_reset(cpu);
|
||||
}
|
||||
if (hvf_enabled()) {
|
||||
hvf_cpu_synchronize_post_reset(cpu);
|
||||
}
|
||||
if (whpx_enabled()) {
|
||||
whpx_cpu_synchronize_post_reset(cpu);
|
||||
}
|
||||
|
@ -50,6 +57,9 @@ static inline void cpu_synchronize_post_init(CPUState *cpu)
|
|||
if (hax_enabled()) {
|
||||
hax_cpu_synchronize_post_init(cpu);
|
||||
}
|
||||
if (hvf_enabled()) {
|
||||
hvf_cpu_synchronize_post_init(cpu);
|
||||
}
|
||||
if (whpx_enabled()) {
|
||||
whpx_cpu_synchronize_post_init(cpu);
|
||||
}
|
||||
|
@ -63,6 +73,9 @@ static inline void cpu_synchronize_pre_loadvm(CPUState *cpu)
|
|||
if (hax_enabled()) {
|
||||
hax_cpu_synchronize_pre_loadvm(cpu);
|
||||
}
|
||||
if (hvf_enabled()) {
|
||||
hvf_cpu_synchronize_pre_loadvm(cpu);
|
||||
}
|
||||
if (whpx_enabled()) {
|
||||
whpx_cpu_synchronize_pre_loadvm(cpu);
|
||||
}
|
||||
|
|
|
@ -392,5 +392,6 @@
|
|||
#define __NR_clone3 (__NR_SYSCALL_BASE + 435)
|
||||
#define __NR_openat2 (__NR_SYSCALL_BASE + 437)
|
||||
#define __NR_pidfd_getfd (__NR_SYSCALL_BASE + 438)
|
||||
#define __NR_faccessat2 (__NR_SYSCALL_BASE + 439)
|
||||
|
||||
#endif /* _ASM_ARM_UNISTD_COMMON_H */
|
||||
|
|
|
@ -408,14 +408,15 @@ struct kvm_vmx_nested_state_data {
|
|||
};
|
||||
|
||||
struct kvm_vmx_nested_state_hdr {
|
||||
__u32 flags;
|
||||
__u64 vmxon_pa;
|
||||
__u64 vmcs12_pa;
|
||||
__u64 preemption_timer_deadline;
|
||||
|
||||
struct {
|
||||
__u16 flags;
|
||||
} smm;
|
||||
|
||||
__u32 flags;
|
||||
__u64 preemption_timer_deadline;
|
||||
};
|
||||
|
||||
struct kvm_svm_nested_state_data {
|
||||
|
|
|
@ -23,6 +23,7 @@
|
|||
#include "socket.h"
|
||||
#include "sysemu/runstate.h"
|
||||
#include "sysemu/sysemu.h"
|
||||
#include "sysemu/cpu-throttle.h"
|
||||
#include "rdma.h"
|
||||
#include "ram.h"
|
||||
#include "migration/global_state.h"
|
||||
|
|
|
@ -52,6 +52,7 @@
|
|||
#include "migration/colo.h"
|
||||
#include "block.h"
|
||||
#include "sysemu/sysemu.h"
|
||||
#include "sysemu/cpu-throttle.h"
|
||||
#include "savevm.h"
|
||||
#include "qemu/iov.h"
|
||||
#include "multifd.h"
|
||||
|
|
|
@ -4303,16 +4303,22 @@ HXCOMM Deprecated by -accel tcg
|
|||
DEF("no-kvm", 0, QEMU_OPTION_no_kvm, "", QEMU_ARCH_I386)
|
||||
|
||||
DEF("msg", HAS_ARG, QEMU_OPTION_msg,
|
||||
"-msg timestamp[=on|off]\n"
|
||||
"-msg [timestamp[=on|off]][,guest-name=[on|off]]\n"
|
||||
" control error message format\n"
|
||||
" timestamp=on enables timestamps (default: off)\n",
|
||||
" timestamp=on enables timestamps (default: off)\n"
|
||||
" guest-name=on enables guest name prefix but only if\n"
|
||||
" -name guest option is set (default: off)\n",
|
||||
QEMU_ARCH_ALL)
|
||||
SRST
|
||||
``-msg timestamp[=on|off]``
|
||||
``-msg [timestamp[=on|off]][,guest-name[=on|off]]``
|
||||
Control error message format.
|
||||
|
||||
``timestamp=on|off``
|
||||
Prefix messages with a timestamp. Default is off.
|
||||
|
||||
``guest-name=on|off``
|
||||
Prefix messages with guest name but only if -name guest option is set
|
||||
otherwise the option is ignored. Default is off.
|
||||
ERST
|
||||
|
||||
DEF("dump-vmstate", HAS_ARG, QEMU_OPTION_dump_vmstate,
|
||||
|
|
21
qom/object.c
21
qom/object.c
|
@ -1146,7 +1146,7 @@ void object_unref(Object *obj)
|
|||
}
|
||||
}
|
||||
|
||||
static ObjectProperty *
|
||||
ObjectProperty *
|
||||
object_property_try_add(Object *obj, const char *name, const char *type,
|
||||
ObjectPropertyAccessor *get,
|
||||
ObjectPropertyAccessor *set,
|
||||
|
@ -1675,8 +1675,8 @@ static void object_finalize_child_property(Object *obj, const char *name,
|
|||
}
|
||||
|
||||
ObjectProperty *
|
||||
object_property_add_child(Object *obj, const char *name,
|
||||
Object *child)
|
||||
object_property_try_add_child(Object *obj, const char *name,
|
||||
Object *child, Error **errp)
|
||||
{
|
||||
g_autofree char *type = NULL;
|
||||
ObjectProperty *op;
|
||||
|
@ -1685,14 +1685,25 @@ object_property_add_child(Object *obj, const char *name,
|
|||
|
||||
type = g_strdup_printf("child<%s>", object_get_typename(child));
|
||||
|
||||
op = object_property_add(obj, name, type, object_get_child_property, NULL,
|
||||
object_finalize_child_property, child);
|
||||
op = object_property_try_add(obj, name, type, object_get_child_property,
|
||||
NULL, object_finalize_child_property,
|
||||
child, errp);
|
||||
if (!op) {
|
||||
return NULL;
|
||||
}
|
||||
op->resolve = object_resolve_child_property;
|
||||
object_ref(child);
|
||||
child->parent = obj;
|
||||
return op;
|
||||
}
|
||||
|
||||
ObjectProperty *
|
||||
object_property_add_child(Object *obj, const char *name,
|
||||
Object *child)
|
||||
{
|
||||
return object_property_try_add_child(obj, name, child, &error_abort);
|
||||
}
|
||||
|
||||
void object_property_allow_set_link(const Object *obj, const char *name,
|
||||
Object *val, Error **errp)
|
||||
{
|
||||
|
|
|
@ -83,8 +83,11 @@ Object *user_creatable_add_type(const char *type, const char *id,
|
|||
}
|
||||
|
||||
if (id != NULL) {
|
||||
object_property_add_child(object_get_objects_root(),
|
||||
id, obj);
|
||||
object_property_try_add_child(object_get_objects_root(),
|
||||
id, obj, &local_err);
|
||||
if (local_err) {
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
|
||||
if (!user_creatable_complete(USER_CREATABLE(obj), &local_err)) {
|
||||
|
|
|
@ -49,7 +49,7 @@ Version: $V
|
|||
|
||||
Options:
|
||||
-q, --quiet quiet
|
||||
--no-tree run without a kernel tree
|
||||
--no-tree run without a qemu tree
|
||||
--no-signoff do not check for 'Signed-off-by' line
|
||||
--patch treat FILE as patchfile
|
||||
--branch treat args as GIT revision list
|
||||
|
@ -57,7 +57,7 @@ Options:
|
|||
--terse one line per report
|
||||
-f, --file treat FILE as regular source file
|
||||
--strict fail if only warnings are found
|
||||
--root=PATH PATH to the kernel tree root
|
||||
--root=PATH PATH to the qemu tree root
|
||||
--no-summary suppress the per-file summary
|
||||
--mailback only produce a report in case of warnings/errors
|
||||
--summary-file include the filename in summary
|
||||
|
@ -203,7 +203,7 @@ if ($tree) {
|
|||
}
|
||||
|
||||
if (!defined $root) {
|
||||
print "Must be run from the top-level dir. of a kernel tree\n";
|
||||
print "Must be run from the top-level dir. of a qemu tree\n";
|
||||
exit(2);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -217,7 +217,7 @@ sub report ($;$)
|
|||
|
||||
sub testsuite_error ($)
|
||||
{
|
||||
report "ERROR", "- $_[0]";
|
||||
report "ERROR", "$test_name - $_[0]";
|
||||
}
|
||||
|
||||
sub handle_tap_result ($)
|
||||
|
|
|
@ -1,3 +1,14 @@
|
|||
softmmu-main-y = softmmu/main.o
|
||||
|
||||
obj-y += arch_init.o
|
||||
obj-y += cpus.o
|
||||
obj-y += cpu-throttle.o
|
||||
obj-y += balloon.o
|
||||
obj-y += ioport.o
|
||||
obj-y += memory.o
|
||||
obj-y += memory_mapping.o
|
||||
|
||||
obj-y += qtest.o
|
||||
|
||||
obj-y += vl.o
|
||||
vl.o-cflags := $(GPROF_CFLAGS) $(SDL_CFLAGS)
|
||||
|
|
|
@ -0,0 +1,122 @@
|
|||
/*
|
||||
* QEMU System Emulator
|
||||
*
|
||||
* Copyright (c) 2003-2008 Fabrice Bellard
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to deal
|
||||
* in the Software without restriction, including without limitation the rights
|
||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
* copies of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
* THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#include "qemu/osdep.h"
|
||||
#include "qemu-common.h"
|
||||
#include "qemu/thread.h"
|
||||
#include "hw/core/cpu.h"
|
||||
#include "qemu/main-loop.h"
|
||||
#include "sysemu/cpus.h"
|
||||
#include "sysemu/cpu-throttle.h"
|
||||
|
||||
/* vcpu throttling controls */
|
||||
static QEMUTimer *throttle_timer;
|
||||
static unsigned int throttle_percentage;
|
||||
|
||||
#define CPU_THROTTLE_PCT_MIN 1
|
||||
#define CPU_THROTTLE_PCT_MAX 99
|
||||
#define CPU_THROTTLE_TIMESLICE_NS 10000000
|
||||
|
||||
static void cpu_throttle_thread(CPUState *cpu, run_on_cpu_data opaque)
|
||||
{
|
||||
double pct;
|
||||
double throttle_ratio;
|
||||
int64_t sleeptime_ns, endtime_ns;
|
||||
|
||||
if (!cpu_throttle_get_percentage()) {
|
||||
return;
|
||||
}
|
||||
|
||||
pct = (double)cpu_throttle_get_percentage() / 100;
|
||||
throttle_ratio = pct / (1 - pct);
|
||||
/* Add 1ns to fix double's rounding error (like 0.9999999...) */
|
||||
sleeptime_ns = (int64_t)(throttle_ratio * CPU_THROTTLE_TIMESLICE_NS + 1);
|
||||
endtime_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) + sleeptime_ns;
|
||||
while (sleeptime_ns > 0 && !cpu->stop) {
|
||||
if (sleeptime_ns > SCALE_MS) {
|
||||
qemu_cond_timedwait_iothread(cpu->halt_cond,
|
||||
sleeptime_ns / SCALE_MS);
|
||||
} else {
|
||||
qemu_mutex_unlock_iothread();
|
||||
g_usleep(sleeptime_ns / SCALE_US);
|
||||
qemu_mutex_lock_iothread();
|
||||
}
|
||||
sleeptime_ns = endtime_ns - qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
|
||||
}
|
||||
atomic_set(&cpu->throttle_thread_scheduled, 0);
|
||||
}
|
||||
|
||||
static void cpu_throttle_timer_tick(void *opaque)
|
||||
{
|
||||
CPUState *cpu;
|
||||
double pct;
|
||||
|
||||
/* Stop the timer if needed */
|
||||
if (!cpu_throttle_get_percentage()) {
|
||||
return;
|
||||
}
|
||||
CPU_FOREACH(cpu) {
|
||||
if (!atomic_xchg(&cpu->throttle_thread_scheduled, 1)) {
|
||||
async_run_on_cpu(cpu, cpu_throttle_thread,
|
||||
RUN_ON_CPU_NULL);
|
||||
}
|
||||
}
|
||||
|
||||
pct = (double)cpu_throttle_get_percentage() / 100;
|
||||
timer_mod(throttle_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT) +
|
||||
CPU_THROTTLE_TIMESLICE_NS / (1 - pct));
|
||||
}
|
||||
|
||||
void cpu_throttle_set(int new_throttle_pct)
|
||||
{
|
||||
/* Ensure throttle percentage is within valid range */
|
||||
new_throttle_pct = MIN(new_throttle_pct, CPU_THROTTLE_PCT_MAX);
|
||||
new_throttle_pct = MAX(new_throttle_pct, CPU_THROTTLE_PCT_MIN);
|
||||
|
||||
atomic_set(&throttle_percentage, new_throttle_pct);
|
||||
|
||||
timer_mod(throttle_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT) +
|
||||
CPU_THROTTLE_TIMESLICE_NS);
|
||||
}
|
||||
|
||||
void cpu_throttle_stop(void)
|
||||
{
|
||||
atomic_set(&throttle_percentage, 0);
|
||||
}
|
||||
|
||||
bool cpu_throttle_active(void)
|
||||
{
|
||||
return (cpu_throttle_get_percentage() != 0);
|
||||
}
|
||||
|
||||
int cpu_throttle_get_percentage(void)
|
||||
{
|
||||
return atomic_read(&throttle_percentage);
|
||||
}
|
||||
|
||||
void cpu_throttle_init(void)
|
||||
{
|
||||
throttle_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL_RT,
|
||||
cpu_throttle_timer_tick, NULL);
|
||||
}
|
|
@ -61,6 +61,8 @@
|
|||
#include "hw/boards.h"
|
||||
#include "hw/hw.h"
|
||||
|
||||
#include "sysemu/cpu-throttle.h"
|
||||
|
||||
#ifdef CONFIG_LINUX
|
||||
|
||||
#include <sys/prctl.h>
|
||||
|
@ -84,14 +86,6 @@ static QemuMutex qemu_global_mutex;
|
|||
int64_t max_delay;
|
||||
int64_t max_advance;
|
||||
|
||||
/* vcpu throttling controls */
|
||||
static QEMUTimer *throttle_timer;
|
||||
static unsigned int throttle_percentage;
|
||||
|
||||
#define CPU_THROTTLE_PCT_MIN 1
|
||||
#define CPU_THROTTLE_PCT_MAX 99
|
||||
#define CPU_THROTTLE_TIMESLICE_NS 10000000
|
||||
|
||||
bool cpu_is_stopped(CPUState *cpu)
|
||||
{
|
||||
return cpu->stopped || !runstate_is_running();
|
||||
|
@ -738,90 +732,12 @@ static const VMStateDescription vmstate_timers = {
|
|||
}
|
||||
};
|
||||
|
||||
static void cpu_throttle_thread(CPUState *cpu, run_on_cpu_data opaque)
|
||||
{
|
||||
double pct;
|
||||
double throttle_ratio;
|
||||
int64_t sleeptime_ns, endtime_ns;
|
||||
|
||||
if (!cpu_throttle_get_percentage()) {
|
||||
return;
|
||||
}
|
||||
|
||||
pct = (double)cpu_throttle_get_percentage()/100;
|
||||
throttle_ratio = pct / (1 - pct);
|
||||
/* Add 1ns to fix double's rounding error (like 0.9999999...) */
|
||||
sleeptime_ns = (int64_t)(throttle_ratio * CPU_THROTTLE_TIMESLICE_NS + 1);
|
||||
endtime_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) + sleeptime_ns;
|
||||
while (sleeptime_ns > 0 && !cpu->stop) {
|
||||
if (sleeptime_ns > SCALE_MS) {
|
||||
qemu_cond_timedwait(cpu->halt_cond, &qemu_global_mutex,
|
||||
sleeptime_ns / SCALE_MS);
|
||||
} else {
|
||||
qemu_mutex_unlock_iothread();
|
||||
g_usleep(sleeptime_ns / SCALE_US);
|
||||
qemu_mutex_lock_iothread();
|
||||
}
|
||||
sleeptime_ns = endtime_ns - qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
|
||||
}
|
||||
atomic_set(&cpu->throttle_thread_scheduled, 0);
|
||||
}
|
||||
|
||||
static void cpu_throttle_timer_tick(void *opaque)
|
||||
{
|
||||
CPUState *cpu;
|
||||
double pct;
|
||||
|
||||
/* Stop the timer if needed */
|
||||
if (!cpu_throttle_get_percentage()) {
|
||||
return;
|
||||
}
|
||||
CPU_FOREACH(cpu) {
|
||||
if (!atomic_xchg(&cpu->throttle_thread_scheduled, 1)) {
|
||||
async_run_on_cpu(cpu, cpu_throttle_thread,
|
||||
RUN_ON_CPU_NULL);
|
||||
}
|
||||
}
|
||||
|
||||
pct = (double)cpu_throttle_get_percentage()/100;
|
||||
timer_mod(throttle_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT) +
|
||||
CPU_THROTTLE_TIMESLICE_NS / (1-pct));
|
||||
}
|
||||
|
||||
void cpu_throttle_set(int new_throttle_pct)
|
||||
{
|
||||
/* Ensure throttle percentage is within valid range */
|
||||
new_throttle_pct = MIN(new_throttle_pct, CPU_THROTTLE_PCT_MAX);
|
||||
new_throttle_pct = MAX(new_throttle_pct, CPU_THROTTLE_PCT_MIN);
|
||||
|
||||
atomic_set(&throttle_percentage, new_throttle_pct);
|
||||
|
||||
timer_mod(throttle_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT) +
|
||||
CPU_THROTTLE_TIMESLICE_NS);
|
||||
}
|
||||
|
||||
void cpu_throttle_stop(void)
|
||||
{
|
||||
atomic_set(&throttle_percentage, 0);
|
||||
}
|
||||
|
||||
bool cpu_throttle_active(void)
|
||||
{
|
||||
return (cpu_throttle_get_percentage() != 0);
|
||||
}
|
||||
|
||||
int cpu_throttle_get_percentage(void)
|
||||
{
|
||||
return atomic_read(&throttle_percentage);
|
||||
}
|
||||
|
||||
void cpu_ticks_init(void)
|
||||
{
|
||||
seqlock_init(&timers_state.vm_clock_seqlock);
|
||||
qemu_spin_init(&timers_state.vm_clock_lock);
|
||||
vmstate_register(NULL, 0, &vmstate_timers, &timers_state);
|
||||
throttle_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL_RT,
|
||||
cpu_throttle_timer_tick, NULL);
|
||||
cpu_throttle_init();
|
||||
}
|
||||
|
||||
void configure_icount(QemuOpts *opts, Error **errp)
|
||||
|
@ -1017,10 +933,6 @@ void cpu_synchronize_all_states(void)
|
|||
|
||||
CPU_FOREACH(cpu) {
|
||||
cpu_synchronize_state(cpu);
|
||||
/* TODO: move to cpu_synchronize_state() */
|
||||
if (hvf_enabled()) {
|
||||
hvf_cpu_synchronize_state(cpu);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1030,10 +942,6 @@ void cpu_synchronize_all_post_reset(void)
|
|||
|
||||
CPU_FOREACH(cpu) {
|
||||
cpu_synchronize_post_reset(cpu);
|
||||
/* TODO: move to cpu_synchronize_post_reset() */
|
||||
if (hvf_enabled()) {
|
||||
hvf_cpu_synchronize_post_reset(cpu);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1043,10 +951,6 @@ void cpu_synchronize_all_post_init(void)
|
|||
|
||||
CPU_FOREACH(cpu) {
|
||||
cpu_synchronize_post_init(cpu);
|
||||
/* TODO: move to cpu_synchronize_post_init() */
|
||||
if (hvf_enabled()) {
|
||||
hvf_cpu_synchronize_post_init(cpu);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1891,6 +1795,11 @@ void qemu_cond_wait_iothread(QemuCond *cond)
|
|||
qemu_cond_wait(cond, &qemu_global_mutex);
|
||||
}
|
||||
|
||||
void qemu_cond_timedwait_iothread(QemuCond *cond, int ms)
|
||||
{
|
||||
qemu_cond_timedwait(cond, &qemu_global_mutex, ms);
|
||||
}
|
||||
|
||||
static bool all_vcpus_paused(void)
|
||||
{
|
||||
CPUState *cpu;
|
14
softmmu/vl.c
14
softmmu/vl.c
|
@ -389,6 +389,12 @@ static QemuOptsList qemu_msg_opts = {
|
|||
.name = "timestamp",
|
||||
.type = QEMU_OPT_BOOL,
|
||||
},
|
||||
{
|
||||
.name = "guest-name",
|
||||
.type = QEMU_OPT_BOOL,
|
||||
.help = "Prepends guest name for error messages but only if "
|
||||
"-name guest is set otherwise option is ignored\n",
|
||||
},
|
||||
{ /* end of list */ }
|
||||
},
|
||||
};
|
||||
|
@ -1114,6 +1120,7 @@ static void realtime_init(void)
|
|||
static void configure_msg(QemuOpts *opts)
|
||||
{
|
||||
error_with_timestamp = qemu_opt_get_bool(opts, "timestamp", false);
|
||||
error_with_guestname = qemu_opt_get_bool(opts, "guest-name", false);
|
||||
}
|
||||
|
||||
|
||||
|
@ -3499,11 +3506,6 @@ void qemu_init(int argc, char **argv, char **envp)
|
|||
g_slist_free(accel_list);
|
||||
exit(0);
|
||||
}
|
||||
if (optarg && strchr(optarg, ':')) {
|
||||
error_report("Don't use ':' with -accel, "
|
||||
"use -M accel=... for now instead");
|
||||
exit(1);
|
||||
}
|
||||
break;
|
||||
case QEMU_OPTION_usb:
|
||||
olist = qemu_find_opts("machine");
|
||||
|
@ -3592,6 +3594,8 @@ void qemu_init(int argc, char **argv, char **envp)
|
|||
if (!opts) {
|
||||
exit(1);
|
||||
}
|
||||
/* Capture guest name if -msg guest-name is used later */
|
||||
error_guest_name = qemu_opt_get(opts, "guest");
|
||||
break;
|
||||
case QEMU_OPTION_prom_env:
|
||||
if (nb_prom_envs >= MAX_PROM_ENVS) {
|
||||
|
|
|
@ -3,6 +3,7 @@ obj-$(CONFIG_TCG) += translate.o
|
|||
obj-$(CONFIG_TCG) += bpt_helper.o cc_helper.o excp_helper.o fpu_helper.o
|
||||
obj-$(CONFIG_TCG) += int_helper.o mem_helper.o misc_helper.o mpx_helper.o
|
||||
obj-$(CONFIG_TCG) += seg_helper.o smm_helper.o svm_helper.o
|
||||
obj-$(call lnot,$(CONFIG_TCG)) += tcg-stub.o
|
||||
obj-$(call lnot,$(CONFIG_KVM)) += kvm-stub.o
|
||||
ifeq ($(CONFIG_SOFTMMU),y)
|
||||
obj-y += machine.o arch_memory_mapping.o arch_dump.o monitor.o
|
||||
|
|
|
@ -986,8 +986,8 @@ static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
|
|||
NULL, NULL, "avx512-4vnniw", "avx512-4fmaps",
|
||||
NULL, NULL, NULL, NULL,
|
||||
"avx512-vp2intersect", NULL, "md-clear", NULL,
|
||||
NULL, NULL, NULL, NULL,
|
||||
NULL, NULL, NULL /* pconfig */, NULL,
|
||||
NULL, NULL, "serialize", NULL,
|
||||
"tsx-ldtrk", NULL, NULL /* pconfig */, NULL,
|
||||
NULL, NULL, NULL, NULL,
|
||||
NULL, NULL, "spec-ctrl", "stibp",
|
||||
NULL, "arch-capabilities", "core-capability", "ssbd",
|
||||
|
@ -5968,6 +5968,7 @@ static void x86_cpu_reset(DeviceState *dev)
|
|||
/* init to reset state */
|
||||
|
||||
env->hflags2 |= HF2_GIF_MASK;
|
||||
env->hflags &= ~HF_GUEST_MASK;
|
||||
|
||||
cpu_x86_update_cr0(env, 0x60000010);
|
||||
env->a20_mask = ~0x0;
|
||||
|
@ -6079,9 +6080,6 @@ static void x86_cpu_reset(DeviceState *dev)
|
|||
if (kvm_enabled()) {
|
||||
kvm_arch_reset_vcpu(cpu);
|
||||
}
|
||||
else if (hvf_enabled()) {
|
||||
hvf_reset_vcpu(s);
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
|
@ -6400,7 +6398,7 @@ static void x86_cpu_expand_features(X86CPU *cpu, Error **errp)
|
|||
} else if (cpu->env.cpuid_min_level < 0x14) {
|
||||
mark_unavailable_features(cpu, FEAT_7_0_EBX,
|
||||
CPUID_7_0_EBX_INTEL_PT,
|
||||
"Intel PT need CPUID leaf 0x14, please set by \"-cpu ...,+intel-pt,level=0x14\"");
|
||||
"Intel PT need CPUID leaf 0x14, please set by \"-cpu ...,+intel-pt,min-level=0x14\"");
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -6511,6 +6509,9 @@ static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
|
|||
host_cpuid(5, 0, &cpu->mwait.eax, &cpu->mwait.ebx,
|
||||
&cpu->mwait.ecx, &cpu->mwait.edx);
|
||||
env->features[FEAT_1_ECX] |= CPUID_EXT_MONITOR;
|
||||
if (kvm_enabled() && kvm_has_waitpkg()) {
|
||||
env->features[FEAT_7_0_ECX] |= CPUID_7_0_ECX_WAITPKG;
|
||||
}
|
||||
}
|
||||
if (kvm_enabled() && cpu->ucode_rev == 0) {
|
||||
cpu->ucode_rev = kvm_arch_get_supported_msr_feature(kvm_state,
|
||||
|
|
|
@ -777,6 +777,10 @@ typedef uint64_t FeatureWordArray[FEATURE_WORDS];
|
|||
#define CPUID_7_0_EDX_AVX512_4FMAPS (1U << 3)
|
||||
/* AVX512 Vector Pair Intersection to a Pair of Mask Registers */
|
||||
#define CPUID_7_0_EDX_AVX512_VP2INTERSECT (1U << 8)
|
||||
/* SERIALIZE instruction */
|
||||
#define CPUID_7_0_EDX_SERIALIZE (1U << 14)
|
||||
/* TSX Suspend Load Address Tracking instruction */
|
||||
#define CPUID_7_0_EDX_TSX_LDTRK (1U << 16)
|
||||
/* Speculation Control */
|
||||
#define CPUID_7_0_EDX_SPEC_CTRL (1U << 26)
|
||||
/* Single Thread Indirect Branch Predictors */
|
||||
|
@ -2118,6 +2122,11 @@ static inline bool cpu_has_vmx(CPUX86State *env)
|
|||
return env->features[FEAT_1_ECX] & CPUID_EXT_VMX;
|
||||
}
|
||||
|
||||
static inline bool cpu_has_svm(CPUX86State *env)
|
||||
{
|
||||
return env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM;
|
||||
}
|
||||
|
||||
/*
|
||||
* In order for a vCPU to enter VMX operation it must have CR4.VMXE set.
|
||||
* Since it was set, CR4.VMXE must remain set as long as vCPU is in
|
||||
|
@ -2143,6 +2152,7 @@ static inline bool cpu_vmx_maybe_enabled(CPUX86State *env)
|
|||
/* fpu_helper.c */
|
||||
void update_fp_status(CPUX86State *env);
|
||||
void update_mxcsr_status(CPUX86State *env);
|
||||
void update_mxcsr_from_sse_status(CPUX86State *env);
|
||||
|
||||
static inline void cpu_set_mxcsr(CPUX86State *env, uint32_t mxcsr)
|
||||
{
|
||||
|
|
|
@ -262,8 +262,8 @@ static hwaddr get_hphys(CPUState *cs, hwaddr gphys, MMUAccessType access_type,
|
|||
}
|
||||
ptep = pde | PG_NX_MASK;
|
||||
|
||||
/* if PSE bit is set, then we use a 4MB page */
|
||||
if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
|
||||
/* if host cr4 PSE bit is set, then we use a 4MB page */
|
||||
if ((pde & PG_PSE_MASK) && (env->nested_pg_mode & SVM_NPT_PSE)) {
|
||||
page_size = 4096 * 1024;
|
||||
pte_addr = pde_addr;
|
||||
|
||||
|
|
|
@ -2539,6 +2539,7 @@ static void do_xsave_fpu(CPUX86State *env, target_ulong ptr, uintptr_t ra)
|
|||
|
||||
static void do_xsave_mxcsr(CPUX86State *env, target_ulong ptr, uintptr_t ra)
|
||||
{
|
||||
update_mxcsr_from_sse_status(env);
|
||||
cpu_stl_data_ra(env, ptr + XO(legacy.mxcsr), env->mxcsr, ra);
|
||||
cpu_stl_data_ra(env, ptr + XO(legacy.mxcsr_mask), 0x0000ffff, ra);
|
||||
}
|
||||
|
@ -2968,11 +2969,45 @@ void update_mxcsr_status(CPUX86State *env)
|
|||
}
|
||||
set_float_rounding_mode(rnd_type, &env->sse_status);
|
||||
|
||||
/* Set exception flags. */
|
||||
set_float_exception_flags((mxcsr & FPUS_IE ? float_flag_invalid : 0) |
|
||||
(mxcsr & FPUS_ZE ? float_flag_divbyzero : 0) |
|
||||
(mxcsr & FPUS_OE ? float_flag_overflow : 0) |
|
||||
(mxcsr & FPUS_UE ? float_flag_underflow : 0) |
|
||||
(mxcsr & FPUS_PE ? float_flag_inexact : 0),
|
||||
&env->sse_status);
|
||||
|
||||
/* set denormals are zero */
|
||||
set_flush_inputs_to_zero((mxcsr & SSE_DAZ) ? 1 : 0, &env->sse_status);
|
||||
|
||||
/* set flush to zero */
|
||||
set_flush_to_zero((mxcsr & SSE_FZ) ? 1 : 0, &env->fp_status);
|
||||
set_flush_to_zero((mxcsr & SSE_FZ) ? 1 : 0, &env->sse_status);
|
||||
}
|
||||
|
||||
void update_mxcsr_from_sse_status(CPUX86State *env)
|
||||
{
|
||||
if (tcg_enabled()) {
|
||||
uint8_t flags = get_float_exception_flags(&env->sse_status);
|
||||
/*
|
||||
* The MXCSR denormal flag has opposite semantics to
|
||||
* float_flag_input_denormal (the softfloat code sets that flag
|
||||
* only when flushing input denormals to zero, but SSE sets it
|
||||
* only when not flushing them to zero), so is not converted
|
||||
* here.
|
||||
*/
|
||||
env->mxcsr |= ((flags & float_flag_invalid ? FPUS_IE : 0) |
|
||||
(flags & float_flag_divbyzero ? FPUS_ZE : 0) |
|
||||
(flags & float_flag_overflow ? FPUS_OE : 0) |
|
||||
(flags & float_flag_underflow ? FPUS_UE : 0) |
|
||||
(flags & float_flag_inexact ? FPUS_PE : 0) |
|
||||
(flags & float_flag_output_denormal ? FPUS_UE | FPUS_PE :
|
||||
0));
|
||||
}
|
||||
}
|
||||
|
||||
void helper_update_mxcsr(CPUX86State *env)
|
||||
{
|
||||
update_mxcsr_from_sse_status(env);
|
||||
}
|
||||
|
||||
void helper_ldmxcsr(CPUX86State *env, uint32_t val)
|
||||
|
|
|
@ -184,6 +184,7 @@ int x86_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n)
|
|||
return gdb_get_reg32(mem_buf, 0); /* fop */
|
||||
|
||||
case IDX_MXCSR_REG:
|
||||
update_mxcsr_from_sse_status(env);
|
||||
return gdb_get_reg32(mem_buf, env->mxcsr);
|
||||
|
||||
case IDX_CTL_CR0_REG:
|
||||
|
|
|
@ -370,10 +370,11 @@ void x86_cpu_dump_local_apic_state(CPUState *cs, int flags)
|
|||
dump_apic_lvt("LVTTHMR", lvt[APIC_LVT_THERMAL], false);
|
||||
dump_apic_lvt("LVTT", lvt[APIC_LVT_TIMER], true);
|
||||
|
||||
qemu_printf("Timer\t DCR=0x%x (divide by %u) initial_count = %u\n",
|
||||
qemu_printf("Timer\t DCR=0x%x (divide by %u) initial_count = %u"
|
||||
" current_count = %u\n",
|
||||
s->divide_conf & APIC_DCR_MASK,
|
||||
divider_conf(s->divide_conf),
|
||||
s->initial_count);
|
||||
s->initial_count, apic_get_current_count(s));
|
||||
|
||||
qemu_printf("SPIV\t 0x%08x APIC %s, focus=%s, spurious vec %u\n",
|
||||
s->spurious_vec,
|
||||
|
@ -544,6 +545,7 @@ void x86_cpu_dump_state(CPUState *cs, FILE *f, int flags)
|
|||
for(i = 0; i < 8; i++) {
|
||||
fptag |= ((!env->fptags[i]) << i);
|
||||
}
|
||||
update_mxcsr_from_sse_status(env);
|
||||
qemu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
|
||||
env->fpuc,
|
||||
(env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
|
||||
|
|
|
@ -207,6 +207,7 @@ DEF_HELPER_FLAGS_2(pext, TCG_CALL_NO_RWG_SE, tl, tl, tl)
|
|||
/* MMX/SSE */
|
||||
|
||||
DEF_HELPER_2(ldmxcsr, void, env, i32)
|
||||
DEF_HELPER_1(update_mxcsr, void, env)
|
||||
DEF_HELPER_1(enter_mmx, void, env)
|
||||
DEF_HELPER_1(emms, void, env)
|
||||
DEF_HELPER_3(movq, void, env, ptr, ptr)
|
||||
|
|
|
@ -282,47 +282,54 @@ void hvf_handle_io(CPUArchState *env, uint16_t port, void *buffer,
|
|||
}
|
||||
}
|
||||
|
||||
/* TODO: synchronize vcpu state */
|
||||
static void do_hvf_cpu_synchronize_state(CPUState *cpu, run_on_cpu_data arg)
|
||||
{
|
||||
CPUState *cpu_state = cpu;
|
||||
if (cpu_state->vcpu_dirty == 0) {
|
||||
hvf_get_registers(cpu_state);
|
||||
}
|
||||
|
||||
cpu_state->vcpu_dirty = 1;
|
||||
}
|
||||
|
||||
void hvf_cpu_synchronize_state(CPUState *cpu_state)
|
||||
{
|
||||
if (cpu_state->vcpu_dirty == 0) {
|
||||
run_on_cpu(cpu_state, do_hvf_cpu_synchronize_state, RUN_ON_CPU_NULL);
|
||||
if (!cpu->vcpu_dirty) {
|
||||
hvf_get_registers(cpu);
|
||||
cpu->vcpu_dirty = true;
|
||||
}
|
||||
}
|
||||
|
||||
static void do_hvf_cpu_synchronize_post_reset(CPUState *cpu, run_on_cpu_data arg)
|
||||
void hvf_cpu_synchronize_state(CPUState *cpu)
|
||||
{
|
||||
CPUState *cpu_state = cpu;
|
||||
hvf_put_registers(cpu_state);
|
||||
cpu_state->vcpu_dirty = false;
|
||||
if (!cpu->vcpu_dirty) {
|
||||
run_on_cpu(cpu, do_hvf_cpu_synchronize_state, RUN_ON_CPU_NULL);
|
||||
}
|
||||
}
|
||||
|
||||
void hvf_cpu_synchronize_post_reset(CPUState *cpu_state)
|
||||
static void do_hvf_cpu_synchronize_post_reset(CPUState *cpu,
|
||||
run_on_cpu_data arg)
|
||||
{
|
||||
run_on_cpu(cpu_state, do_hvf_cpu_synchronize_post_reset, RUN_ON_CPU_NULL);
|
||||
hvf_put_registers(cpu);
|
||||
cpu->vcpu_dirty = false;
|
||||
}
|
||||
|
||||
void hvf_cpu_synchronize_post_reset(CPUState *cpu)
|
||||
{
|
||||
run_on_cpu(cpu, do_hvf_cpu_synchronize_post_reset, RUN_ON_CPU_NULL);
|
||||
}
|
||||
|
||||
static void do_hvf_cpu_synchronize_post_init(CPUState *cpu,
|
||||
run_on_cpu_data arg)
|
||||
{
|
||||
CPUState *cpu_state = cpu;
|
||||
hvf_put_registers(cpu_state);
|
||||
cpu_state->vcpu_dirty = false;
|
||||
hvf_put_registers(cpu);
|
||||
cpu->vcpu_dirty = false;
|
||||
}
|
||||
|
||||
void hvf_cpu_synchronize_post_init(CPUState *cpu_state)
|
||||
void hvf_cpu_synchronize_post_init(CPUState *cpu)
|
||||
{
|
||||
run_on_cpu(cpu_state, do_hvf_cpu_synchronize_post_init, RUN_ON_CPU_NULL);
|
||||
run_on_cpu(cpu, do_hvf_cpu_synchronize_post_init, RUN_ON_CPU_NULL);
|
||||
}
|
||||
|
||||
static void do_hvf_cpu_synchronize_pre_loadvm(CPUState *cpu,
|
||||
run_on_cpu_data arg)
|
||||
{
|
||||
cpu->vcpu_dirty = true;
|
||||
}
|
||||
|
||||
void hvf_cpu_synchronize_pre_loadvm(CPUState *cpu)
|
||||
{
|
||||
run_on_cpu(cpu, do_hvf_cpu_synchronize_pre_loadvm, RUN_ON_CPU_NULL);
|
||||
}
|
||||
|
||||
static bool ept_emulation_fault(hvf_slot *slot, uint64_t gpa, uint64_t ept_qual)
|
||||
|
@ -441,96 +448,6 @@ static MemoryListener hvf_memory_listener = {
|
|||
.log_sync = hvf_log_sync,
|
||||
};
|
||||
|
||||
void hvf_reset_vcpu(CPUState *cpu) {
|
||||
uint64_t pdpte[4] = {0, 0, 0, 0};
|
||||
int i;
|
||||
|
||||
/* TODO: this shouldn't be needed; there is already a call to
|
||||
* cpu_synchronize_all_post_reset in vl.c
|
||||
*/
|
||||
wvmcs(cpu->hvf_fd, VMCS_ENTRY_CTLS, 0);
|
||||
wvmcs(cpu->hvf_fd, VMCS_GUEST_IA32_EFER, 0);
|
||||
|
||||
/* Initialize PDPTE */
|
||||
for (i = 0; i < 4; i++) {
|
||||
wvmcs(cpu->hvf_fd, VMCS_GUEST_PDPTE0 + i * 2, pdpte[i]);
|
||||
}
|
||||
|
||||
macvm_set_cr0(cpu->hvf_fd, 0x60000010);
|
||||
|
||||
wvmcs(cpu->hvf_fd, VMCS_CR4_MASK, CR4_VMXE_MASK);
|
||||
wvmcs(cpu->hvf_fd, VMCS_CR4_SHADOW, 0x0);
|
||||
wvmcs(cpu->hvf_fd, VMCS_GUEST_CR4, CR4_VMXE_MASK);
|
||||
|
||||
/* set VMCS guest state fields */
|
||||
wvmcs(cpu->hvf_fd, VMCS_GUEST_CS_SELECTOR, 0xf000);
|
||||
wvmcs(cpu->hvf_fd, VMCS_GUEST_CS_LIMIT, 0xffff);
|
||||
wvmcs(cpu->hvf_fd, VMCS_GUEST_CS_ACCESS_RIGHTS, 0x9b);
|
||||
wvmcs(cpu->hvf_fd, VMCS_GUEST_CS_BASE, 0xffff0000);
|
||||
|
||||
wvmcs(cpu->hvf_fd, VMCS_GUEST_DS_SELECTOR, 0);
|
||||
wvmcs(cpu->hvf_fd, VMCS_GUEST_DS_LIMIT, 0xffff);
|
||||
wvmcs(cpu->hvf_fd, VMCS_GUEST_DS_ACCESS_RIGHTS, 0x93);
|
||||
wvmcs(cpu->hvf_fd, VMCS_GUEST_DS_BASE, 0);
|
||||
|
||||
wvmcs(cpu->hvf_fd, VMCS_GUEST_ES_SELECTOR, 0);
|
||||
wvmcs(cpu->hvf_fd, VMCS_GUEST_ES_LIMIT, 0xffff);
|
||||
wvmcs(cpu->hvf_fd, VMCS_GUEST_ES_ACCESS_RIGHTS, 0x93);
|
||||
wvmcs(cpu->hvf_fd, VMCS_GUEST_ES_BASE, 0);
|
||||
|
||||
wvmcs(cpu->hvf_fd, VMCS_GUEST_FS_SELECTOR, 0);
|
||||
wvmcs(cpu->hvf_fd, VMCS_GUEST_FS_LIMIT, 0xffff);
|
||||
wvmcs(cpu->hvf_fd, VMCS_GUEST_FS_ACCESS_RIGHTS, 0x93);
|
||||
wvmcs(cpu->hvf_fd, VMCS_GUEST_FS_BASE, 0);
|
||||
|
||||
wvmcs(cpu->hvf_fd, VMCS_GUEST_GS_SELECTOR, 0);
|
||||
wvmcs(cpu->hvf_fd, VMCS_GUEST_GS_LIMIT, 0xffff);
|
||||
wvmcs(cpu->hvf_fd, VMCS_GUEST_GS_ACCESS_RIGHTS, 0x93);
|
||||
wvmcs(cpu->hvf_fd, VMCS_GUEST_GS_BASE, 0);
|
||||
|
||||
wvmcs(cpu->hvf_fd, VMCS_GUEST_SS_SELECTOR, 0);
|
||||
wvmcs(cpu->hvf_fd, VMCS_GUEST_SS_LIMIT, 0xffff);
|
||||
wvmcs(cpu->hvf_fd, VMCS_GUEST_SS_ACCESS_RIGHTS, 0x93);
|
||||
wvmcs(cpu->hvf_fd, VMCS_GUEST_SS_BASE, 0);
|
||||
|
||||
wvmcs(cpu->hvf_fd, VMCS_GUEST_LDTR_SELECTOR, 0);
|
||||
wvmcs(cpu->hvf_fd, VMCS_GUEST_LDTR_LIMIT, 0);
|
||||
wvmcs(cpu->hvf_fd, VMCS_GUEST_LDTR_ACCESS_RIGHTS, 0x10000);
|
||||
wvmcs(cpu->hvf_fd, VMCS_GUEST_LDTR_BASE, 0);
|
||||
|
||||
wvmcs(cpu->hvf_fd, VMCS_GUEST_TR_SELECTOR, 0);
|
||||
wvmcs(cpu->hvf_fd, VMCS_GUEST_TR_LIMIT, 0);
|
||||
wvmcs(cpu->hvf_fd, VMCS_GUEST_TR_ACCESS_RIGHTS, 0x83);
|
||||
wvmcs(cpu->hvf_fd, VMCS_GUEST_TR_BASE, 0);
|
||||
|
||||
wvmcs(cpu->hvf_fd, VMCS_GUEST_GDTR_LIMIT, 0);
|
||||
wvmcs(cpu->hvf_fd, VMCS_GUEST_GDTR_BASE, 0);
|
||||
|
||||
wvmcs(cpu->hvf_fd, VMCS_GUEST_IDTR_LIMIT, 0);
|
||||
wvmcs(cpu->hvf_fd, VMCS_GUEST_IDTR_BASE, 0);
|
||||
|
||||
/*wvmcs(cpu->hvf_fd, VMCS_GUEST_CR2, 0x0);*/
|
||||
wvmcs(cpu->hvf_fd, VMCS_GUEST_CR3, 0x0);
|
||||
|
||||
wreg(cpu->hvf_fd, HV_X86_RIP, 0xfff0);
|
||||
wreg(cpu->hvf_fd, HV_X86_RDX, 0x623);
|
||||
wreg(cpu->hvf_fd, HV_X86_RFLAGS, 0x2);
|
||||
wreg(cpu->hvf_fd, HV_X86_RSP, 0x0);
|
||||
wreg(cpu->hvf_fd, HV_X86_RAX, 0x0);
|
||||
wreg(cpu->hvf_fd, HV_X86_RBX, 0x0);
|
||||
wreg(cpu->hvf_fd, HV_X86_RCX, 0x0);
|
||||
wreg(cpu->hvf_fd, HV_X86_RSI, 0x0);
|
||||
wreg(cpu->hvf_fd, HV_X86_RDI, 0x0);
|
||||
wreg(cpu->hvf_fd, HV_X86_RBP, 0x0);
|
||||
|
||||
for (int i = 0; i < 8; i++) {
|
||||
wreg(cpu->hvf_fd, HV_X86_R8 + i, 0x0);
|
||||
}
|
||||
|
||||
hv_vcpu_invalidate_tlb(cpu->hvf_fd);
|
||||
hv_vcpu_flush(cpu->hvf_fd);
|
||||
}
|
||||
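The reset values written by hvf_reset_vcpu() above mirror the architectural power-on state: CS selector 0xf000 with base 0xffff0000 and RIP 0xfff0 place the first instruction fetch at 0xfffffff0, the classic reset vector just below 4 GiB, and RDX 0x623 is presumably a family/model/stepping style signature. A small check of the reset-vector arithmetic:

#include <assert.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t cs_base = 0xffff0000;  /* VMCS_GUEST_CS_BASE at reset */
    uint64_t rip     = 0xfff0;      /* HV_X86_RIP at reset */

    /* Real-mode linear address = segment base + offset. */
    uint64_t first_fetch = cs_base + rip;

    assert(first_fetch == 0xfffffff0);  /* reset vector, 16 bytes below 4 GiB */
    printf("first instruction fetched from 0x%" PRIx64 "\n", first_fetch);
    return 0;
}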
|
||||
void hvf_vcpu_destroy(CPUState *cpu)
|
||||
{
|
||||
X86CPU *x86_cpu = X86_CPU(cpu);
|
||||

@@ -121,7 +121,9 @@ static inline void macvm_set_cr0(hv_vcpuid_t vcpu, uint64_t cr0)
    uint64_t pdpte[4] = {0, 0, 0, 0};
    uint64_t efer = rvmcs(vcpu, VMCS_GUEST_IA32_EFER);
    uint64_t old_cr0 = rvmcs(vcpu, VMCS_GUEST_CR0);
    uint64_t changed_cr0 = old_cr0 ^ cr0;
    uint64_t mask = CR0_PG | CR0_CD | CR0_NW | CR0_NE | CR0_ET;
    uint64_t entry_ctls;

    if ((cr0 & CR0_PG) && (rvmcs(vcpu, VMCS_GUEST_CR4) & CR4_PAE) &&
        !(efer & MSR_EFER_LME)) {

@@ -138,12 +140,16 @@ static inline void macvm_set_cr0(hv_vcpuid_t vcpu, uint64_t cr0)
    wvmcs(vcpu, VMCS_CR0_SHADOW, cr0);

    if (efer & MSR_EFER_LME) {
        if (!(old_cr0 & CR0_PG) && (cr0 & CR0_PG)) {
            enter_long_mode(vcpu, cr0, efer);
        }
        if (/*(old_cr0 & CR0_PG) &&*/ !(cr0 & CR0_PG)) {
            exit_long_mode(vcpu, cr0, efer);
        if (changed_cr0 & CR0_PG) {
            if (cr0 & CR0_PG) {
                enter_long_mode(vcpu, cr0, efer);
            } else {
                exit_long_mode(vcpu, cr0, efer);
            }
        }
    } else {
        entry_ctls = rvmcs(vcpu, VMCS_ENTRY_CTLS);
        wvmcs(vcpu, VMCS_ENTRY_CTLS, entry_ctls & ~VM_ENTRY_GUEST_LMA);
    }

    /* Filter new CR0 after we are finished examining it above. */
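The rewritten check above folds the two separate paging-transition tests into one: changed_cr0 is the XOR of the old and new CR0, so a bit is set there exactly when it differs between the two values, and changed_cr0 & CR0_PG fires on both the 0 to 1 and the 1 to 0 transition while EFER.LME is set. A minimal standalone sketch of the same pattern; CR0_PG here is the architectural bit 31, everything else is illustrative:

#include <stdint.h>
#include <stdio.h>

#define CR0_PG (1u << 31)   /* paging enable, bit 31 of CR0 */

/* Report paging transitions the way the changed_cr0 check does. */
static void check_pg_transition(uint32_t old_cr0, uint32_t new_cr0)
{
    uint32_t changed = old_cr0 ^ new_cr0;   /* bits that differ */

    if (changed & CR0_PG) {
        if (new_cr0 & CR0_PG) {
            printf("paging turned on  (enter long mode if EFER.LME)\n");
        } else {
            printf("paging turned off (exit long mode if EFER.LME)\n");
        }
    }
}

int main(void)
{
    check_pg_transition(0x60000010, 0x60000010 | CR0_PG); /* 0 -> 1 */
    check_pg_transition(0x60000010 | CR0_PG, 0x60000010); /* 1 -> 0 */
    check_pg_transition(0x60000010, 0x60000010);          /* no change */
    return 0;
}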
@@ -173,6 +179,7 @@ static inline void macvm_set_rip(CPUState *cpu, uint64_t rip)
|
|||
|
||||
/* BUG, should take considering overlap.. */
|
||||
wreg(cpu->hvf_fd, HV_X86_RIP, rip);
|
||||
env->eip = rip;
|
||||
|
||||
/* after moving forward in rip, we need to clean INTERRUPTABILITY */
|
||||
val = rvmcs(cpu->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY);
|
||||
|
|
|
@ -411,12 +411,6 @@ uint32_t kvm_arch_get_supported_cpuid(KVMState *s, uint32_t function,
|
|||
if (host_tsx_blacklisted()) {
|
||||
ret &= ~(CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_HLE);
|
||||
}
|
||||
} else if (function == 7 && index == 0 && reg == R_ECX) {
|
||||
if (enable_cpu_pm) {
|
||||
ret |= CPUID_7_0_ECX_WAITPKG;
|
||||
} else {
|
||||
ret &= ~CPUID_7_0_ECX_WAITPKG;
|
||||
}
|
||||
} else if (function == 7 && index == 0 && reg == R_EDX) {
|
||||
/*
|
||||
* Linux v4.17-v4.20 incorrectly return ARCH_CAPABILITIES on SVM hosts.
|
||||
|
@@ -1840,16 +1834,18 @@ int kvm_arch_init_vcpu(CPUState *cs)
|
|||
if (max_nested_state_len > 0) {
|
||||
assert(max_nested_state_len >= offsetof(struct kvm_nested_state, data));
|
||||
|
||||
if (cpu_has_vmx(env)) {
|
||||
if (cpu_has_vmx(env) || cpu_has_svm(env)) {
|
||||
struct kvm_vmx_nested_state_hdr *vmx_hdr;
|
||||
|
||||
env->nested_state = g_malloc0(max_nested_state_len);
|
||||
env->nested_state->size = max_nested_state_len;
|
||||
env->nested_state->format = KVM_STATE_NESTED_FORMAT_VMX;
|
||||
|
||||
vmx_hdr = &env->nested_state->hdr.vmx;
|
||||
vmx_hdr->vmxon_pa = -1ull;
|
||||
vmx_hdr->vmcs12_pa = -1ull;
|
||||
if (cpu_has_vmx(env)) {
|
||||
vmx_hdr = &env->nested_state->hdr.vmx;
|
||||
vmx_hdr->vmxon_pa = -1ull;
|
||||
vmx_hdr->vmcs12_pa = -1ull;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@@ -3873,6 +3869,20 @@ static int kvm_put_nested_state(X86CPU *cpu)
|
|||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Copy flags that are affected by reset from env->hflags and env->hflags2.
|
||||
*/
|
||||
if (env->hflags & HF_GUEST_MASK) {
|
||||
env->nested_state->flags |= KVM_STATE_NESTED_GUEST_MODE;
|
||||
} else {
|
||||
env->nested_state->flags &= ~KVM_STATE_NESTED_GUEST_MODE;
|
||||
}
|
||||
if (env->hflags2 & HF2_GIF_MASK) {
|
||||
env->nested_state->flags |= KVM_STATE_NESTED_GIF_SET;
|
||||
} else {
|
||||
env->nested_state->flags &= ~KVM_STATE_NESTED_GIF_SET;
|
||||
}
|
||||
|
||||
assert(env->nested_state->size <= max_nested_state_len);
|
||||
return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_NESTED_STATE, env->nested_state);
|
||||
}
|
||||
|
@@ -3901,11 +3911,19 @@ static int kvm_get_nested_state(X86CPU *cpu)
|
|||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* Copy flags that are affected by reset to env->hflags and env->hflags2.
|
||||
*/
|
||||
if (env->nested_state->flags & KVM_STATE_NESTED_GUEST_MODE) {
|
||||
env->hflags |= HF_GUEST_MASK;
|
||||
} else {
|
||||
env->hflags &= ~HF_GUEST_MASK;
|
||||
}
|
||||
if (env->nested_state->flags & KVM_STATE_NESTED_GIF_SET) {
|
||||
env->hflags2 |= HF2_GIF_MASK;
|
||||
} else {
|
||||
env->hflags2 &= ~HF2_GIF_MASK;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
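kvm_put_nested_state() and kvm_get_nested_state() mirror the same two bits in opposite directions: guest mode and GIF live in env->hflags/env->hflags2 on the QEMU side but in nested_state->flags on the KVM side, so they have to be copied across on every save and load. A small standalone sketch of that two-way mirroring; the bit values are illustrative, only the flag names match the real headers:

#include <stdint.h>
#include <stdio.h>

/* Illustrative bit positions; the real values come from QEMU/KVM headers. */
#define HF_GUEST_MASK               (1u << 0)
#define HF2_GIF_MASK                (1u << 1)
#define KVM_STATE_NESTED_GUEST_MODE (1u << 0)
#define KVM_STATE_NESTED_GIF_SET    (1u << 1)

struct env_like    { uint32_t hflags, hflags2; };
struct nested_like { uint32_t flags; };

/* put direction: QEMU flags -> nested-state blob handed to KVM */
static void sync_to_kvm(const struct env_like *env, struct nested_like *ns)
{
    if (env->hflags & HF_GUEST_MASK) {
        ns->flags |= KVM_STATE_NESTED_GUEST_MODE;
    } else {
        ns->flags &= ~KVM_STATE_NESTED_GUEST_MODE;
    }
    if (env->hflags2 & HF2_GIF_MASK) {
        ns->flags |= KVM_STATE_NESTED_GIF_SET;
    } else {
        ns->flags &= ~KVM_STATE_NESTED_GIF_SET;
    }
}

/* get direction: nested-state blob read from KVM -> QEMU flags */
static void sync_from_kvm(struct env_like *env, const struct nested_like *ns)
{
    if (ns->flags & KVM_STATE_NESTED_GUEST_MODE) {
        env->hflags |= HF_GUEST_MASK;
    } else {
        env->hflags &= ~HF_GUEST_MASK;
    }
    if (ns->flags & KVM_STATE_NESTED_GIF_SET) {
        env->hflags2 |= HF2_GIF_MASK;
    } else {
        env->hflags2 &= ~HF2_GIF_MASK;
    }
}

int main(void)
{
    struct env_like env = { .hflags = HF_GUEST_MASK, .hflags2 = 0 };
    struct nested_like ns = { 0 };

    sync_to_kvm(&env, &ns);
    env.hflags = 0;             /* pretend a reset clobbered it */
    sync_from_kvm(&env, &ns);   /* restored from the saved blob */
    printf("guest mode restored: %d\n", !!(env.hflags & HF_GUEST_MASK));
    return 0;
}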
|
@@ -3917,6 +3935,12 @@ int kvm_arch_put_registers(CPUState *cpu, int level)
|
|||
|
||||
assert(cpu_is_stopped(cpu) || qemu_cpu_is_self(cpu));
|
||||
|
||||
/* must be before kvm_put_nested_state so that EFER.SVME is set */
|
||||
ret = kvm_put_sregs(x86_cpu);
|
||||
if (ret < 0) {
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (level >= KVM_PUT_RESET_STATE) {
|
||||
ret = kvm_put_nested_state(x86_cpu);
|
||||
if (ret < 0) {
|
||||
|
@@ -3950,10 +3974,6 @@ int kvm_arch_put_registers(CPUState *cpu, int level)
|
|||
if (ret < 0) {
|
||||
return ret;
|
||||
}
|
||||
ret = kvm_put_sregs(x86_cpu);
|
||||
if (ret < 0) {
|
||||
return ret;
|
||||
}
|
||||
/* must be before kvm_put_msrs */
|
||||
ret = kvm_inject_mce_oldstyle(x86_cpu);
|
||||
if (ret < 0) {
|
||||
|
@@ -4704,3 +4724,8 @@ int kvm_arch_msi_data_to_gsi(uint32_t data)
|
|||
{
|
||||
abort();
|
||||
}
|
||||
|
||||
bool kvm_has_waitpkg(void)
|
||||
{
|
||||
return has_msr_umwait;
|
||||
}
|
||||
|
|
|
@@ -44,6 +44,7 @@ void kvm_put_apicbase(X86CPU *cpu, uint64_t value);
|
|||
|
||||
bool kvm_enable_x2apic(void);
|
||||
bool kvm_has_x2apic_api(void);
|
||||
bool kvm_has_waitpkg(void);
|
||||
|
||||
bool kvm_hv_vpindex_settable(void);
|
||||
|
||||
|
|
|
@@ -1071,13 +1071,41 @@ static const VMStateDescription vmstate_vmx_nested_state = {
|
|||
}
|
||||
};
|
||||
|
||||
static bool svm_nested_state_needed(void *opaque)
|
||||
{
|
||||
struct kvm_nested_state *nested_state = opaque;
|
||||
|
||||
/*
|
||||
* HF_GUEST_MASK and HF2_GIF_MASK are already serialized
|
||||
* via hflags and hflags2, all that's left is the opaque
|
||||
* nested state blob.
|
||||
*/
|
||||
return (nested_state->format == KVM_STATE_NESTED_FORMAT_SVM &&
|
||||
nested_state->size > offsetof(struct kvm_nested_state, data));
|
||||
}
|
||||
|
||||
static const VMStateDescription vmstate_svm_nested_state = {
|
||||
.name = "cpu/kvm_nested_state/svm",
|
||||
.version_id = 1,
|
||||
.minimum_version_id = 1,
|
||||
.needed = svm_nested_state_needed,
|
||||
.fields = (VMStateField[]) {
|
||||
VMSTATE_U64(hdr.svm.vmcb_pa, struct kvm_nested_state),
|
||||
VMSTATE_UINT8_ARRAY(data.svm[0].vmcb12,
|
||||
struct kvm_nested_state,
|
||||
KVM_STATE_NESTED_SVM_VMCB_SIZE),
|
||||
VMSTATE_END_OF_LIST()
|
||||
}
|
||||
};
|
||||
|
||||
static bool nested_state_needed(void *opaque)
|
||||
{
|
||||
X86CPU *cpu = opaque;
|
||||
CPUX86State *env = &cpu->env;
|
||||
|
||||
return (env->nested_state &&
|
||||
vmx_nested_state_needed(env->nested_state));
|
||||
(vmx_nested_state_needed(env->nested_state) ||
|
||||
svm_nested_state_needed(env->nested_state)));
|
||||
}
|
||||
|
||||
static int nested_state_post_load(void *opaque, int version_id)
|
||||
|
@@ -1139,6 +1167,7 @@ static const VMStateDescription vmstate_kvm_nested_state = {
|
|||
},
|
||||
.subsections = (const VMStateDescription*[]) {
|
||||
&vmstate_vmx_nested_state,
|
||||
&vmstate_svm_nested_state,
|
||||
NULL
|
||||
}
|
||||
};
|
||||
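The SVM blob is attached as a subsection, so it is only transferred when .needed returns true and migration streams to older QEMU stay compatible whenever no nested SVM state is live. A minimal, hypothetical VMStateDescription showing the same needed-gated subsection shape; FooState and its fields are invented for illustration and are not real QEMU devices:

#include "qemu/osdep.h"
#include "migration/vmstate.h"

/* Hypothetical device state used only to illustrate the subsection pattern. */
typedef struct FooState {
    uint32_t base_reg;
    uint32_t ext_reg;
    bool ext_in_use;
} FooState;

static bool foo_ext_needed(void *opaque)
{
    FooState *s = opaque;

    /* Skip the subsection (and stay migratable to older versions)
     * whenever the extended register carries no information. */
    return s->ext_in_use;
}

static const VMStateDescription vmstate_foo_ext = {
    .name = "foo/ext",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = foo_ext_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(ext_reg, FooState),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_foo = {
    .name = "foo",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(base_reg, FooState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_foo_ext,
        NULL
    }
};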
|
|
|
@@ -726,13 +726,5 @@ SevLaunchMeasureInfo *qmp_query_sev_launch_measure(Error **errp)

SevCapability *qmp_query_sev_capabilities(Error **errp)
{
    SevCapability *data;

    data = sev_get_capabilities();
    if (!data) {
        error_setg(errp, "SEV feature is not available");
        return NULL;
    }

    return data;
    return sev_get_capabilities(errp);
}
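The QMP handler no longer wraps every failure in the same generic message: sev_get_capabilities() now takes the Error ** itself and reports the precise reason, and the caller simply forwards its errp. That is the usual errp convention, sketched below with plain C stand-ins for Error and error_setg(), not the real QAPI types:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Minimal stand-in for QEMU's Error / error_setg(); illustrative only. */
typedef struct Error { char msg[128]; } Error;

static void error_setg_sketch(Error **errp, const char *msg)
{
    if (errp) {
        *errp = malloc(sizeof(Error));
        snprintf((*errp)->msg, sizeof((*errp)->msg), "%s", msg);
    }
}

/* Callee: on failure, set *errp exactly once and return NULL. */
static char *get_capabilities(Error **errp)
{
    int sev_available = 0;   /* pretend the platform probe failed */

    if (!sev_available) {
        error_setg_sketch(errp, "SEV is not enabled in KVM");
        return NULL;
    }
    return strdup("caps");
}

/* Caller: just forward errp; no second, vaguer message layered on top. */
static char *qmp_query_capabilities(Error **errp)
{
    return get_capabilities(errp);
}

int main(void)
{
    Error *err = NULL;
    char *caps = qmp_query_capabilities(&err);

    if (!caps) {
        printf("error: %s\n", err->msg);
        free(err);
    }
    return 0;
}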
|
|
|
@@ -843,6 +843,7 @@ int64_t helper_cvttsd2sq(CPUX86State *env, ZMMReg *s)
|
|||
|
||||
void helper_rsqrtps(CPUX86State *env, ZMMReg *d, ZMMReg *s)
|
||||
{
|
||||
uint8_t old_flags = get_float_exception_flags(&env->sse_status);
|
||||
d->ZMM_S(0) = float32_div(float32_one,
|
||||
float32_sqrt(s->ZMM_S(0), &env->sse_status),
|
||||
&env->sse_status);
|
||||
|
@@ -855,26 +856,33 @@ void helper_rsqrtps(CPUX86State *env, ZMMReg *d, ZMMReg *s)
|
|||
d->ZMM_S(3) = float32_div(float32_one,
|
||||
float32_sqrt(s->ZMM_S(3), &env->sse_status),
|
||||
&env->sse_status);
|
||||
set_float_exception_flags(old_flags, &env->sse_status);
|
||||
}
|
||||
|
||||
void helper_rsqrtss(CPUX86State *env, ZMMReg *d, ZMMReg *s)
|
||||
{
|
||||
uint8_t old_flags = get_float_exception_flags(&env->sse_status);
|
||||
d->ZMM_S(0) = float32_div(float32_one,
|
||||
float32_sqrt(s->ZMM_S(0), &env->sse_status),
|
||||
&env->sse_status);
|
||||
set_float_exception_flags(old_flags, &env->sse_status);
|
||||
}
|
||||
|
||||
void helper_rcpps(CPUX86State *env, ZMMReg *d, ZMMReg *s)
|
||||
{
|
||||
uint8_t old_flags = get_float_exception_flags(&env->sse_status);
|
||||
d->ZMM_S(0) = float32_div(float32_one, s->ZMM_S(0), &env->sse_status);
|
||||
d->ZMM_S(1) = float32_div(float32_one, s->ZMM_S(1), &env->sse_status);
|
||||
d->ZMM_S(2) = float32_div(float32_one, s->ZMM_S(2), &env->sse_status);
|
||||
d->ZMM_S(3) = float32_div(float32_one, s->ZMM_S(3), &env->sse_status);
|
||||
set_float_exception_flags(old_flags, &env->sse_status);
|
||||
}
|
||||
|
||||
void helper_rcpss(CPUX86State *env, ZMMReg *d, ZMMReg *s)
|
||||
{
|
||||
uint8_t old_flags = get_float_exception_flags(&env->sse_status);
|
||||
d->ZMM_S(0) = float32_div(float32_one, s->ZMM_S(0), &env->sse_status);
|
||||
set_float_exception_flags(old_flags, &env->sse_status);
|
||||
}
|
||||
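rcpss, rcpps, rsqrtss and rsqrtps return approximations and are defined not to signal SIMD exceptions, but the helpers above compute them with ordinary softfloat divides and square roots, which do set status flags. The added old_flags save and restore discards whatever the internal computation raised; the new test later in this series checks exactly this for sNaN inputs. The same save, compute, restore shape reduced to a standalone example with a plain status word instead of env->sse_status:

#include <stdint.h>
#include <stdio.h>

/* Stand-in status word; in the real helpers this is env->sse_status. */
static uint8_t status_flags;
#define FLAG_INEXACT (1u << 0)

/* The "internal" computation may set flags as a side effect. */
static float internal_reciprocal(float x)
{
    status_flags |= FLAG_INEXACT;   /* 1/3 is inexact, for example */
    return 1.0f / x;
}

/* Approximation helper: the result is allowed, flag leakage is not. */
static float rcp_like(float x)
{
    uint8_t old_flags = status_flags;   /* snapshot before computing */
    float r = internal_reciprocal(x);
    status_flags = old_flags;           /* drop flags the internal op set */
    return r;
}

int main(void)
{
    status_flags = 0;
    float r = rcp_like(3.0f);
    printf("rcp ~= %f, flags after = %u (expected 0)\n", r, status_flags);
    return 0;
}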
|
||||
static inline uint64_t helper_extrq(uint64_t src, int shift, int len)
|
||||
|
@@ -1764,6 +1772,7 @@ void glue(helper_phminposuw, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)
|
|||
void glue(helper_roundps, SUFFIX)(CPUX86State *env, Reg *d, Reg *s,
|
||||
uint32_t mode)
|
||||
{
|
||||
uint8_t old_flags = get_float_exception_flags(&env->sse_status);
|
||||
signed char prev_rounding_mode;
|
||||
|
||||
prev_rounding_mode = env->sse_status.float_rounding_mode;
|
||||
|
@@ -1789,19 +1798,18 @@ void glue(helper_roundps, SUFFIX)(CPUX86State *env, Reg *d, Reg *s,
    d->ZMM_S(2) = float32_round_to_int(s->ZMM_S(2), &env->sse_status);
    d->ZMM_S(3) = float32_round_to_int(s->ZMM_S(3), &env->sse_status);

#if 0 /* TODO */
    if (mode & (1 << 3)) {
    if (mode & (1 << 3) && !(old_flags & float_flag_inexact)) {
        set_float_exception_flags(get_float_exception_flags(&env->sse_status) &
                                  ~float_flag_inexact,
                                  &env->sse_status);
    }
#endif
    env->sse_status.float_rounding_mode = prev_rounding_mode;
}
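Bit 3 of the round* immediate is the precision-suppression bit: when it is set, the instruction must not report inexact. The previously disabled #if 0 block is now active with one refinement: the flag is cleared only when it was not already pending before the operation (old_flags), so an inexact raised by earlier instructions survives. A small decoder for the two immediates the new test uses; the layout assumed here is bits 0-1 rounding mode, bit 2 selects MXCSR.RC, bit 3 suppresses the precision exception:

#include <stdio.h>

/* Decode a roundss/roundps immediate the way the helper's checks read it. */
static void decode_round_imm(unsigned imm)
{
    printf("imm=%u: %s MXCSR rounding mode, precision exception %s\n",
           imm,
           (imm & (1 << 2)) ? "use" : "override",
           (imm & (1 << 3)) ? "suppressed" : "reported");
}

int main(void)
{
    decode_round_imm(4);   /* $4 in the test: RC from MXCSR, inexact reported */
    decode_round_imm(12);  /* $12 in the test: RC from MXCSR, inexact suppressed */
    return 0;
}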
|
||||
void glue(helper_roundpd, SUFFIX)(CPUX86State *env, Reg *d, Reg *s,
|
||||
uint32_t mode)
|
||||
{
|
||||
uint8_t old_flags = get_float_exception_flags(&env->sse_status);
|
||||
signed char prev_rounding_mode;
|
||||
|
||||
prev_rounding_mode = env->sse_status.float_rounding_mode;
|
||||
|
@@ -1825,19 +1833,18 @@ void glue(helper_roundpd, SUFFIX)(CPUX86State *env, Reg *d, Reg *s,
|
|||
d->ZMM_D(0) = float64_round_to_int(s->ZMM_D(0), &env->sse_status);
|
||||
d->ZMM_D(1) = float64_round_to_int(s->ZMM_D(1), &env->sse_status);
|
||||
|
||||
#if 0 /* TODO */
|
||||
if (mode & (1 << 3)) {
|
||||
if (mode & (1 << 3) && !(old_flags & float_flag_inexact)) {
|
||||
set_float_exception_flags(get_float_exception_flags(&env->sse_status) &
|
||||
~float_flag_inexact,
|
||||
&env->sse_status);
|
||||
}
|
||||
#endif
|
||||
env->sse_status.float_rounding_mode = prev_rounding_mode;
|
||||
}
|
||||
|
||||
void glue(helper_roundss, SUFFIX)(CPUX86State *env, Reg *d, Reg *s,
|
||||
uint32_t mode)
|
||||
{
|
||||
uint8_t old_flags = get_float_exception_flags(&env->sse_status);
|
||||
signed char prev_rounding_mode;
|
||||
|
||||
prev_rounding_mode = env->sse_status.float_rounding_mode;
|
||||
|
@@ -1860,19 +1867,18 @@ void glue(helper_roundss, SUFFIX)(CPUX86State *env, Reg *d, Reg *s,
|
|||
|
||||
d->ZMM_S(0) = float32_round_to_int(s->ZMM_S(0), &env->sse_status);
|
||||
|
||||
#if 0 /* TODO */
|
||||
if (mode & (1 << 3)) {
|
||||
if (mode & (1 << 3) && !(old_flags & float_flag_inexact)) {
|
||||
set_float_exception_flags(get_float_exception_flags(&env->sse_status) &
|
||||
~float_flag_inexact,
|
||||
&env->sse_status);
|
||||
}
|
||||
#endif
|
||||
env->sse_status.float_rounding_mode = prev_rounding_mode;
|
||||
}
|
||||
|
||||
void glue(helper_roundsd, SUFFIX)(CPUX86State *env, Reg *d, Reg *s,
|
||||
uint32_t mode)
|
||||
{
|
||||
uint8_t old_flags = get_float_exception_flags(&env->sse_status);
|
||||
signed char prev_rounding_mode;
|
||||
|
||||
prev_rounding_mode = env->sse_status.float_rounding_mode;
|
||||
|
@@ -1895,13 +1901,11 @@ void glue(helper_roundsd, SUFFIX)(CPUX86State *env, Reg *d, Reg *s,
|
|||
|
||||
d->ZMM_D(0) = float64_round_to_int(s->ZMM_D(0), &env->sse_status);
|
||||
|
||||
#if 0 /* TODO */
|
||||
if (mode & (1 << 3)) {
|
||||
if (mode & (1 << 3) && !(old_flags & float_flag_inexact)) {
|
||||
set_float_exception_flags(get_float_exception_flags(&env->sse_status) &
|
||||
~float_flag_inexact,
|
||||
&env->sse_status);
|
||||
}
|
||||
#endif
|
||||
env->sse_status.float_rounding_mode = prev_rounding_mode;
|
||||
}
|
||||
|
||||
|
|
|
@@ -44,7 +44,8 @@ char *sev_get_launch_measurement(void)
|
|||
return NULL;
|
||||
}
|
||||
|
||||
SevCapability *sev_get_capabilities(void)
|
||||
SevCapability *sev_get_capabilities(Error **errp)
|
||||
{
|
||||
error_setg(errp, "SEV is not available in this QEMU");
|
||||
return NULL;
|
||||
}
|
||||
|
|
|
@@ -399,7 +399,7 @@ sev_get_info(void)
|
|||
|
||||
static int
|
||||
sev_get_pdh_info(int fd, guchar **pdh, size_t *pdh_len, guchar **cert_chain,
|
||||
size_t *cert_chain_len)
|
||||
size_t *cert_chain_len, Error **errp)
|
||||
{
|
||||
guchar *pdh_data = NULL;
|
||||
guchar *cert_chain_data = NULL;
|
||||
|
@@ -410,8 +410,8 @@ sev_get_pdh_info(int fd, guchar **pdh, size_t *pdh_len, guchar **cert_chain,
|
|||
r = sev_platform_ioctl(fd, SEV_PDH_CERT_EXPORT, &export, &err);
|
||||
if (r < 0) {
|
||||
if (err != SEV_RET_INVALID_LEN) {
|
||||
error_report("failed to export PDH cert ret=%d fw_err=%d (%s)",
|
||||
r, err, fw_error_to_str(err));
|
||||
error_setg(errp, "failed to export PDH cert ret=%d fw_err=%d (%s)",
|
||||
r, err, fw_error_to_str(err));
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
|
@@ -423,8 +423,8 @@ sev_get_pdh_info(int fd, guchar **pdh, size_t *pdh_len, guchar **cert_chain,
|
|||
|
||||
r = sev_platform_ioctl(fd, SEV_PDH_CERT_EXPORT, &export, &err);
|
||||
if (r < 0) {
|
||||
error_report("failed to export PDH cert ret=%d fw_err=%d (%s)",
|
||||
r, err, fw_error_to_str(err));
|
||||
error_setg(errp, "failed to export PDH cert ret=%d fw_err=%d (%s)",
|
||||
r, err, fw_error_to_str(err));
|
||||
goto e_free;
|
||||
}
|
||||
|
||||
|
@@ -441,7 +441,7 @@ e_free:
|
|||
}
|
||||
|
||||
SevCapability *
|
||||
sev_get_capabilities(void)
|
||||
sev_get_capabilities(Error **errp)
|
||||
{
|
||||
SevCapability *cap = NULL;
|
||||
guchar *pdh_data = NULL;
|
||||
|
@@ -450,15 +450,24 @@ sev_get_capabilities(void)
|
|||
uint32_t ebx;
|
||||
int fd;
|
||||
|
||||
if (!kvm_enabled()) {
|
||||
error_setg(errp, "KVM not enabled");
|
||||
return NULL;
|
||||
}
|
||||
if (kvm_vm_ioctl(kvm_state, KVM_MEMORY_ENCRYPT_OP, NULL) < 0) {
|
||||
error_setg(errp, "SEV is not enabled in KVM");
|
||||
return NULL;
|
||||
}
|
||||
|
||||
fd = open(DEFAULT_SEV_DEVICE, O_RDWR);
|
||||
if (fd < 0) {
|
||||
error_report("%s: Failed to open %s '%s'", __func__,
|
||||
DEFAULT_SEV_DEVICE, strerror(errno));
|
||||
error_setg_errno(errp, errno, "Failed to open %s",
|
||||
DEFAULT_SEV_DEVICE);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
if (sev_get_pdh_info(fd, &pdh_data, &pdh_len,
|
||||
&cert_chain_data, &cert_chain_len)) {
|
||||
&cert_chain_data, &cert_chain_len, errp)) {
|
||||
goto out;
|
||||
}
|
||||
|
||||
|
|
|
@@ -34,6 +34,6 @@ extern SevInfo *sev_get_info(void);
|
|||
extern uint32_t sev_get_cbit_position(void);
|
||||
extern uint32_t sev_get_reduced_phys_bits(void);
|
||||
extern char *sev_get_launch_measurement(void);
|
||||
extern SevCapability *sev_get_capabilities(void);
|
||||
extern SevCapability *sev_get_capabilities(Error **errp);
|
||||
|
||||
#endif
|
||||
|
|
|
@@ -135,6 +135,7 @@
|
|||
#define SVM_NPT_PAE (1 << 0)
|
||||
#define SVM_NPT_LMA (1 << 1)
|
||||
#define SVM_NPT_NXE (1 << 2)
|
||||
#define SVM_NPT_PSE (1 << 3)
|
||||
|
||||
#define SVM_NPTEXIT_P (1ULL << 0)
|
||||
#define SVM_NPTEXIT_RW (1ULL << 1)
|
||||
|
|
|
@@ -209,16 +209,21 @@ void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)

    nested_ctl = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                          control.nested_ctl));

    env->nested_pg_mode = 0;

    if (nested_ctl & SVM_NPT_ENABLED) {
        env->nested_cr3 = x86_ldq_phys(cs,
                                env->vm_vmcb + offsetof(struct vmcb,
                                                        control.nested_cr3));
        env->hflags2 |= HF2_NPT_MASK;

        env->nested_pg_mode = 0;
        if (env->cr[4] & CR4_PAE_MASK) {
            env->nested_pg_mode |= SVM_NPT_PAE;
        }
        if (env->cr[4] & CR4_PSE_MASK) {
            env->nested_pg_mode |= SVM_NPT_PSE;
        }
        if (env->hflags & HF_LMA_MASK) {
            env->nested_pg_mode |= SVM_NPT_LMA;
        }
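env->nested_pg_mode summarises the paging mode the L1 hypervisor had when it issued VMRUN, so the nested page table walk can be interpreted correctly; zeroing it unconditionally and adding the PSE bit is what fixes 4 MB pages in non-PAE guests under emulated NPT. A compact standalone rendering of the classification, reusing the SVM_NPT_* encoding shown earlier:

#include <stdio.h>

/* Same encoding as the SVM_NPT_* defines earlier in this series. */
#define SVM_NPT_PAE (1 << 0)
#define SVM_NPT_LMA (1 << 1)
#define SVM_NPT_NXE (1 << 2)
#define SVM_NPT_PSE (1 << 3)

/* Summarise the L1 paging mode from CR4/long-mode derived predicates. */
static int nested_pg_mode(int cr4_pae, int cr4_pse, int long_mode)
{
    int mode = 0;

    if (cr4_pae) {
        mode |= SVM_NPT_PAE;
    }
    if (cr4_pse) {
        mode |= SVM_NPT_PSE;
    }
    if (long_mode) {
        mode |= SVM_NPT_LMA;
    }
    return mode;
}

int main(void)
{
    /* 32-bit guest with PSE enabled: 4 MB pages are legal, PAE is off. */
    printf("legacy+PSE -> 0x%x\n", nested_pg_mode(0, 1, 0)); /* 0x8 */
    /* 64-bit guest: PAE and LMA both set. */
    printf("long mode  -> 0x%x\n", nested_pg_mode(1, 0, 1)); /* 0x3 */
    return 0;
}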
|
|
|
@@ -0,0 +1,25 @@
|
|||
/*
|
||||
* x86 FPU, MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4/PNI helpers
|
||||
*
|
||||
* Copyright (c) 2003 Fabrice Bellard
|
||||
*
|
||||
* This library is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2 of the License, or (at your option) any later version.
|
||||
*
|
||||
* This library is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#include "qemu/osdep.h"
|
||||
#include "cpu.h"
|
||||
|
||||
void update_mxcsr_from_sse_status(CPUX86State *env)
|
||||
{
|
||||
}
|
|
@@ -1128,9 +1128,6 @@ static void gen_bpt_io(DisasContext *s, TCGv_i32 t_port, int ot)
|
|||
|
||||
static inline void gen_ins(DisasContext *s, MemOp ot)
|
||||
{
|
||||
if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
|
||||
gen_io_start();
|
||||
}
|
||||
gen_string_movl_A0_EDI(s);
|
||||
/* Note: we must do this dummy write first to be restartable in
|
||||
case of page fault. */
|
||||
|
@@ -1143,16 +1140,10 @@ static inline void gen_ins(DisasContext *s, MemOp ot)
|
|||
gen_op_movl_T0_Dshift(s, ot);
|
||||
gen_op_add_reg_T0(s, s->aflag, R_EDI);
|
||||
gen_bpt_io(s, s->tmp2_i32, ot);
|
||||
if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
|
||||
gen_io_end();
|
||||
}
|
||||
}
|
||||
|
||||
static inline void gen_outs(DisasContext *s, MemOp ot)
|
||||
{
|
||||
if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
|
||||
gen_io_start();
|
||||
}
|
||||
gen_string_movl_A0_ESI(s);
|
||||
gen_op_ld_v(s, ot, s->T0, s->A0);
|
||||
|
||||
|
@@ -1163,9 +1154,6 @@ static inline void gen_outs(DisasContext *s, MemOp ot)
|
|||
gen_op_movl_T0_Dshift(s, ot);
|
||||
gen_op_add_reg_T0(s, s->aflag, R_ESI);
|
||||
gen_bpt_io(s, s->tmp2_i32, ot);
|
||||
if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
|
||||
gen_io_end();
|
||||
}
|
||||
}
|
||||
|
||||
/* same method as Valgrind : we generate jumps to current or next
|
||||
|
@@ -6400,8 +6388,12 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
|
|||
tcg_gen_ext16u_tl(s->T0, cpu_regs[R_EDX]);
|
||||
gen_check_io(s, ot, pc_start - s->cs_base,
|
||||
SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes) | 4);
|
||||
if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
|
||||
gen_io_start();
|
||||
}
|
||||
if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
|
||||
gen_repz_ins(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
|
||||
/* jump generated by gen_repz_ins */
|
||||
} else {
|
||||
gen_ins(s, ot);
|
||||
if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
|
||||
|
@@ -6415,8 +6407,12 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
|
|||
tcg_gen_ext16u_tl(s->T0, cpu_regs[R_EDX]);
|
||||
gen_check_io(s, ot, pc_start - s->cs_base,
|
||||
svm_is_rep(prefixes) | 4);
|
||||
if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
|
||||
gen_io_start();
|
||||
}
|
||||
if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
|
||||
gen_repz_outs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
|
||||
/* jump generated by gen_repz_outs */
|
||||
} else {
|
||||
gen_outs(s, ot);
|
||||
if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
|
||||
|
@@ -7583,12 +7579,13 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
|
|||
CASE_MODRM_OP(4): /* smsw */
|
||||
gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_CR0);
|
||||
tcg_gen_ld_tl(s->T0, cpu_env, offsetof(CPUX86State, cr[0]));
|
||||
if (CODE64(s)) {
|
||||
mod = (modrm >> 6) & 3;
|
||||
ot = (mod != 3 ? MO_16 : s->dflag);
|
||||
} else {
|
||||
ot = MO_16;
|
||||
}
|
||||
/*
|
||||
* In 32-bit mode, the higher 16 bits of the destination
|
||||
* register are undefined. In practice CR0[31:0] is stored
|
||||
* just like in 64-bit mode.
|
||||
*/
|
||||
mod = (modrm >> 6) & 3;
|
||||
ot = (mod != 3 ? MO_16 : s->dflag);
|
||||
gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
|
||||
break;
|
||||
case 0xee: /* rdpkru */
|
||||
|
@@ -8039,7 +8036,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
|
|||
gen_helper_read_crN(s->T0, cpu_env, tcg_const_i32(reg));
|
||||
gen_op_mov_reg_v(s, ot, rm, s->T0);
|
||||
if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
|
||||
gen_io_end();
|
||||
gen_jmp(s, s->pc - s->cs_base);
|
||||
}
|
||||
}
|
||||
break;
|
||||
|
@@ -8157,6 +8154,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
|
|||
gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
|
||||
break;
|
||||
}
|
||||
gen_helper_update_mxcsr(cpu_env);
|
||||
gen_lea_modrm(env, s, modrm);
|
||||
tcg_gen_ld32u_tl(s->T0, cpu_env, offsetof(CPUX86State, mxcsr));
|
||||
gen_op_st_v(s, MO_32, s->T0, s->A0);
|
||||
|
|
|
@@ -637,7 +637,7 @@ define do_test_tap
|
|||
{ export MALLOC_PERTURB_=$${MALLOC_PERTURB_:-$$(( $${RANDOM:-0} % 255 + 1))} $2; \
|
||||
$(foreach COMMAND, $1, \
|
||||
$(COMMAND) -m=$(SPEED) -k --tap < /dev/null \
|
||||
| sed "s/^[a-z][a-z]* [0-9]* /&$(notdir $(COMMAND)) /" || true; ) } \
|
||||
| sed "s/^\(not \)\?ok [0-9]* /&$(notdir $(COMMAND)) /" || true; ) } \
|
||||
| ./scripts/tap-merge.pl | tee "$@" \
|
||||
| ./scripts/tap-driver.pl $(if $(V),, --show-failures-only), \
|
||||
"TAP","$@")
|
||||
|
|
|
@@ -200,16 +200,116 @@ static void add_query_tests(QmpSchema *schema)
|
|||
}
|
||||
}
|
||||
|
||||
static void test_object_add_without_props(void)
|
||||
static void test_object_add_failure_modes(void)
|
||||
{
|
||||
QTestState *qts;
|
||||
QDict *resp;
|
||||
|
||||
/* attempt to create an object without props */
|
||||
qts = qtest_init(common_args);
|
||||
resp = qtest_qmp(qts, "{'execute': 'object-add', 'arguments':"
|
||||
" {'qom-type': 'memory-backend-ram', 'id': 'ram1' } }");
|
||||
" {'qom-type': 'memory-backend-ram', 'id': 'ram1' } }");
|
||||
g_assert_nonnull(resp);
|
||||
qmp_assert_error_class(resp, "GenericError");
|
||||
|
||||
/* attempt to create an object without qom-type */
|
||||
resp = qtest_qmp(qts, "{'execute': 'object-add', 'arguments':"
|
||||
" {'id': 'ram1' } }");
|
||||
g_assert_nonnull(resp);
|
||||
qmp_assert_error_class(resp, "GenericError");
|
||||
|
||||
/* attempt to delete an object that does not exist */
|
||||
resp = qtest_qmp(qts, "{'execute': 'object-del', 'arguments':"
|
||||
" {'id': 'ram1' } }");
|
||||
g_assert_nonnull(resp);
|
||||
qmp_assert_error_class(resp, "GenericError");
|
||||
|
||||
/* attempt to create 2 objects with duplicate id */
|
||||
resp = qtest_qmp(qts, "{'execute': 'object-add', 'arguments':"
|
||||
" {'qom-type': 'memory-backend-ram', 'id': 'ram1',"
|
||||
" 'props': {'size': 1048576 } } }");
|
||||
g_assert_nonnull(resp);
|
||||
g_assert(qdict_haskey(resp, "return"));
|
||||
resp = qtest_qmp(qts, "{'execute': 'object-add', 'arguments':"
|
||||
" {'qom-type': 'memory-backend-ram', 'id': 'ram1',"
|
||||
" 'props': {'size': 1048576 } } }");
|
||||
g_assert_nonnull(resp);
|
||||
qmp_assert_error_class(resp, "GenericError");
|
||||
|
||||
/* delete ram1 object */
|
||||
resp = qtest_qmp(qts, "{'execute': 'object-del', 'arguments':"
|
||||
" {'id': 'ram1' } }");
|
||||
g_assert_nonnull(resp);
|
||||
g_assert(qdict_haskey(resp, "return"));
|
||||
|
||||
/* attempt to create an object with a property of a wrong type */
|
||||
resp = qtest_qmp(qts, "{'execute': 'object-add', 'arguments':"
|
||||
" {'qom-type': 'memory-backend-ram', 'id': 'ram1',"
|
||||
" 'props': {'size': '1048576' } } }");
|
||||
g_assert_nonnull(resp);
|
||||
/* now do it right */
|
||||
qmp_assert_error_class(resp, "GenericError");
|
||||
resp = qtest_qmp(qts, "{'execute': 'object-add', 'arguments':"
|
||||
" {'qom-type': 'memory-backend-ram', 'id': 'ram1',"
|
||||
" 'props': {'size': 1048576 } } }");
|
||||
g_assert_nonnull(resp);
|
||||
g_assert(qdict_haskey(resp, "return"));
|
||||
|
||||
/* delete ram1 object */
|
||||
resp = qtest_qmp(qts, "{'execute': 'object-del', 'arguments':"
|
||||
" {'id': 'ram1' } }");
|
||||
g_assert_nonnull(resp);
|
||||
g_assert(qdict_haskey(resp, "return"));
|
||||
|
||||
/* attempt to create an object without the id */
|
||||
resp = qtest_qmp(qts, "{'execute': 'object-add', 'arguments':"
|
||||
" {'qom-type': 'memory-backend-ram',"
|
||||
" 'props': {'size': 1048576 } } }");
|
||||
g_assert_nonnull(resp);
|
||||
qmp_assert_error_class(resp, "GenericError");
|
||||
/* now do it right */
|
||||
resp = qtest_qmp(qts, "{'execute': 'object-add', 'arguments':"
|
||||
" {'qom-type': 'memory-backend-ram', 'id': 'ram1',"
|
||||
" 'props': {'size': 1048576 } } }");
|
||||
g_assert_nonnull(resp);
|
||||
g_assert(qdict_haskey(resp, "return"));
|
||||
|
||||
/* delete ram1 object */
|
||||
resp = qtest_qmp(qts, "{'execute': 'object-del', 'arguments':"
|
||||
" {'id': 'ram1' } }");
|
||||
g_assert_nonnull(resp);
|
||||
g_assert(qdict_haskey(resp, "return"));
|
||||
|
||||
/* attempt to set a non existing property */
|
||||
resp = qtest_qmp(qts, "{'execute': 'object-add', 'arguments':"
|
||||
" {'qom-type': 'memory-backend-ram', 'id': 'ram1',"
|
||||
" 'props': {'sized': 1048576 } } }");
|
||||
g_assert_nonnull(resp);
|
||||
qmp_assert_error_class(resp, "GenericError");
|
||||
/* now do it right */
|
||||
resp = qtest_qmp(qts, "{'execute': 'object-add', 'arguments':"
|
||||
" {'qom-type': 'memory-backend-ram', 'id': 'ram1',"
|
||||
" 'props': {'size': 1048576 } } }");
|
||||
g_assert_nonnull(resp);
|
||||
g_assert(qdict_haskey(resp, "return"));
|
||||
|
||||
/* delete ram1 object without id */
|
||||
resp = qtest_qmp(qts, "{'execute': 'object-del', 'arguments':"
|
||||
" {'ida': 'ram1' } }");
|
||||
g_assert_nonnull(resp);
|
||||
|
||||
/* delete ram1 object */
|
||||
resp = qtest_qmp(qts, "{'execute': 'object-del', 'arguments':"
|
||||
" {'id': 'ram1' } }");
|
||||
g_assert_nonnull(resp);
|
||||
g_assert(qdict_haskey(resp, "return"));
|
||||
|
||||
/* delete ram1 object that does not exist anymore*/
|
||||
resp = qtest_qmp(qts, "{'execute': 'object-del', 'arguments':"
|
||||
" {'id': 'ram1' } }");
|
||||
g_assert_nonnull(resp);
|
||||
qmp_assert_error_class(resp, "GenericError");
|
||||
|
||||
qtest_quit(qts);
|
||||
}
|
||||
|
||||
|
@@ -223,9 +323,8 @@ int main(int argc, char *argv[])
|
|||
qmp_schema_init(&schema);
|
||||
add_query_tests(&schema);
|
||||
|
||||
qtest_add_func("qmp/object-add-without-props",
|
||||
test_object_add_without_props);
|
||||
/* TODO: add coverage of generic object-add failure modes */
|
||||
qtest_add_func("qmp/object-add-failure-modes",
|
||||
test_object_add_failure_modes);
|
||||
|
||||
ret = g_test_run();
|
||||
|
||||
|
|
|
@@ -10,6 +10,10 @@ ALL_X86_TESTS=$(I386_SRCS:.c=)
|
|||
SKIP_I386_TESTS=test-i386-ssse3
|
||||
X86_64_TESTS:=$(filter test-i386-ssse3, $(ALL_X86_TESTS))
|
||||
|
||||
test-i386-sse-exceptions: CFLAGS += -msse4.1 -mfpmath=sse
|
||||
run-test-i386-sse-exceptions: QEMU_OPTS += -cpu max
|
||||
run-plugin-test-i386-sse-exceptions-%: QEMU_OPTS += -cpu max
|
||||
|
||||
test-i386-pcmpistri: CFLAGS += -msse4.2
|
||||
run-test-i386-pcmpistri: QEMU_OPTS += -cpu max
|
||||
run-plugin-test-i386-pcmpistri-%: QEMU_OPTS += -cpu max
|
||||
|
|
|
@@ -0,0 +1,813 @@
|
|||
/* Test SSE exceptions. */
|
||||
|
||||
#include <float.h>
|
||||
#include <stdint.h>
|
||||
#include <stdio.h>
|
||||
|
||||
volatile float f_res;
|
||||
volatile double d_res;
|
||||
|
||||
volatile float f_snan = __builtin_nansf("");
|
||||
volatile float f_half = 0.5f;
|
||||
volatile float f_third = 1.0f / 3.0f;
|
||||
volatile float f_nan = __builtin_nanl("");
|
||||
volatile float f_inf = __builtin_inff();
|
||||
volatile float f_ninf = -__builtin_inff();
|
||||
volatile float f_one = 1.0f;
|
||||
volatile float f_two = 2.0f;
|
||||
volatile float f_zero = 0.0f;
|
||||
volatile float f_nzero = -0.0f;
|
||||
volatile float f_min = FLT_MIN;
|
||||
volatile float f_true_min = 0x1p-149f;
|
||||
volatile float f_max = FLT_MAX;
|
||||
volatile float f_nmax = -FLT_MAX;
|
||||
|
||||
volatile double d_snan = __builtin_nans("");
|
||||
volatile double d_half = 0.5;
|
||||
volatile double d_third = 1.0 / 3.0;
|
||||
volatile double d_nan = __builtin_nan("");
|
||||
volatile double d_inf = __builtin_inf();
|
||||
volatile double d_ninf = -__builtin_inf();
|
||||
volatile double d_one = 1.0;
|
||||
volatile double d_two = 2.0;
|
||||
volatile double d_zero = 0.0;
|
||||
volatile double d_nzero = -0.0;
|
||||
volatile double d_min = DBL_MIN;
|
||||
volatile double d_true_min = 0x1p-1074;
|
||||
volatile double d_max = DBL_MAX;
|
||||
volatile double d_nmax = -DBL_MAX;
|
||||
|
||||
volatile int32_t i32_max = INT32_MAX;
|
||||
|
||||
#define IE (1 << 0)
|
||||
#define ZE (1 << 2)
|
||||
#define OE (1 << 3)
|
||||
#define UE (1 << 4)
|
||||
#define PE (1 << 5)
|
||||
#define EXC (IE | ZE | OE | UE | PE)
|
||||
|
||||
uint32_t mxcsr_default = 0x1f80;
|
||||
uint32_t mxcsr_ftz = 0x9f80;
|
||||
|
||||
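/*
 * MXCSR layout used below: bits 0-5 are the exception flags (IE, DE, ZE,
 * OE, UE, PE), bit 6 is DAZ, bits 7-12 the corresponding exception masks,
 * bits 13-14 the rounding control, bit 15 FZ (flush to zero).
 * mxcsr_default = 0x1f80 is all six masks set and nothing else;
 * mxcsr_ftz = 0x9f80 is the same value plus FZ, which is why the FTZ
 * cases below still expect UE | PE to be latched even though the result
 * is flushed to zero.
 */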
int main(void)
|
||||
{
|
||||
uint32_t mxcsr;
|
||||
int32_t i32_res;
|
||||
int ret = 0;
|
||||
|
||||
__asm__ volatile ("ldmxcsr %0" : : "m" (mxcsr_default));
|
||||
d_res = f_snan;
|
||||
__asm__ volatile ("stmxcsr %0" : "=m" (mxcsr));
|
||||
if ((mxcsr & EXC) != IE) {
|
||||
printf("FAIL: widen float snan\n");
|
||||
ret = 1;
|
||||
}
|
||||
|
||||
__asm__ volatile ("ldmxcsr %0" : : "m" (mxcsr_default));
|
||||
f_res = d_min;
|
||||
__asm__ volatile ("stmxcsr %0" : "=m" (mxcsr));
|
||||
if ((mxcsr & EXC) != (UE | PE)) {
|
||||
printf("FAIL: narrow float underflow\n");
|
||||
ret = 1;
|
||||
}
|
||||
|
||||
__asm__ volatile ("ldmxcsr %0" : : "m" (mxcsr_default));
|
||||
f_res = d_max;
|
||||
__asm__ volatile ("stmxcsr %0" : "=m" (mxcsr));
|
||||
if ((mxcsr & EXC) != (OE | PE)) {
|
||||
printf("FAIL: narrow float overflow\n");
|
||||
ret = 1;
|
||||
}
|
||||
|
||||
__asm__ volatile ("ldmxcsr %0" : : "m" (mxcsr_default));
|
||||
f_res = d_third;
|
||||
__asm__ volatile ("stmxcsr %0" : "=m" (mxcsr));
|
||||
if ((mxcsr & EXC) != PE) {
|
||||
printf("FAIL: narrow float inexact\n");
|
||||
ret = 1;
|
||||
}
|
||||
|
||||
__asm__ volatile ("ldmxcsr %0" : : "m" (mxcsr_default));
|
||||
f_res = d_snan;
|
||||
__asm__ volatile ("stmxcsr %0" : "=m" (mxcsr));
|
||||
if ((mxcsr & EXC) != IE) {
|
||||
printf("FAIL: narrow float snan\n");
|
||||
ret = 1;
|
||||
}
|
||||
|
||||
__asm__ volatile ("ldmxcsr %0" : : "m" (mxcsr_default));
|
||||
__asm__ volatile ("roundss $4, %0, %0" : "=x" (f_res) : "0" (f_min));
|
||||
__asm__ volatile ("stmxcsr %0" : "=m" (mxcsr));
|
||||
if ((mxcsr & EXC) != PE) {
|
||||
printf("FAIL: roundss min\n");
|
||||
ret = 1;
|
||||
}
|
||||
__asm__ volatile ("ldmxcsr %0" : : "m" (mxcsr_default));
|
||||
__asm__ volatile ("roundss $12, %0, %0" : "=x" (f_res) : "0" (f_min));
|
||||
__asm__ volatile ("stmxcsr %0" : "=m" (mxcsr));
|
||||
if ((mxcsr & EXC) != 0) {
|
||||
printf("FAIL: roundss no-inexact min\n");
|
||||
ret = 1;
|
||||
}
|
||||
__asm__ volatile ("ldmxcsr %0" : : "m" (mxcsr_default));
|
||||
__asm__ volatile ("roundss $4, %0, %0" : "=x" (f_res) : "0" (f_snan));
|
||||
__asm__ volatile ("stmxcsr %0" : "=m" (mxcsr));
|
||||
if ((mxcsr & EXC) != IE) {
|
||||
printf("FAIL: roundss snan\n");
|
||||
ret = 1;
|
||||
}
|
||||
__asm__ volatile ("ldmxcsr %0" : : "m" (mxcsr_default));
|
||||
__asm__ volatile ("roundss $12, %0, %0" : "=x" (f_res) : "0" (f_snan));
|
||||
__asm__ volatile ("stmxcsr %0" : "=m" (mxcsr));
|
||||
if ((mxcsr & EXC) != IE) {
|
||||
printf("FAIL: roundss no-inexact snan\n");
|
||||
ret = 1;
|
||||
}
|
||||
|
||||
__asm__ volatile ("ldmxcsr %0" : : "m" (mxcsr_default));
|
||||
__asm__ volatile ("roundsd $4, %0, %0" : "=x" (d_res) : "0" (d_min));
|
||||
__asm__ volatile ("stmxcsr %0" : "=m" (mxcsr));
|
||||
if ((mxcsr & EXC) != PE) {
|
||||
printf("FAIL: roundsd min\n");
|
||||
ret = 1;
|
||||
}
|
||||
__asm__ volatile ("ldmxcsr %0" : : "m" (mxcsr_default));
|
||||
__asm__ volatile ("roundsd $12, %0, %0" : "=x" (d_res) : "0" (d_min));
|
||||
__asm__ volatile ("stmxcsr %0" : "=m" (mxcsr));
|
||||
if ((mxcsr & EXC) != 0) {
|
||||
printf("FAIL: roundsd no-inexact min\n");
|
||||
ret = 1;
|
||||
}
|
||||
__asm__ volatile ("ldmxcsr %0" : : "m" (mxcsr_default));
|
||||
__asm__ volatile ("roundsd $4, %0, %0" : "=x" (d_res) : "0" (d_snan));
|
||||
__asm__ volatile ("stmxcsr %0" : "=m" (mxcsr));
|
||||
if ((mxcsr & EXC) != IE) {
|
||||
printf("FAIL: roundsd snan\n");
|
||||
ret = 1;
|
||||
}
|
||||
__asm__ volatile ("ldmxcsr %0" : : "m" (mxcsr_default));
|
||||
__asm__ volatile ("roundsd $12, %0, %0" : "=x" (d_res) : "0" (d_snan));
|
||||
__asm__ volatile ("stmxcsr %0" : "=m" (mxcsr));
|
||||
if ((mxcsr & EXC) != IE) {
|
||||
printf("FAIL: roundsd no-inexact snan\n");
|
||||
ret = 1;
|
||||
}
|
||||
|
||||
__asm__ volatile ("ldmxcsr %0" : : "m" (mxcsr_default));
|
||||
__asm__ volatile ("comiss %1, %0" : : "x" (f_nan), "x" (f_zero));
|
||||
__asm__ volatile ("stmxcsr %0" : "=m" (mxcsr));
|
||||
if ((mxcsr & EXC) != IE) {
|
||||
printf("FAIL: comiss nan\n");
|
||||
ret = 1;
|
||||
}
|
||||
__asm__ volatile ("ldmxcsr %0" : : "m" (mxcsr_default));
|
||||
__asm__ volatile ("ucomiss %1, %0" : : "x" (f_nan), "x" (f_zero));
|
||||
__asm__ volatile ("stmxcsr %0" : "=m" (mxcsr));
|
||||
if ((mxcsr & EXC) != 0) {
|
||||
printf("FAIL: ucomiss nan\n");
|
||||
ret = 1;
|
||||
}
|
||||
__asm__ volatile ("ldmxcsr %0" : : "m" (mxcsr_default));
|
||||
__asm__ volatile ("ucomiss %1, %0" : : "x" (f_snan), "x" (f_zero));
|
||||
__asm__ volatile ("stmxcsr %0" : "=m" (mxcsr));
|
||||
if ((mxcsr & EXC) != IE) {
|
||||
printf("FAIL: ucomiss snan\n");
|
||||
ret = 1;
|
||||
}
|
||||
|
||||
__asm__ volatile ("ldmxcsr %0" : : "m" (mxcsr_default));
|
||||
__asm__ volatile ("comisd %1, %0" : : "x" (d_nan), "x" (d_zero));
|
||||
__asm__ volatile ("stmxcsr %0" : "=m" (mxcsr));
|
||||
if ((mxcsr & EXC) != IE) {
|
||||
printf("FAIL: comisd nan\n");
|
||||
ret = 1;
|
||||
}
|
||||
__asm__ volatile ("ldmxcsr %0" : : "m" (mxcsr_default));
|
||||
__asm__ volatile ("ucomisd %1, %0" : : "x" (d_nan), "x" (d_zero));
|
||||
__asm__ volatile ("stmxcsr %0" : "=m" (mxcsr));
|
||||
if ((mxcsr & EXC) != 0) {
|
||||
printf("FAIL: ucomisd nan\n");
|
||||
ret = 1;
|
||||
}
|
||||
__asm__ volatile ("ldmxcsr %0" : : "m" (mxcsr_default));
|
||||
__asm__ volatile ("ucomisd %1, %0" : : "x" (d_snan), "x" (d_zero));
|
||||
__asm__ volatile ("stmxcsr %0" : "=m" (mxcsr));
|
||||
if ((mxcsr & EXC) != IE) {
|
||||
printf("FAIL: ucomisd snan\n");
|
||||
ret = 1;
|
||||
}
|
||||
|
||||
__asm__ volatile ("ldmxcsr %0" : : "m" (mxcsr_default));
|
||||
f_res = f_max + f_max;
|
||||
__asm__ volatile ("stmxcsr %0" : "=m" (mxcsr));
|
||||
if ((mxcsr & EXC) != (OE | PE)) {
|
||||
printf("FAIL: float add overflow\n");
|
||||
ret = 1;
|
||||
}
|
||||
__asm__ volatile ("ldmxcsr %0" : : "m" (mxcsr_default));
|
||||
f_res = f_max + f_min;
|
||||
__asm__ volatile ("stmxcsr %0" : "=m" (mxcsr));
|
||||
if ((mxcsr & EXC) != PE) {
|
||||
printf("FAIL: float add inexact\n");
|
||||
ret = 1;
|
||||
}
|
||||
__asm__ volatile ("ldmxcsr %0" : : "m" (mxcsr_default));
|
||||
f_res = f_inf + f_ninf;
|
||||
__asm__ volatile ("stmxcsr %0" : "=m" (mxcsr));
|
||||
if ((mxcsr & EXC) != IE) {
|
||||
printf("FAIL: float add inf -inf\n");
|
||||
ret = 1;
|
||||
}
|
||||
__asm__ volatile ("ldmxcsr %0" : : "m" (mxcsr_default));
|
||||
f_res = f_snan + f_third;
|
||||
__asm__ volatile ("stmxcsr %0" : "=m" (mxcsr));
|
||||
if ((mxcsr & EXC) != IE) {
|
||||
printf("FAIL: float add snan\n");
|
||||
ret = 1;
|
||||
}
|
||||
__asm__ volatile ("ldmxcsr %0" : : "m" (mxcsr_ftz));
|
||||
f_res = f_true_min + f_true_min;
|
||||
__asm__ volatile ("stmxcsr %0" : "=m" (mxcsr));
|
||||
if ((mxcsr & EXC) != (UE | PE)) {
|
||||
printf("FAIL: float add FTZ underflow\n");
|
||||
ret = 1;
|
||||
}
|
||||
|
||||
__asm__ volatile ("ldmxcsr %0" : : "m" (mxcsr_default));
|
||||
d_res = d_max + d_max;
|
||||
__asm__ volatile ("stmxcsr %0" : "=m" (mxcsr));
|
||||
if ((mxcsr & EXC) != (OE | PE)) {
|
||||
printf("FAIL: double add overflow\n");
|
||||
ret = 1;
|
||||
}
|
||||
__asm__ volatile ("ldmxcsr %0" : : "m" (mxcsr_default));
|
||||
d_res = d_max + d_min;
|
||||
__asm__ volatile ("stmxcsr %0" : "=m" (mxcsr));
|
||||
if ((mxcsr & EXC) != PE) {
|
||||
printf("FAIL: double add inexact\n");
|
||||
ret = 1;
|
||||
}
|
||||
__asm__ volatile ("ldmxcsr %0" : : "m" (mxcsr_default));
|
||||
d_res = d_inf + d_ninf;
|
||||
__asm__ volatile ("stmxcsr %0" : "=m" (mxcsr));
|
||||
if ((mxcsr & EXC) != IE) {
|
||||
printf("FAIL: double add inf -inf\n");
|
||||
ret = 1;
|
||||
}
|
||||
__asm__ volatile ("ldmxcsr %0" : : "m" (mxcsr_default));
|
||||
d_res = d_snan + d_third;
|
||||
__asm__ volatile ("stmxcsr %0" : "=m" (mxcsr));
|
||||
if ((mxcsr & EXC) != IE) {
|
||||
printf("FAIL: double add snan\n");
|
||||
ret = 1;
|
||||
}
|
||||
__asm__ volatile ("ldmxcsr %0" : : "m" (mxcsr_ftz));
|
||||
d_res = d_true_min + d_true_min;
|
||||
__asm__ volatile ("stmxcsr %0" : "=m" (mxcsr));
|
||||
if ((mxcsr & EXC) != (UE | PE)) {
|
||||
printf("FAIL: double add FTZ underflow\n");
|
||||
ret = 1;
|
||||
}
|
||||
|
||||
__asm__ volatile ("ldmxcsr %0" : : "m" (mxcsr_default));
|
||||
f_res = f_max - f_nmax;
|
||||
__asm__ volatile ("stmxcsr %0" : "=m" (mxcsr));
|
||||
if ((mxcsr & EXC) != (OE | PE)) {
|
||||
printf("FAIL: float sub overflow\n");
|
||||
ret = 1;
|
||||
}
|
||||
__asm__ volatile ("ldmxcsr %0" : : "m" (mxcsr_default));
|
||||
f_res = f_max - f_min;
|
||||
__asm__ volatile ("stmxcsr %0" : "=m" (mxcsr));
|
||||
if ((mxcsr & EXC) != PE) {
|
||||
printf("FAIL: float sub inexact\n");
|
||||
ret = 1;
|
||||
}
|
||||
__asm__ volatile ("ldmxcsr %0" : : "m" (mxcsr_default));
|
||||
f_res = f_inf - f_inf;
|
||||
__asm__ volatile ("stmxcsr %0" : "=m" (mxcsr));
|
||||
if ((mxcsr & EXC) != IE) {
|
||||
printf("FAIL: float sub inf inf\n");
|
||||
ret = 1;
|
||||
}
|
||||
__asm__ volatile ("ldmxcsr %0" : : "m" (mxcsr_default));
|
||||
f_res = f_snan - f_third;
|
||||
__asm__ volatile ("stmxcsr %0" : "=m" (mxcsr));
|
||||
if ((mxcsr & EXC) != IE) {
|
||||
printf("FAIL: float sub snan\n");
|
||||
ret = 1;
|
||||
}
|
||||
__asm__ volatile ("ldmxcsr %0" : : "m" (mxcsr_ftz));
|
||||
f_res = f_min - f_true_min;
|
||||
__asm__ volatile ("stmxcsr %0" : "=m" (mxcsr));
|
||||
if ((mxcsr & EXC) != (UE | PE)) {
|
||||
printf("FAIL: float sub FTZ underflow\n");
|
||||
ret = 1;
|
||||
}
|
||||
|
||||
__asm__ volatile ("ldmxcsr %0" : : "m" (mxcsr_default));
|
||||
d_res = d_max - d_nmax;
|
||||
__asm__ volatile ("stmxcsr %0" : "=m" (mxcsr));
|
||||
if ((mxcsr & EXC) != (OE | PE)) {
|
||||
printf("FAIL: double sub overflow\n");
|
||||
ret = 1;
|
||||
}
|
||||
__asm__ volatile ("ldmxcsr %0" : : "m" (mxcsr_default));
|
||||
d_res = d_max - d_min;
|
||||
__asm__ volatile ("stmxcsr %0" : "=m" (mxcsr));
|
||||
if ((mxcsr & EXC) != PE) {
|
||||
printf("FAIL: double sub inexact\n");
|
||||
ret = 1;
|
||||
}
|
||||
__asm__ volatile ("ldmxcsr %0" : : "m" (mxcsr_default));
|
||||
d_res = d_inf - d_inf;
|
||||
__asm__ volatile ("stmxcsr %0" : "=m" (mxcsr));
|
||||
if ((mxcsr & EXC) != IE) {
|
||||
printf("FAIL: double sub inf inf\n");
|
||||
ret = 1;
|
||||
}
|
||||
__asm__ volatile ("ldmxcsr %0" : : "m" (mxcsr_default));
|
||||
d_res = d_snan - d_third;
|
||||
__asm__ volatile ("stmxcsr %0" : "=m" (mxcsr));
|
||||
if ((mxcsr & EXC) != IE) {
|
||||
printf("FAIL: double sub snan\n");
|
||||
ret = 1;
|
||||
}
|
||||
__asm__ volatile ("ldmxcsr %0" : : "m" (mxcsr_ftz));
|
||||
d_res = d_min - d_true_min;
|
||||
__asm__ volatile ("stmxcsr %0" : "=m" (mxcsr));
|
||||
if ((mxcsr & EXC) != (UE | PE)) {
|
||||
printf("FAIL: double sub FTZ underflow\n");
|
||||
ret = 1;
|
||||
}
|
||||
|
||||
__asm__ volatile ("ldmxcsr %0" : : "m" (mxcsr_default));
|
||||
f_res = f_max * f_max;
|
||||
__asm__ volatile ("stmxcsr %0" : "=m" (mxcsr));
|
||||
if ((mxcsr & EXC) != (OE | PE)) {
|
||||
printf("FAIL: float mul overflow\n");
|
||||
ret = 1;
|
||||
}
|
||||
__asm__ volatile ("ldmxcsr %0" : : "m" (mxcsr_default));
|
||||
f_res = f_third * f_third;
|
||||
__asm__ volatile ("stmxcsr %0" : "=m" (mxcsr));
|
||||
if ((mxcsr & EXC) != PE) {
|
||||
printf("FAIL: float mul inexact\n");
|
||||
ret = 1;
|
||||
}
|
||||
__asm__ volatile ("ldmxcsr %0" : : "m" (mxcsr_default));
|
||||
f_res = f_min * f_min;
|
||||
__asm__ volatile ("stmxcsr %0" : "=m" (mxcsr));
|
||||
if ((mxcsr & EXC) != (UE | PE)) {
|
||||
printf("FAIL: float mul underflow\n");
|
||||
ret = 1;
|
||||
}
|
||||
__asm__ volatile ("ldmxcsr %0" : : "m" (mxcsr_default));
|
||||
f_res = f_inf * f_zero;
|
||||
__asm__ volatile ("stmxcsr %0" : "=m" (mxcsr));
|
||||
if ((mxcsr & EXC) != IE) {
|
||||
printf("FAIL: float mul inf 0\n");
|
||||
ret = 1;
|
||||
}
|
||||
__asm__ volatile ("ldmxcsr %0" : : "m" (mxcsr_default));
|
||||
f_res = f_snan * f_third;
|
||||
__asm__ volatile ("stmxcsr %0" : "=m" (mxcsr));
|
||||
if ((mxcsr & EXC) != IE) {
|
||||
printf("FAIL: float mul snan\n");
|
||||
ret = 1;
|
||||
}
|
||||
__asm__ volatile ("ldmxcsr %0" : : "m" (mxcsr_ftz));
|
||||
f_res = f_min * f_half;
|
||||
__asm__ volatile ("stmxcsr %0" : "=m" (mxcsr));
|
||||
if ((mxcsr & EXC) != (UE | PE)) {
|
||||
printf("FAIL: float mul FTZ underflow\n");
|
||||
ret = 1;
|
||||
}
|
||||
|
||||
__asm__ volatile ("ldmxcsr %0" : : "m" (mxcsr_default));
|
||||
d_res = d_max * d_max;
|
||||
__asm__ volatile ("stmxcsr %0" : "=m" (mxcsr));
|
||||
if ((mxcsr & EXC) != (OE | PE)) {
|
||||
printf("FAIL: double mul overflow\n");
|
||||
ret = 1;
|
||||
}
|
||||
__asm__ volatile ("ldmxcsr %0" : : "m" (mxcsr_default));
|
||||
d_res = d_third * d_third;
|
||||
__asm__ volatile ("stmxcsr %0" : "=m" (mxcsr));
|
||||
if ((mxcsr & EXC) != PE) {
|
||||
printf("FAIL: double mul inexact\n");
|
||||
ret = 1;
|
||||
}
|
||||
__asm__ volatile ("ldmxcsr %0" : : "m" (mxcsr_default));
|
||||
d_res = d_min * d_min;
|
||||
__asm__ volatile ("stmxcsr %0" : "=m" (mxcsr));
|
||||
if ((mxcsr & EXC) != (UE | PE)) {
|
||||
printf("FAIL: double mul underflow\n");
|
||||
ret = 1;
|
||||
}
|
||||
__asm__ volatile ("ldmxcsr %0" : : "m" (mxcsr_default));
|
||||
d_res = d_inf * d_zero;
|
||||
__asm__ volatile ("stmxcsr %0" : "=m" (mxcsr));
|
||||
if ((mxcsr & EXC) != IE) {
|
||||
printf("FAIL: double mul inf 0\n");
|
||||
ret = 1;
|
||||
}
|
||||
__asm__ volatile ("ldmxcsr %0" : : "m" (mxcsr_default));
|
||||
d_res = d_snan * d_third;
|
||||
__asm__ volatile ("stmxcsr %0" : "=m" (mxcsr));
|
||||
if ((mxcsr & EXC) != IE) {
|
||||
printf("FAIL: double mul snan\n");
|
||||
ret = 1;
|
||||
}
|
||||
__asm__ volatile ("ldmxcsr %0" : : "m" (mxcsr_ftz));
|
||||
d_res = d_min * d_half;
|
||||
__asm__ volatile ("stmxcsr %0" : "=m" (mxcsr));
|
||||
if ((mxcsr & EXC) != (UE | PE)) {
|
||||
printf("FAIL: double mul FTZ underflow\n");
|
||||
ret = 1;
|
||||
}
|
||||
|
||||
__asm__ volatile ("ldmxcsr %0" : : "m" (mxcsr_default));
|
||||
f_res = f_max / f_min;
|
||||
__asm__ volatile ("stmxcsr %0" : "=m" (mxcsr));
|
||||
if ((mxcsr & EXC) != (OE | PE)) {
|
||||
printf("FAIL: float div overflow\n");
|
||||
ret = 1;
|
||||
}
|
||||
__asm__ volatile ("ldmxcsr %0" : : "m" (mxcsr_default));
|
||||
f_res = f_one / f_third;
|
||||
__asm__ volatile ("stmxcsr %0" : "=m" (mxcsr));
|
||||
if ((mxcsr & EXC) != PE) {
|
||||
printf("FAIL: float div inexact\n");
|
||||
ret = 1;
|
||||
}
|
||||
__asm__ volatile ("ldmxcsr %0" : : "m" (mxcsr_default));
|
||||
f_res = f_min / f_max;
|
||||
__asm__ volatile ("stmxcsr %0" : "=m" (mxcsr));
|
||||
if ((mxcsr & EXC) != (UE | PE)) {
|
||||
printf("FAIL: float div underflow\n");
|
||||
ret = 1;
|
||||
}
|
||||
__asm__ volatile ("ldmxcsr %0" : : "m" (mxcsr_default));
|
||||
f_res = f_one / f_zero;
|
||||
__asm__ volatile ("stmxcsr %0" : "=m" (mxcsr));
|
||||
if ((mxcsr & EXC) != ZE) {
|
||||
printf("FAIL: float div 1 0\n");
|
||||
ret = 1;
|
||||
}
|
||||
__asm__ volatile ("ldmxcsr %0" : : "m" (mxcsr_default));
|
||||
f_res = f_inf / f_zero;
|
||||
__asm__ volatile ("stmxcsr %0" : "=m" (mxcsr));
|
||||
if ((mxcsr & EXC) != 0) {
|
||||
printf("FAIL: float div inf 0\n");
|
||||
ret = 1;
|
||||
}
|
||||
__asm__ volatile ("ldmxcsr %0" : : "m" (mxcsr_default));
|
||||
f_res = f_nan / f_zero;
|
||||
__asm__ volatile ("stmxcsr %0" : "=m" (mxcsr));
|
||||
if ((mxcsr & EXC) != 0) {
|
||||
printf("FAIL: float div nan 0\n");
|
||||
ret = 1;
|
||||
}
|
||||
__asm__ volatile ("ldmxcsr %0" : : "m" (mxcsr_default));
|
||||
f_res = f_zero / f_zero;
|
||||
__asm__ volatile ("stmxcsr %0" : "=m" (mxcsr));
|
||||
if ((mxcsr & EXC) != IE) {
|
||||
printf("FAIL: float div 0 0\n");
|
||||
ret = 1;
|
||||
}
|
||||
__asm__ volatile ("ldmxcsr %0" : : "m" (mxcsr_default));
|
||||
f_res = f_inf / f_inf;
|
||||
__asm__ volatile ("stmxcsr %0" : "=m" (mxcsr));
|
||||
if ((mxcsr & EXC) != IE) {
|
||||
printf("FAIL: float div inf inf\n");
|
||||
ret = 1;
|
||||
}
|
||||
__asm__ volatile ("ldmxcsr %0" : : "m" (mxcsr_default));
|
||||
f_res = f_snan / f_third;
|
||||
__asm__ volatile ("stmxcsr %0" : "=m" (mxcsr));
|
||||
if ((mxcsr & EXC) != IE) {
|
||||
printf("FAIL: float div snan\n");
|
||||
ret = 1;
|
||||
}
|
||||
__asm__ volatile ("ldmxcsr %0" : : "m" (mxcsr_ftz));
|
||||
f_res = f_min / f_two;
|
||||
__asm__ volatile ("stmxcsr %0" : "=m" (mxcsr));
|
||||
if ((mxcsr & EXC) != (UE | PE)) {
|
||||
printf("FAIL: float div FTZ underflow\n");
|
||||
ret = 1;
|
||||
}
|
||||
|
||||
__asm__ volatile ("ldmxcsr %0" : : "m" (mxcsr_default));
|
||||
d_res = d_max / d_min;
|
||||
__asm__ volatile ("stmxcsr %0" : "=m" (mxcsr));
|
||||
if ((mxcsr & EXC) != (OE | PE)) {
|
||||
printf("FAIL: double div overflow\n");
|
||||
ret = 1;
|
||||
}
|
||||
__asm__ volatile ("ldmxcsr %0" : : "m" (mxcsr_default));
|
||||
d_res = d_one / d_third;
|
||||
__asm__ volatile ("stmxcsr %0" : "=m" (mxcsr));
|
||||
if ((mxcsr & EXC) != PE) {
|
||||
printf("FAIL: double div inexact\n");
|
||||
ret = 1;
|
||||
}
|
||||
__asm__ volatile ("ldmxcsr %0" : : "m" (mxcsr_default));
|
||||
d_res = d_min / d_max;
|
||||
__asm__ volatile ("stmxcsr %0" : "=m" (mxcsr));
|
||||
if ((mxcsr & EXC) != (UE | PE)) {
|
||||
printf("FAIL: double div underflow\n");
|
||||
ret = 1;
|
||||
}
|
||||
__asm__ volatile ("ldmxcsr %0" : : "m" (mxcsr_default));
|
||||
d_res = d_one / d_zero;
|
||||
__asm__ volatile ("stmxcsr %0" : "=m" (mxcsr));
|
||||
if ((mxcsr & EXC) != ZE) {
|
||||
printf("FAIL: double div 1 0\n");
|
||||
ret = 1;
|
||||
}
|
||||
__asm__ volatile ("ldmxcsr %0" : : "m" (mxcsr_default));
|
||||
d_res = d_inf / d_zero;
|
||||
__asm__ volatile ("stmxcsr %0" : "=m" (mxcsr));
|
||||
if ((mxcsr & EXC) != 0) {
|
||||
printf("FAIL: double div inf 0\n");
|
||||
ret = 1;
|
||||
}
|
||||
__asm__ volatile ("ldmxcsr %0" : : "m" (mxcsr_default));
|
||||
d_res = d_nan / d_zero;
|
||||
__asm__ volatile ("stmxcsr %0" : "=m" (mxcsr));
|
||||
if ((mxcsr & EXC) != 0) {
|
||||
printf("FAIL: double div nan 0\n");
|
||||
ret = 1;
|
||||
}
|
||||
__asm__ volatile ("ldmxcsr %0" : : "m" (mxcsr_default));
|
||||
d_res = d_zero / d_zero;
|
||||
__asm__ volatile ("stmxcsr %0" : "=m" (mxcsr));
|
||||
if ((mxcsr & EXC) != IE) {
|
||||
printf("FAIL: double div 0 0\n");
|
||||
ret = 1;
|
||||
}
|
||||
__asm__ volatile ("ldmxcsr %0" : : "m" (mxcsr_default));
|
||||
d_res = d_inf / d_inf;
|
||||
__asm__ volatile ("stmxcsr %0" : "=m" (mxcsr));
|
||||
if ((mxcsr & EXC) != IE) {
|
||||
printf("FAIL: double div inf inf\n");
|
||||
ret = 1;
|
||||
}
|
||||
__asm__ volatile ("ldmxcsr %0" : : "m" (mxcsr_default));
|
||||
d_res = d_snan / d_third;
|
||||
__asm__ volatile ("stmxcsr %0" : "=m" (mxcsr));
|
||||
if ((mxcsr & EXC) != IE) {
|
||||
printf("FAIL: double div snan\n");
|
||||
ret = 1;
|
||||
}
|
||||
__asm__ volatile ("ldmxcsr %0" : : "m" (mxcsr_ftz));
|
||||
d_res = d_min / d_two;
|
||||
__asm__ volatile ("stmxcsr %0" : "=m" (mxcsr));
|
||||
if ((mxcsr & EXC) != (UE | PE)) {
|
||||
printf("FAIL: double div FTZ underflow\n");
|
||||
ret = 1;
|
||||
}
|
||||
|
||||
__asm__ volatile ("ldmxcsr %0" : : "m" (mxcsr_default));
|
||||
__asm__ volatile ("sqrtss %0, %0" : "=x" (f_res) : "0" (f_max));
|
||||
__asm__ volatile ("stmxcsr %0" : "=m" (mxcsr));
|
||||
if ((mxcsr & EXC) != PE) {
|
||||
printf("FAIL: sqrtss inexact\n");
|
||||
ret = 1;
|
||||
}
|
||||
__asm__ volatile ("ldmxcsr %0" : : "m" (mxcsr_default));
|
||||
__asm__ volatile ("sqrtss %0, %0" : "=x" (f_res) : "0" (f_nmax));
|
||||
__asm__ volatile ("stmxcsr %0" : "=m" (mxcsr));
|
||||
if ((mxcsr & EXC) != IE) {
|
||||
printf("FAIL: sqrtss -max\n");
|
||||
ret = 1;
|
||||
}
|
||||
__asm__ volatile ("ldmxcsr %0" : : "m" (mxcsr_default));
|
||||
__asm__ volatile ("sqrtss %0, %0" : "=x" (f_res) : "0" (f_ninf));
|
||||
__asm__ volatile ("stmxcsr %0" : "=m" (mxcsr));
|
||||
if ((mxcsr & EXC) != IE) {
|
||||
printf("FAIL: sqrtss -inf\n");
|
||||
ret = 1;
|
||||
}
|
||||
__asm__ volatile ("ldmxcsr %0" : : "m" (mxcsr_default));
|
||||
__asm__ volatile ("sqrtss %0, %0" : "=x" (f_res) : "0" (f_snan));
|
||||
__asm__ volatile ("stmxcsr %0" : "=m" (mxcsr));
|
||||
if ((mxcsr & EXC) != IE) {
|
||||
printf("FAIL: sqrtss snan\n");
|
||||
ret = 1;
|
||||
}
|
||||
__asm__ volatile ("ldmxcsr %0" : : "m" (mxcsr_default));
|
||||
__asm__ volatile ("sqrtss %0, %0" : "=x" (f_res) : "0" (f_nzero));
|
||||
__asm__ volatile ("stmxcsr %0" : "=m" (mxcsr));
|
||||
if ((mxcsr & EXC) != 0) {
|
||||
printf("FAIL: sqrtss -0\n");
|
||||
ret = 1;
|
||||
}
|
||||
__asm__ volatile ("ldmxcsr %0" : : "m" (mxcsr_default));
|
||||
__asm__ volatile ("sqrtss %0, %0" : "=x" (f_res) :
|
||||
"0" (-__builtin_nanf("")));
|
||||
__asm__ volatile ("stmxcsr %0" : "=m" (mxcsr));
|
||||
if ((mxcsr & EXC) != 0) {
|
||||
printf("FAIL: sqrtss -nan\n");
|
||||
ret = 1;
|
||||
}
|
||||
|
||||
__asm__ volatile ("ldmxcsr %0" : : "m" (mxcsr_default));
|
||||
__asm__ volatile ("sqrtsd %0, %0" : "=x" (d_res) : "0" (d_max));
|
||||
__asm__ volatile ("stmxcsr %0" : "=m" (mxcsr));
|
||||
if ((mxcsr & EXC) != PE) {
|
||||
printf("FAIL: sqrtsd inexact\n");
|
||||
ret = 1;
|
||||
}
|
||||
__asm__ volatile ("ldmxcsr %0" : : "m" (mxcsr_default));
|
||||
__asm__ volatile ("sqrtsd %0, %0" : "=x" (d_res) : "0" (d_nmax));
|
||||
__asm__ volatile ("stmxcsr %0" : "=m" (mxcsr));
|
||||
if ((mxcsr & EXC) != IE) {
|
||||
printf("FAIL: sqrtsd -max\n");
|
||||
ret = 1;
|
||||
}
|
||||
__asm__ volatile ("ldmxcsr %0" : : "m" (mxcsr_default));
|
||||
__asm__ volatile ("sqrtsd %0, %0" : "=x" (d_res) : "0" (d_ninf));
|
||||
__asm__ volatile ("stmxcsr %0" : "=m" (mxcsr));
|
||||
if ((mxcsr & EXC) != IE) {
|
||||
printf("FAIL: sqrtsd -inf\n");
|
||||
ret = 1;
|
||||
}
|
||||
__asm__ volatile ("ldmxcsr %0" : : "m" (mxcsr_default));
|
||||
__asm__ volatile ("sqrtsd %0, %0" : "=x" (d_res) : "0" (d_snan));
|
||||
__asm__ volatile ("stmxcsr %0" : "=m" (mxcsr));
|
||||
if ((mxcsr & EXC) != IE) {
|
||||
printf("FAIL: sqrtsd snan\n");
|
||||
ret = 1;
|
||||
}
|
||||
__asm__ volatile ("ldmxcsr %0" : : "m" (mxcsr_default));
|
||||
__asm__ volatile ("sqrtsd %0, %0" : "=x" (d_res) : "0" (d_nzero));
|
||||
__asm__ volatile ("stmxcsr %0" : "=m" (mxcsr));
|
||||
if ((mxcsr & EXC) != 0) {
|
||||
printf("FAIL: sqrtsd -0\n");
|
||||
ret = 1;
|
||||
}
|
||||
__asm__ volatile ("ldmxcsr %0" : : "m" (mxcsr_default));
|
||||
__asm__ volatile ("sqrtsd %0, %0" : "=x" (d_res) :
|
||||
"0" (-__builtin_nan("")));
|
||||
__asm__ volatile ("stmxcsr %0" : "=m" (mxcsr));
|
||||
if ((mxcsr & EXC) != 0) {
|
||||
printf("FAIL: sqrtsd -nan\n");
|
||||
ret = 1;
|
||||
}
|
||||
|
||||
__asm__ volatile ("ldmxcsr %0" : : "m" (mxcsr_default));
|
||||
__asm__ volatile ("maxss %1, %0" : : "x" (f_nan), "x" (f_zero));
|
||||
__asm__ volatile ("stmxcsr %0" : "=m" (mxcsr));
|
||||
if ((mxcsr & EXC) != IE) {
|
||||
printf("FAIL: maxss nan\n");
|
||||
ret = 1;
|
||||
}
|
||||
__asm__ volatile ("ldmxcsr %0" : : "m" (mxcsr_default));
|
||||
__asm__ volatile ("minss %1, %0" : : "x" (f_nan), "x" (f_zero));
|
||||
__asm__ volatile ("stmxcsr %0" : "=m" (mxcsr));
|
||||
if ((mxcsr & EXC) != IE) {
|
||||
printf("FAIL: minss nan\n");
|
||||
ret = 1;
|
||||
}
|
||||
|
||||
__asm__ volatile ("ldmxcsr %0" : : "m" (mxcsr_default));
|
||||
__asm__ volatile ("maxsd %1, %0" : : "x" (d_nan), "x" (d_zero));
|
||||
__asm__ volatile ("stmxcsr %0" : "=m" (mxcsr));
|
||||
if ((mxcsr & EXC) != IE) {
|
||||
printf("FAIL: maxsd nan\n");
|
||||
ret = 1;
|
||||
}
|
||||
__asm__ volatile ("ldmxcsr %0" : : "m" (mxcsr_default));
|
||||
__asm__ volatile ("minsd %1, %0" : : "x" (d_nan), "x" (d_zero));
|
||||
__asm__ volatile ("stmxcsr %0" : "=m" (mxcsr));
|
||||
if ((mxcsr & EXC) != IE) {
|
||||
printf("FAIL: minsd nan\n");
|
||||
ret = 1;
|
||||
}
|
||||
|
||||
__asm__ volatile ("ldmxcsr %0" : : "m" (mxcsr_default));
|
||||
__asm__ volatile ("cvtsi2ss %1, %0" : "=x" (f_res) : "m" (i32_max));
|
||||
__asm__ volatile ("stmxcsr %0" : "=m" (mxcsr));
|
||||
if ((mxcsr & EXC) != PE) {
|
||||
printf("FAIL: cvtsi2ss inexact\n");
|
||||
ret = 1;
|
||||
}
|
||||
|
||||
__asm__ volatile ("ldmxcsr %0" : : "m" (mxcsr_default));
|
||||
__asm__ volatile ("cvtsi2sd %1, %0" : "=x" (d_res) : "m" (i32_max));
|
||||
__asm__ volatile ("stmxcsr %0" : "=m" (mxcsr));
|
||||
if ((mxcsr & EXC) != 0) {
|
||||
printf("FAIL: cvtsi2sd exact\n");
|
||||
ret = 1;
|
||||
}
|
||||
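    /*
     * Why the two cases above differ: INT32_MAX = 2^31 - 1 needs 31
     * significant bits.  float has a 24-bit significand, so cvtsi2ss
     * must round (up to 2^31 here) and sets PE; double has a 53-bit
     * significand, every int32_t converts exactly, so cvtsi2sd sets
     * nothing.
     */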
|
||||
__asm__ volatile ("ldmxcsr %0" : : "m" (mxcsr_default));
|
||||
__asm__ volatile ("cvtss2si %1, %0" : "=r" (i32_res) : "x" (1.5f));
|
||||
__asm__ volatile ("stmxcsr %0" : "=m" (mxcsr));
|
||||
if ((mxcsr & EXC) != PE) {
|
||||
printf("FAIL: cvtss2si inexact\n");
|
||||
ret = 1;
|
||||
}
|
||||
__asm__ volatile ("ldmxcsr %0" : : "m" (mxcsr_default));
|
||||
__asm__ volatile ("cvtss2si %1, %0" : "=r" (i32_res) : "x" (0x1p31f));
|
||||
__asm__ volatile ("stmxcsr %0" : "=m" (mxcsr));
|
||||
if ((mxcsr & EXC) != IE) {
|
||||
printf("FAIL: cvtss2si 0x1p31\n");
|
||||
ret = 1;
|
||||
}
|
||||
__asm__ volatile ("ldmxcsr %0" : : "m" (mxcsr_default));
|
||||
__asm__ volatile ("cvtss2si %1, %0" : "=r" (i32_res) : "x" (f_inf));
|
||||
__asm__ volatile ("stmxcsr %0" : "=m" (mxcsr));
|
||||
if ((mxcsr & EXC) != IE) {
|
||||
printf("FAIL: cvtss2si inf\n");
|
||||
ret = 1;
|
||||
}
|
||||
|
||||
__asm__ volatile ("ldmxcsr %0" : : "m" (mxcsr_default));
|
||||
__asm__ volatile ("cvtsd2si %1, %0" : "=r" (i32_res) : "x" (1.5));
|
||||
__asm__ volatile ("stmxcsr %0" : "=m" (mxcsr));
|
||||
if ((mxcsr & EXC) != PE) {
|
||||
printf("FAIL: cvtsd2si inexact\n");
|
||||
ret = 1;
|
||||
}
|
||||
__asm__ volatile ("ldmxcsr %0" : : "m" (mxcsr_default));
|
||||
__asm__ volatile ("cvtsd2si %1, %0" : "=r" (i32_res) : "x" (0x1p31));
|
||||
__asm__ volatile ("stmxcsr %0" : "=m" (mxcsr));
|
||||
if ((mxcsr & EXC) != IE) {
|
||||
printf("FAIL: cvtsd2si 0x1p31\n");
|
||||
ret = 1;
|
||||
}
|
||||
__asm__ volatile ("ldmxcsr %0" : : "m" (mxcsr_default));
|
||||
__asm__ volatile ("cvtsd2si %1, %0" : "=r" (i32_res) : "x" (d_inf));
|
||||
__asm__ volatile ("stmxcsr %0" : "=m" (mxcsr));
|
||||
if ((mxcsr & EXC) != IE) {
|
||||
printf("FAIL: cvtsd2si inf\n");
|
||||
ret = 1;
|
||||
}
|
||||
|
||||
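    /* The truncating conversions behave the same way for these inputs:
     * PE for 1.5, IE for values outside the int32 range. */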
__asm__ volatile ("ldmxcsr %0" : : "m" (mxcsr_default));
|
||||
__asm__ volatile ("cvttss2si %1, %0" : "=r" (i32_res) : "x" (1.5f));
|
||||
__asm__ volatile ("stmxcsr %0" : "=m" (mxcsr));
|
||||
if ((mxcsr & EXC) != PE) {
|
||||
printf("FAIL: cvttss2si inexact\n");
|
||||
ret = 1;
|
||||
}
|
||||
__asm__ volatile ("ldmxcsr %0" : : "m" (mxcsr_default));
|
||||
__asm__ volatile ("cvttss2si %1, %0" : "=r" (i32_res) : "x" (0x1p31f));
|
||||
__asm__ volatile ("stmxcsr %0" : "=m" (mxcsr));
|
||||
if ((mxcsr & EXC) != IE) {
|
||||
printf("FAIL: cvttss2si 0x1p31\n");
|
||||
ret = 1;
|
||||
}
|
||||
__asm__ volatile ("ldmxcsr %0" : : "m" (mxcsr_default));
|
||||
__asm__ volatile ("cvttss2si %1, %0" : "=r" (i32_res) : "x" (f_inf));
|
||||
__asm__ volatile ("stmxcsr %0" : "=m" (mxcsr));
|
||||
if ((mxcsr & EXC) != IE) {
|
||||
printf("FAIL: cvttss2si inf\n");
|
||||
ret = 1;
|
||||
}
|
||||
|
||||
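    /* ...and likewise for cvttsd2si. */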
__asm__ volatile ("ldmxcsr %0" : : "m" (mxcsr_default));
|
||||
__asm__ volatile ("cvttsd2si %1, %0" : "=r" (i32_res) : "x" (1.5));
|
||||
__asm__ volatile ("stmxcsr %0" : "=m" (mxcsr));
|
||||
if ((mxcsr & EXC) != PE) {
|
||||
printf("FAIL: cvttsd2si inexact\n");
|
||||
ret = 1;
|
||||
}
|
||||
__asm__ volatile ("ldmxcsr %0" : : "m" (mxcsr_default));
|
||||
__asm__ volatile ("cvttsd2si %1, %0" : "=r" (i32_res) : "x" (0x1p31));
|
||||
__asm__ volatile ("stmxcsr %0" : "=m" (mxcsr));
|
||||
if ((mxcsr & EXC) != IE) {
|
||||
printf("FAIL: cvttsd2si 0x1p31\n");
|
||||
ret = 1;
|
||||
}
|
||||
__asm__ volatile ("ldmxcsr %0" : : "m" (mxcsr_default));
|
||||
__asm__ volatile ("cvttsd2si %1, %0" : "=r" (i32_res) : "x" (d_inf));
|
||||
__asm__ volatile ("stmxcsr %0" : "=m" (mxcsr));
|
||||
if ((mxcsr & EXC) != IE) {
|
||||
printf("FAIL: cvttsd2si inf\n");
|
||||
ret = 1;
|
||||
}
|
||||
|
||||
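    /* rcpss and rsqrtss are defined to signal no SIMD floating-point
     * exceptions at all, even for a signaling NaN input; the flags are
     * only checked after the result has been consumed by the add of
     * f_one, which therefore must not set any flag either. */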
__asm__ volatile ("ldmxcsr %0" : : "m" (mxcsr_default));
|
||||
__asm__ volatile ("rcpss %0, %0" : "=x" (f_res) : "0" (f_snan));
|
||||
f_res += f_one;
|
||||
__asm__ volatile ("stmxcsr %0" : "=m" (mxcsr));
|
||||
if ((mxcsr & EXC) != 0) {
|
||||
printf("FAIL: rcpss snan\n");
|
||||
ret = 1;
|
||||
}
|
||||
|
||||
__asm__ volatile ("ldmxcsr %0" : : "m" (mxcsr_default));
|
||||
__asm__ volatile ("rsqrtss %0, %0" : "=x" (f_res) : "0" (f_snan));
|
||||
f_res += f_one;
|
||||
__asm__ volatile ("stmxcsr %0" : "=m" (mxcsr));
|
||||
if ((mxcsr & EXC) != 0) {
|
||||
printf("FAIL: rsqrtss snan\n");
|
||||
ret = 1;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
|
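The EXC, IE and PE macros and mxcsr_default used throughout these checks are defined earlier in the test file and are not part of this excerpt. As a point of reference, here is a small standalone sketch of the same ldmxcsr/stmxcsr pattern with assumed definitions that follow the architectural MXCSR flag layout; the test's real macros may be spelled or valued differently, and the sketch assumes x86-64 (SSE) floating-point code generation.

/* Sketch only: assumed definitions, not copied from the test. */
#include <stdio.h>

#define IE  (1 << 0)   /* invalid operation */
#define DE  (1 << 1)   /* denormal operand */
#define ZE  (1 << 2)   /* divide by zero */
#define OE  (1 << 3)   /* overflow */
#define UE  (1 << 4)   /* underflow */
#define PE  (1 << 5)   /* precision (inexact) */
#define EXC (IE | DE | ZE | OE | UE | PE)

int main(void)
{
    const unsigned int mxcsr_default = 0x1f80; /* all masked, flags clear */
    unsigned int mxcsr;
    volatile float x = 1.0f, y = 3.0f, r;

    /* Same pattern as each check above: reset the flags, run one SSE
     * operation, read the flags back.  1.0f / 3.0f is inexact. */
    __asm__ volatile ("ldmxcsr %0" : : "m" (mxcsr_default));
    r = x / y;
    __asm__ volatile ("stmxcsr %0" : "=m" (mxcsr));
    (void)r;
    printf("exception flags: %#x (PE %s)\n", mxcsr & EXC,
           (mxcsr & PE) ? "set" : "clear");
    return 0;
}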
@@ -32,6 +32,7 @@
 #include "ui/input.h"
 #include "sysemu/sysemu.h"
 #include "sysemu/runstate.h"
+#include "sysemu/cpu-throttle.h"
 #include "qapi/error.h"
 #include "qapi/qapi-commands-block.h"
 #include "qapi/qapi-commands-misc.h"
@@ -26,6 +26,8 @@ typedef enum {
 
 /* Prepend timestamp to messages */
 bool error_with_timestamp;
+bool error_with_guestname;
+const char *error_guest_name;
 
 int error_printf(const char *fmt, ...)
 {
@@ -213,6 +215,11 @@ static void vreport(report_type type, const char *fmt, va_list ap)
         g_free(timestr);
     }
 
+    /* Only prepend guest name if -msg guest-name and -name guest=... are set */
+    if (error_with_guestname && error_guest_name && !cur_mon) {
+        error_printf("%s ", error_guest_name);
+    }
+
     print_loc();
 
     switch (type) {
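For context, a minimal standalone sketch of what the new prefix logic amounts to. The globals mirror the hunk above; how they are populated (something like "-name guest=vm1 -msg guest-name=on" on the command line) and the meaning of cur_mon (non-NULL when output goes to a monitor instead of stderr) are assumptions based on this excerpt, not on the full option-parsing code.

#include <stdbool.h>
#include <stdio.h>

/* Mirror the globals added above; assumed to be filled in from
 * "-msg guest-name" and "-name guest=..." elsewhere in QEMU. */
static bool error_with_guestname = true;
static const char *error_guest_name = "vm1";
static void *cur_mon;   /* NULL: messages go to stderr, not a monitor */

static void report(const char *msg)
{
    /* Same condition as the vreport() hunk: prepend the guest name only
     * for stderr output and only when both options were given. */
    if (error_with_guestname && error_guest_name && !cur_mon) {
        fprintf(stderr, "%s ", error_guest_name);
    }
    fprintf(stderr, "%s\n", msg);
}

int main(void)
{
    report("qemu-system-x86_64: example error message");
    return 0;
}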