mirror of https://gitee.com/openkylin/qemu.git
Merge remote-tracking branch 'remotes/bonzini/tags/for-upstream' into staging

- Migration and linuxboot fixes for 2.2 regressions
- valgrind/KVM support
- small i386 patches
- PCI SD host controller support
- malloc/free cleanups from Markus (x86/scsi)
- IvyBridge model
- XSAVES support for KVM
- initial patches from record/replay

# gpg: Signature made Mon 15 Dec 2014 16:35:08 GMT using RSA key ID 78C7AE83
# gpg: Good signature from "Paolo Bonzini <bonzini@gnu.org>"
# gpg:                 aka "Paolo Bonzini <pbonzini@redhat.com>"
# gpg: WARNING: This key is not certified with sufficiently trusted signatures!
# gpg:          It is not certain that the signature belongs to the owner.
# Primary key fingerprint: 46F5 9FBD 57D6 12E7 BFD4  E2F7 7E15 100C CD36 69B1
#      Subkey fingerprint: F133 3857 4B66 2389 866C  7682 BFFB D25F 78C7 AE83

* remotes/bonzini/tags/for-upstream: (47 commits)
  sdhci: Support SDHCI devices on PCI
  sdhci: Define SDHCI PCI ids
  sdhci: Add "sysbus" to sdhci QOM types and methods
  sdhci: Remove class "virtual" methods
  sdhci: Set a default frequency clock
  serial: only resample THR interrupt on rising edge of IER.THRI
  serial: update LSR on enabling/disabling FIFOs
  serial: clean up THRE/TEMT handling
  serial: reset thri_pending on IER writes with THRI=0
  linuxboot: fix loading old kernels
  kvm/apic: fix 2.2->2.1 migration
  target-i386: add Ivy Bridge CPU model
  target-i386: add f16c and rdrand to Haswell and Broadwell
  target-i386: add VME to all CPUs
  pc: add 2.3 machine types
  i386: do not cross the pages boundaries in replay mode
  cpus: make icount warp behave well with respect to stop/cont
  timer: introduce new QEMU_CLOCK_VIRTUAL_RT clock
  cpu-exec: invalidate nocache translation if they are interrupted
  icount: introduce cpu_get_icount_raw
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
commit dfa9c2a0f4
cpu-exec.c  (15 lines changed)

@@ -168,7 +168,9 @@ static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, uint8_t *tb_ptr)
     }
 #endif /* DEBUG_DISAS */

+    cpu->can_do_io = 0;
     next_tb = tcg_qemu_tb_exec(env, tb_ptr);
+    cpu->can_do_io = 1;
     trace_exec_tb_exit((void *) (next_tb & ~TB_EXIT_MASK),
                        next_tb & TB_EXIT_MASK);

@@ -202,14 +204,19 @@ static void cpu_exec_nocache(CPUArchState *env, int max_cycles,
 {
     CPUState *cpu = ENV_GET_CPU(env);
     TranslationBlock *tb;
+    target_ulong pc = orig_tb->pc;
+    target_ulong cs_base = orig_tb->cs_base;
+    uint64_t flags = orig_tb->flags;

     /* Should never happen.
        We only end up here when an existing TB is too long.  */
     if (max_cycles > CF_COUNT_MASK)
         max_cycles = CF_COUNT_MASK;

-    tb = tb_gen_code(cpu, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
-                     max_cycles);
+    /* tb_gen_code can flush our orig_tb, invalidate it now */
+    tb_phys_invalidate(orig_tb, -1);
+    tb = tb_gen_code(cpu, pc, cs_base, flags,
+                     max_cycles | CF_NOCACHE);
     cpu->current_tb = tb;
     /* execute the generated code */
     trace_exec_tb_nocache(tb, tb->pc);

@@ -353,7 +360,6 @@ int cpu_exec(CPUArchState *env)
     }

     cc->cpu_exec_enter(cpu);
-    cpu->exception_index = -1;

     /* Calculate difference between guest clock and host clock.
      * This delay includes the delay of the last cycle, so

@@ -373,6 +379,7 @@ int cpu_exec(CPUArchState *env)
                 if (ret == EXCP_DEBUG) {
                     cpu_handle_debug_exception(env);
                 }
+                cpu->exception_index = -1;
                 break;
             } else {
 #if defined(CONFIG_USER_ONLY)

@@ -383,6 +390,7 @@ int cpu_exec(CPUArchState *env)
                 cc->do_interrupt(cpu);
 #endif
                 ret = cpu->exception_index;
+                cpu->exception_index = -1;
                 break;
 #else
                 cc->do_interrupt(cpu);

@@ -537,6 +545,7 @@ int cpu_exec(CPUArchState *env)
             cpu = current_cpu;
             env = cpu->env_ptr;
             cc = CPU_GET_CLASS(cpu);
+            cpu->can_do_io = 1;
 #ifdef TARGET_I386
             x86_cpu = X86_CPU(cpu);
 #endif
cpus.c  (40 lines changed)

@@ -136,8 +136,7 @@ typedef struct TimersState {

 static TimersState timers_state;

-/* Return the virtual CPU time, based on the instruction counter.  */
-static int64_t cpu_get_icount_locked(void)
+int64_t cpu_get_icount_raw(void)
 {
     int64_t icount;
     CPUState *cpu = current_cpu;

@@ -145,10 +144,18 @@ static int64_t cpu_get_icount_locked(void)
     icount = timers_state.qemu_icount;
     if (cpu) {
         if (!cpu_can_do_io(cpu)) {
-            fprintf(stderr, "Bad clock read\n");
+            fprintf(stderr, "Bad icount read\n");
             exit(1);
         }
         icount -= (cpu->icount_decr.u16.low + cpu->icount_extra);
     }
+    return icount;
+}
+
+/* Return the virtual CPU time, based on the instruction counter.  */
+static int64_t cpu_get_icount_locked(void)
+{
+    int64_t icount = cpu_get_icount_raw();
     return timers_state.qemu_icount_bias + cpu_icount_to_ns(icount);
 }

@@ -345,7 +352,7 @@ static void icount_warp_rt(void *opaque)
     seqlock_write_lock(&timers_state.vm_clock_seqlock);
     if (runstate_is_running()) {
-        int64_t clock = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
+        int64_t clock = cpu_get_clock_locked();
         int64_t warp_delta;

         warp_delta = clock - vm_clock_warp_start;

@@ -354,9 +361,8 @@ static void icount_warp_rt(void *opaque)
          * In adaptive mode, do not let QEMU_CLOCK_VIRTUAL run too
          * far ahead of real time.
          */
-        int64_t cur_time = cpu_get_clock_locked();
         int64_t cur_icount = cpu_get_icount_locked();
-        int64_t delta = cur_time - cur_icount;
+        int64_t delta = clock - cur_icount;
         warp_delta = MIN(warp_delta, delta);
     }
     timers_state.qemu_icount_bias += warp_delta;

@@ -419,7 +425,7 @@ void qemu_clock_warp(QEMUClockType type)
     /* We want to use the earliest deadline from ALL vm_clocks */
-    clock = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
+    clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT);
     deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
     if (deadline < 0) {
         return;

@@ -437,8 +443,8 @@ void qemu_clock_warp(QEMUClockType type)
     * sleep in icount mode if there is a pending QEMU_CLOCK_VIRTUAL
     * timer; rather time could just advance to the next QEMU_CLOCK_VIRTUAL
     * event.  Instead, we do stop VCPUs and only advance QEMU_CLOCK_VIRTUAL
-    * after some e"real" time, (related to the time left until the next
-    * event) has passed. The QEMU_CLOCK_REALTIME timer will do this.
+    * after some "real" time, (related to the time left until the next
+    * event) has passed. The QEMU_CLOCK_VIRTUAL_RT clock will do this.
     * This avoids that the warps are visible externally; for example,
     * you will not be sending network packets continuously instead of
     * every 100ms.

@@ -512,8 +518,8 @@ void configure_icount(QemuOpts *opts, Error **errp)
         return;
     }
     icount_align_option = qemu_opt_get_bool(opts, "align", false);
-    icount_warp_timer = timer_new_ns(QEMU_CLOCK_REALTIME,
-                                     icount_warp_rt, NULL);
+    icount_warp_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL_RT,
+                                     icount_warp_rt, NULL);
     if (strcmp(option, "auto") != 0) {
         errno = 0;
         icount_time_shift = strtol(option, &rem_str, 0);

@@ -537,10 +543,10 @@ void configure_icount(QemuOpts *opts, Error **errp)
       the virtual time trigger catches emulated time passing too fast.
       Realtime triggers occur even when idle, so use them less frequently
       than VM triggers.  */
-    icount_rt_timer = timer_new_ms(QEMU_CLOCK_REALTIME,
-                                   icount_adjust_rt, NULL);
+    icount_rt_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL_RT,
+                                   icount_adjust_rt, NULL);
     timer_mod(icount_rt_timer,
-              qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + 1000);
+              qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL_RT) + 1000);
     icount_vm_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                    icount_adjust_vm, NULL);
     timer_mod(icount_vm_timer,

@@ -934,6 +940,8 @@ static void *qemu_kvm_cpu_thread_fn(void *arg)
     qemu_mutex_lock(&qemu_global_mutex);
     qemu_thread_get_self(cpu->thread);
     cpu->thread_id = qemu_get_thread_id();
+    cpu->exception_index = -1;
+    cpu->can_do_io = 1;
     current_cpu = cpu;

     r = kvm_init_vcpu(cpu);

@@ -974,6 +982,8 @@ static void *qemu_dummy_cpu_thread_fn(void *arg)
     qemu_mutex_lock_iothread();
     qemu_thread_get_self(cpu->thread);
     cpu->thread_id = qemu_get_thread_id();
+    cpu->exception_index = -1;
+    cpu->can_do_io = 1;

     sigemptyset(&waitset);
     sigaddset(&waitset, SIG_IPI);

@@ -1016,6 +1026,8 @@ static void *qemu_tcg_cpu_thread_fn(void *arg)
     CPU_FOREACH(cpu) {
         cpu->thread_id = qemu_get_thread_id();
         cpu->created = true;
+        cpu->exception_index = -1;
+        cpu->can_do_io = 1;
     }
     qemu_cond_signal(&qemu_cpu_cond);
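The new cpu_get_icount_raw() above returns the bare instruction count, without the icount bias or the shift-to-nanoseconds conversion done by cpu_get_icount_locked(), which is what the record/replay work in this series needs. A minimal sketch of a caller that stamps an event with the current instruction count; the event structure and logging helper are hypothetical, not part of this commit:

    #include "qemu/timer.h"

    /* Hypothetical event record, for illustration only. */
    typedef struct ReplayEvent {
        int64_t icount;   /* instructions executed when the event happened */
        int     kind;
    } ReplayEvent;

    static void log_event(int kind)
    {
        ReplayEvent ev = {
            /* raw counter; only meaningful while cpu->can_do_io is set,
             * otherwise cpu_get_icount_raw() aborts with "Bad icount read" */
            .icount = cpu_get_icount_raw(),
            .kind   = kind,
        };
        /* ... append ev to a log ... */
        (void)ev;
    }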
default-configs fragment  (file name not preserved in this mirror view)

@@ -30,3 +30,5 @@ CONFIG_IPACK=y
 CONFIG_WDT_IB6300ESB=y
 CONFIG_PCI_TESTDEV=y
 CONFIG_NVME_PCI=y
+CONFIG_SD=y
+CONFIG_SDHCI=y
PCI ID documentation  (file name not preserved in this mirror view)

@@ -44,6 +44,8 @@ PCI devices (other than virtio):
 1b36:0002  PCI serial port (16550A) adapter (docs/specs/pci-serial.txt)
 1b36:0003  PCI Dual-port 16550A adapter (docs/specs/pci-serial.txt)
 1b36:0004  PCI Quad-port 16550A adapter (docs/specs/pci-serial.txt)
 1b36:0005  PCI test device (docs/specs/pci-testdev.txt)
+1b36:0006  PCI SD Card Host Controller Interface (SDHCI)

 All these devices are documented in docs/specs.
hw/char/serial.c

@@ -224,21 +224,23 @@ static gboolean serial_xmit(GIOChannel *chan, GIOCondition cond, void *opaque)
     SerialState *s = opaque;

     do {
+        assert(!(s->lsr & UART_LSR_TEMT));
         if (s->tsr_retry <= 0) {
+            assert(!(s->lsr & UART_LSR_THRE));
+
             if (s->fcr & UART_FCR_FE) {
-                if (fifo8_is_empty(&s->xmit_fifo)) {
-                    return FALSE;
-                }
+                assert(!fifo8_is_empty(&s->xmit_fifo));
                 s->tsr = fifo8_pop(&s->xmit_fifo);
                 if (!s->xmit_fifo.num) {
                     s->lsr |= UART_LSR_THRE;
                 }
-            } else if ((s->lsr & UART_LSR_THRE)) {
-                return FALSE;
             } else {
                 s->tsr = s->thr;
                 s->lsr |= UART_LSR_THRE;
-                s->lsr &= ~UART_LSR_TEMT;
             }
+            if ((s->lsr & UART_LSR_THRE) && !s->thr_ipending) {
+                s->thr_ipending = 1;
+                serial_update_irq(s);
+            }
         }

@@ -256,17 +258,13 @@ static gboolean serial_xmit(GIOChannel *chan, GIOCondition cond, void *opaque)
         } else {
             s->tsr_retry = 0;
         }

         /* Transmit another byte if it is already available. It is only
            possible when FIFO is enabled and not empty. */
-    } while ((s->fcr & UART_FCR_FE) && !fifo8_is_empty(&s->xmit_fifo));
+    } while (!(s->lsr & UART_LSR_THRE));

     s->last_xmit_ts = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
-
-    if (s->lsr & UART_LSR_THRE) {
-        s->lsr |= UART_LSR_TEMT;
-        s->thr_ipending = 1;
-        serial_update_irq(s);
-    }
+    s->lsr |= UART_LSR_TEMT;

     return FALSE;
 }

@@ -323,10 +321,10 @@ static void serial_ioport_write(void *opaque, hwaddr addr, uint64_t val,
                 fifo8_pop(&s->xmit_fifo);
             }
             fifo8_push(&s->xmit_fifo, s->thr);
-            s->lsr &= ~UART_LSR_TEMT;
         }
         s->thr_ipending = 0;
         s->lsr &= ~UART_LSR_THRE;
+        s->lsr &= ~UART_LSR_TEMT;
         serial_update_irq(s);
         if (s->tsr_retry <= 0) {
             serial_xmit(NULL, G_IO_OUT, s);

@@ -338,10 +336,12 @@ static void serial_ioport_write(void *opaque, hwaddr addr, uint64_t val,
             s->divider = (s->divider & 0x00ff) | (val << 8);
             serial_update_parameters(s);
         } else {
+            uint8_t changed = (s->ier ^ val) & 0x0f;
             s->ier = val & 0x0f;
             /* If the backend device is a real serial port, turn polling of the modem
-               status lines on physical port on or off depending on UART_IER_MSI state */
-            if (s->poll_msl >= 0) {
+             * status lines on physical port on or off depending on UART_IER_MSI state.
+             */
+            if ((changed & UART_IER_MSI) && s->poll_msl >= 0) {
                 if (s->ier & UART_IER_MSI) {
                     s->poll_msl = 1;
                     serial_update_msl(s);

@@ -350,8 +350,27 @@ static void serial_ioport_write(void *opaque, hwaddr addr, uint64_t val,
                     s->poll_msl = 0;
                 }
             }
-            if (s->lsr & UART_LSR_THRE) {
-                s->thr_ipending = 1;
+
+            /* Turning on the THRE interrupt on IER can trigger the interrupt
+             * if LSR.THRE=1, even if it had been masked before by reading IIR.
+             * This is not in the datasheet, but Windows relies on it.  It is
+             * unclear if THRE has to be resampled every time THRI becomes
+             * 1, or only on the rising edge.  Bochs does the latter, and Windows
+             * always toggles IER to all zeroes and back to all ones, so do the
+             * same.
+             *
+             * If IER.THRI is zero, thr_ipending is not used.  Set it to zero
+             * so that the thr_ipending subsection is not migrated.
+             */
+            if (changed & UART_IER_THRI) {
+                if ((s->ier & UART_IER_THRI) && (s->lsr & UART_LSR_THRE)) {
+                    s->thr_ipending = 1;
+                } else {
+                    s->thr_ipending = 0;
+                }
+            }
+
+            if (changed) {
                 serial_update_irq(s);
             }
         }

@@ -365,12 +384,15 @@ static void serial_ioport_write(void *opaque, hwaddr addr, uint64_t val,
             /* FIFO clear */

             if (val & UART_FCR_RFR) {
+                s->lsr &= ~(UART_LSR_DR | UART_LSR_BI);
                 timer_del(s->fifo_timeout_timer);
                 s->timeout_ipending = 0;
                 fifo8_reset(&s->recv_fifo);
             }

             if (val & UART_FCR_XFR) {
+                s->lsr |= UART_LSR_THRE;
+                s->thr_ipending = 1;
                 fifo8_reset(&s->xmit_fifo);
             }
hw/i386/kvm/apic.c

@@ -171,13 +171,16 @@ static const MemoryRegionOps kvm_apic_io_ops = {
     .endianness = DEVICE_NATIVE_ENDIAN,
 };

+static void kvm_apic_reset(APICCommonState *s)
+{
+    /* Not used by KVM, which uses the CPU mp_state instead.  */
+    s->wait_for_sipi = 0;
+}
+
 static void kvm_apic_realize(DeviceState *dev, Error **errp)
 {
     APICCommonState *s = APIC_COMMON(dev);

-    /* Not used by KVM, which uses the CPU mp_state instead.  */
-    s->wait_for_sipi = 0;
-
     memory_region_init_io(&s->io_memory, NULL, &kvm_apic_io_ops, s, "kvm-apic-msi",
                           APIC_SPACE_SIZE);

@@ -191,6 +194,7 @@ static void kvm_apic_class_init(ObjectClass *klass, void *data)
     APICCommonClass *k = APIC_COMMON_CLASS(klass);

     k->realize = kvm_apic_realize;
+    k->reset = kvm_apic_reset;
     k->set_base = kvm_apic_set_base;
     k->set_tpr = kvm_apic_set_tpr;
     k->get_tpr = kvm_apic_get_tpr;
hw/i386/kvm/clock.c

@@ -88,7 +88,7 @@ static void kvmclock_vm_state_change(void *opaque, int running,
     int ret;

     if (running) {
-        struct kvm_clock_data data;
+        struct kvm_clock_data data = {};
         uint64_t time_at_migration = kvmclock_current_nsec(s);

         s->clock_valid = false;

@@ -99,7 +99,6 @@ static void kvmclock_vm_state_change(void *opaque, int running,
         }

         data.clock = s->clock;
-        data.flags = 0;
         ret = kvm_vm_ioctl(kvm_state, KVM_SET_CLOCK, &data);
         if (ret < 0) {
             fprintf(stderr, "KVM_SET_CLOCK failed: %s\n", strerror(ret));
hw/i386/kvm/i8254.c

@@ -138,7 +138,7 @@ static void kvm_pit_get(PITCommonState *pit)
 static void kvm_pit_put(PITCommonState *pit)
 {
     KVMPITState *s = KVM_PIT(pit);
-    struct kvm_pit_state2 kpit;
+    struct kvm_pit_state2 kpit = {};
     struct kvm_pit_channel_state *kchan;
     struct PITChannelState *sc;
     int i, ret;
hw/i386/multiboot.c

@@ -54,6 +54,7 @@ enum {
     MBI_MODS_COUNT  = 20,
     MBI_MODS_ADDR   = 24,
     MBI_MMAP_ADDR   = 48,
+    MBI_BOOTLOADER  = 64,

     MBI_SIZE        = 88,

@@ -74,6 +75,7 @@ enum {
     MULTIBOOT_FLAGS_CMDLINE     = 1 << 2,
     MULTIBOOT_FLAGS_MODULES     = 1 << 3,
     MULTIBOOT_FLAGS_MMAP        = 1 << 6,
+    MULTIBOOT_FLAGS_BOOTLOADER  = 1 << 9,
 };

 typedef struct {

@@ -87,6 +89,8 @@ typedef struct {
     hwaddr offset_mbinfo;
     /* offset in buffer for cmdlines in bytes */
     hwaddr offset_cmdlines;
+    /* offset in buffer for bootloader name in bytes */
+    hwaddr offset_bootloader;
     /* offset of modules in bytes */
     hwaddr offset_mods;
     /* available slots for mb modules infos */

@@ -95,6 +99,8 @@ typedef struct {
     int mb_mods_count;
 } MultibootState;

+const char *bootloader_name = "qemu";
+
 static uint32_t mb_add_cmdline(MultibootState *s, const char *cmdline)
 {
     hwaddr p = s->offset_cmdlines;

@@ -105,6 +111,16 @@ static uint32_t mb_add_cmdline(MultibootState *s, const char *cmdline)
     return s->mb_buf_phys + p;
 }

+static uint32_t mb_add_bootloader(MultibootState *s, const char *bootloader)
+{
+    hwaddr p = s->offset_bootloader;
+    char *b = (char *)s->mb_buf + p;
+
+    memcpy(b, bootloader, strlen(bootloader) + 1);
+    s->offset_bootloader += strlen(b) + 1;
+    return s->mb_buf_phys + p;
+}
+
 static void mb_add_mod(MultibootState *s,
                        hwaddr start, hwaddr end,
                        hwaddr cmdline_phys)

@@ -241,9 +257,10 @@ int load_multiboot(FWCfgState *fw_cfg,
     mbs.mb_buf_size = TARGET_PAGE_ALIGN(mb_kernel_size);
     mbs.offset_mbinfo = mbs.mb_buf_size;

-    /* Calculate space for cmdlines and mb_mods */
+    /* Calculate space for cmdlines, bootloader name, and mb_mods */
     mbs.mb_buf_size += strlen(kernel_filename) + 1;
     mbs.mb_buf_size += strlen(kernel_cmdline) + 1;
+    mbs.mb_buf_size += strlen(bootloader_name) + 1;
     if (initrd_filename) {
         const char *r = initrd_filename;
         mbs.mb_buf_size += strlen(r) + 1;

@@ -257,9 +274,11 @@ int load_multiboot(FWCfgState *fw_cfg,
     mbs.mb_buf_size = TARGET_PAGE_ALIGN(mbs.mb_buf_size);

-    /* enlarge mb_buf to hold cmdlines and mb-info structs */
-    mbs.mb_buf = g_realloc(mbs.mb_buf, mbs.mb_buf_size);
-    mbs.offset_cmdlines = mbs.offset_mbinfo + mbs.mb_mods_avail * MB_MOD_SIZE;
+    /* enlarge mb_buf to hold cmdlines, bootloader, mb-info structs */
+    mbs.mb_buf = g_realloc(mbs.mb_buf, mbs.mb_buf_size);
+    mbs.offset_cmdlines = mbs.offset_mbinfo + mbs.mb_mods_avail * MB_MOD_SIZE;
+    mbs.offset_bootloader = mbs.offset_cmdlines + strlen(kernel_filename) + 1
+                            + strlen(kernel_cmdline) + 1;

     if (initrd_filename) {
         char *next_initrd, not_last;

@@ -306,6 +325,8 @@ int load_multiboot(FWCfgState *fw_cfg,
                               kernel_filename, kernel_cmdline);
     stl_p(bootinfo + MBI_CMDLINE, mb_add_cmdline(&mbs, kcmdline));

+    stl_p(bootinfo + MBI_BOOTLOADER, mb_add_bootloader(&mbs, bootloader_name));
+
     stl_p(bootinfo + MBI_MODS_ADDR, mbs.mb_buf_phys + mbs.offset_mbinfo);
     stl_p(bootinfo + MBI_MODS_COUNT, mbs.mb_mods_count); /* mods_count */

@@ -314,7 +335,8 @@ int load_multiboot(FWCfgState *fw_cfg,
               | MULTIBOOT_FLAGS_BOOT_DEVICE
               | MULTIBOOT_FLAGS_CMDLINE
               | MULTIBOOT_FLAGS_MODULES
-              | MULTIBOOT_FLAGS_MMAP);
+              | MULTIBOOT_FLAGS_MMAP
+              | MULTIBOOT_FLAGS_BOOTLOADER);
     stl_p(bootinfo + MBI_BOOT_DEVICE, 0x8000ffff); /* XXX: use the -boot switch? */
     stl_p(bootinfo + MBI_MMAP_ADDR, ADDR_E820_MAP);
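For reference, the MBI_BOOTLOADER field written above is the standard Multiboot boot_loader_name pointer at offset 64 of the info structure, valid when flag bit 9 is set (per the Multiboot 0.6.96 specification). A guest-side sketch of reading it back; the struct layout shown is a simplified stand-in, not QEMU code:

    #include <stdint.h>

    struct multiboot_info_min {
        uint32_t flags;                 /* offset 0 */
        uint8_t  pad[60];
        uint32_t boot_loader_name;      /* offset 64: physical address of a C string */
    };

    static const char *mb_loader_name(const struct multiboot_info_min *mbi)
    {
        if (mbi->flags & (1u << 9)) {   /* MULTIBOOT_FLAGS_BOOTLOADER */
            /* identity-mapped physical address, as at Multiboot entry */
            return (const char *)(uintptr_t)mbi->boot_loader_name;
        }
        return "unknown";
    }

With this series applied, the string returned for a QEMU-loaded Multiboot kernel is "qemu".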
hw/i386/pc.c

@@ -602,8 +602,7 @@ int e820_add_entry(uint64_t address, uint64_t length, uint32_t type)
     }

     /* new "etc/e820" file -- include ram too */
-    e820_table = g_realloc(e820_table,
-                           sizeof(struct e820_entry) * (e820_entries+1));
+    e820_table = g_renew(struct e820_entry, e820_table, e820_entries + 1);
     e820_table[e820_entries].address = cpu_to_le64(address);
     e820_table[e820_entries].length = cpu_to_le64(length);
     e820_table[e820_entries].type = cpu_to_le32(type);
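g_renew(Type, mem, count) used above is the type-safe GLib counterpart of g_realloc(mem, count * sizeof(Type)): the element type and count stay visible and the sizeof arithmetic cannot silently go wrong. A small standalone sketch with an illustrative entry type:

    #include <glib.h>

    typedef struct { guint64 address, length; guint32 type; } Entry;

    int main(void)
    {
        gsize n = 0;
        Entry *table = NULL;

        table = g_renew(Entry, table, n + 1);   /* grow the array by one element */
        table[n++] = (Entry){ .address = 0, .length = 0xa0000, .type = 1 };

        g_free(table);
        return 0;
    }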
hw/i386/pc_piix.c

@@ -308,9 +308,33 @@ static void pc_init_pci(MachineState *machine)
     pc_init1(machine, 1, 1);
 }

+static void pc_compat_2_2(MachineState *machine)
+{
+    x86_cpu_compat_set_features("kvm64", FEAT_1_EDX, 0, CPUID_VME);
+    x86_cpu_compat_set_features("kvm32", FEAT_1_EDX, 0, CPUID_VME);
+    x86_cpu_compat_set_features("Conroe", FEAT_1_EDX, 0, CPUID_VME);
+    x86_cpu_compat_set_features("Penryn", FEAT_1_EDX, 0, CPUID_VME);
+    x86_cpu_compat_set_features("Nehalem", FEAT_1_EDX, 0, CPUID_VME);
+    x86_cpu_compat_set_features("Westmere", FEAT_1_EDX, 0, CPUID_VME);
+    x86_cpu_compat_set_features("SandyBridge", FEAT_1_EDX, 0, CPUID_VME);
+    x86_cpu_compat_set_features("Haswell", FEAT_1_EDX, 0, CPUID_VME);
+    x86_cpu_compat_set_features("Broadwell", FEAT_1_EDX, 0, CPUID_VME);
+    x86_cpu_compat_set_features("Opteron_G1", FEAT_1_EDX, 0, CPUID_VME);
+    x86_cpu_compat_set_features("Opteron_G2", FEAT_1_EDX, 0, CPUID_VME);
+    x86_cpu_compat_set_features("Opteron_G3", FEAT_1_EDX, 0, CPUID_VME);
+    x86_cpu_compat_set_features("Opteron_G4", FEAT_1_EDX, 0, CPUID_VME);
+    x86_cpu_compat_set_features("Opteron_G5", FEAT_1_EDX, 0, CPUID_VME);
+    x86_cpu_compat_set_features("Haswell", FEAT_1_ECX, 0, CPUID_EXT_F16C);
+    x86_cpu_compat_set_features("Haswell", FEAT_1_ECX, 0, CPUID_EXT_RDRAND);
+    x86_cpu_compat_set_features("Broadwell", FEAT_1_ECX, 0, CPUID_EXT_F16C);
+    x86_cpu_compat_set_features("Broadwell", FEAT_1_ECX, 0, CPUID_EXT_RDRAND);
+}
+
 static void pc_compat_2_1(MachineState *machine)
 {
     PCMachineState *pcms = PC_MACHINE(machine);

+    pc_compat_2_2(machine);
     smbios_uuid_encoded = false;
     x86_cpu_compat_set_features("coreduo", FEAT_1_ECX, CPUID_EXT_VMX, 0);
     x86_cpu_compat_set_features("core2duo", FEAT_1_ECX, CPUID_EXT_VMX, 0);

@@ -385,6 +409,12 @@ static void pc_compat_1_2(MachineState *machine)
     x86_cpu_compat_kvm_no_autoenable(FEAT_KVM, KVM_FEATURE_PV_EOI);
 }

+static void pc_init_pci_2_2(MachineState *machine)
+{
+    pc_compat_2_2(machine);
+    pc_init_pci(machine);
+}
+
 static void pc_init_pci_2_1(MachineState *machine)
 {
     pc_compat_2_1(machine);

@@ -478,19 +508,27 @@ static void pc_xen_hvm_init(MachineState *machine)
     .desc = "Standard PC (i440FX + PIIX, 1996)", \
     .hot_add_cpu = pc_hot_add_cpu

-#define PC_I440FX_2_2_MACHINE_OPTIONS \
+#define PC_I440FX_2_3_MACHINE_OPTIONS \
     PC_I440FX_MACHINE_OPTIONS, \
     .default_machine_opts = "firmware=bios-256k.bin", \
     .default_display = "std"

-static QEMUMachine pc_i440fx_machine_v2_2 = {
-    PC_I440FX_2_2_MACHINE_OPTIONS,
-    .name = "pc-i440fx-2.2",
+static QEMUMachine pc_i440fx_machine_v2_3 = {
+    PC_I440FX_2_3_MACHINE_OPTIONS,
+    .name = "pc-i440fx-2.3",
     .alias = "pc",
     .init = pc_init_pci,
     .is_default = 1,
 };

+#define PC_I440FX_2_2_MACHINE_OPTIONS PC_I440FX_2_3_MACHINE_OPTIONS
+
+static QEMUMachine pc_i440fx_machine_v2_2 = {
+    PC_I440FX_2_2_MACHINE_OPTIONS,
+    .name = "pc-i440fx-2.2",
+    .init = pc_init_pci_2_2,
+};
+
 #define PC_I440FX_2_1_MACHINE_OPTIONS \
     PC_I440FX_MACHINE_OPTIONS, \
     .default_machine_opts = "firmware=bios-256k.bin"

@@ -928,6 +966,7 @@ static QEMUMachine xenfv_machine = {
 static void pc_machine_init(void)
 {
+    qemu_register_pc_machine(&pc_i440fx_machine_v2_3);
     qemu_register_pc_machine(&pc_i440fx_machine_v2_2);
     qemu_register_pc_machine(&pc_i440fx_machine_v2_1);
     qemu_register_pc_machine(&pc_i440fx_machine_v2_0);
hw/i386/pc_q35.c

@@ -287,10 +287,33 @@ static void pc_q35_init(MachineState *machine)
     }
 }

+static void pc_compat_2_2(MachineState *machine)
+{
+    x86_cpu_compat_set_features("kvm64", FEAT_1_EDX, 0, CPUID_VME);
+    x86_cpu_compat_set_features("kvm32", FEAT_1_EDX, 0, CPUID_VME);
+    x86_cpu_compat_set_features("Conroe", FEAT_1_EDX, 0, CPUID_VME);
+    x86_cpu_compat_set_features("Penryn", FEAT_1_EDX, 0, CPUID_VME);
+    x86_cpu_compat_set_features("Nehalem", FEAT_1_EDX, 0, CPUID_VME);
+    x86_cpu_compat_set_features("Westmere", FEAT_1_EDX, 0, CPUID_VME);
+    x86_cpu_compat_set_features("SandyBridge", FEAT_1_EDX, 0, CPUID_VME);
+    x86_cpu_compat_set_features("Haswell", FEAT_1_EDX, 0, CPUID_VME);
+    x86_cpu_compat_set_features("Broadwell", FEAT_1_EDX, 0, CPUID_VME);
+    x86_cpu_compat_set_features("Opteron_G1", FEAT_1_EDX, 0, CPUID_VME);
+    x86_cpu_compat_set_features("Opteron_G2", FEAT_1_EDX, 0, CPUID_VME);
+    x86_cpu_compat_set_features("Opteron_G3", FEAT_1_EDX, 0, CPUID_VME);
+    x86_cpu_compat_set_features("Opteron_G4", FEAT_1_EDX, 0, CPUID_VME);
+    x86_cpu_compat_set_features("Opteron_G5", FEAT_1_EDX, 0, CPUID_VME);
+    x86_cpu_compat_set_features("Haswell", FEAT_1_ECX, 0, CPUID_EXT_F16C);
+    x86_cpu_compat_set_features("Haswell", FEAT_1_ECX, 0, CPUID_EXT_RDRAND);
+    x86_cpu_compat_set_features("Broadwell", FEAT_1_ECX, 0, CPUID_EXT_F16C);
+    x86_cpu_compat_set_features("Broadwell", FEAT_1_ECX, 0, CPUID_EXT_RDRAND);
+}
+
 static void pc_compat_2_1(MachineState *machine)
 {
     PCMachineState *pcms = PC_MACHINE(machine);

+    pc_compat_2_2(machine);
     pcms->enforce_aligned_dimm = false;
     smbios_uuid_encoded = false;
     x86_cpu_compat_set_features("coreduo", FEAT_1_ECX, CPUID_EXT_VMX, 0);

@@ -334,6 +357,12 @@ static void pc_compat_1_4(MachineState *machine)
     x86_cpu_compat_set_features("Westmere", FEAT_1_ECX, 0, CPUID_EXT_PCLMULQDQ);
 }

+static void pc_q35_init_2_2(MachineState *machine)
+{
+    pc_compat_2_2(machine);
+    pc_q35_init(machine);
+}
+
 static void pc_q35_init_2_1(MachineState *machine)
 {
     pc_compat_2_1(machine);

@@ -377,16 +406,24 @@ static void pc_q35_init_1_4(MachineState *machine)
     .hot_add_cpu = pc_hot_add_cpu, \
     .units_per_default_bus = 1

-#define PC_Q35_2_2_MACHINE_OPTIONS \
+#define PC_Q35_2_3_MACHINE_OPTIONS \
     PC_Q35_MACHINE_OPTIONS, \
     .default_machine_opts = "firmware=bios-256k.bin", \
     .default_display = "std"

+static QEMUMachine pc_q35_machine_v2_3 = {
+    PC_Q35_2_3_MACHINE_OPTIONS,
+    .name = "pc-q35-2.3",
+    .alias = "q35",
+    .init = pc_q35_init,
+};
+
+#define PC_Q35_2_2_MACHINE_OPTIONS PC_Q35_2_3_MACHINE_OPTIONS
+
 static QEMUMachine pc_q35_machine_v2_2 = {
     PC_Q35_2_2_MACHINE_OPTIONS,
     .name = "pc-q35-2.2",
-    .alias = "q35",
-    .init = pc_q35_init,
+    .init = pc_q35_init_2_2,
 };

 #define PC_Q35_2_1_MACHINE_OPTIONS \

@@ -465,6 +502,7 @@ static QEMUMachine pc_q35_machine_v1_4 = {
 static void pc_q35_machine_init(void)
 {
+    qemu_register_pc_machine(&pc_q35_machine_v2_3);
     qemu_register_pc_machine(&pc_q35_machine_v2_2);
     qemu_register_pc_machine(&pc_q35_machine_v2_1);
     qemu_register_pc_machine(&pc_q35_machine_v2_0);
hw/i386/pc_sysfw.c

@@ -204,9 +204,7 @@ static void old_pc_system_rom_init(MemoryRegion *rom_memory, bool isapc_ram_fw)
         fprintf(stderr, "qemu: could not load PC BIOS '%s'\n", bios_name);
         exit(1);
     }
-    if (filename) {
-        g_free(filename);
-    }
+    g_free(filename);

     /* map the last 128KB of the BIOS in ISA space */
     isa_bios_size = bios_size;
hw/intc/apic_common.c

@@ -178,6 +178,7 @@ bool apic_next_timer(APICCommonState *s, int64_t current_time)
 void apic_init_reset(DeviceState *dev)
 {
     APICCommonState *s = APIC_COMMON(dev);
+    APICCommonClass *info = APIC_COMMON_GET_CLASS(s);
     int i;

     if (!s) {

@@ -206,6 +207,10 @@ void apic_init_reset(DeviceState *dev)
         timer_del(s->timer);
     }
     s->timer_expiry = -1;
+
+    if (info->reset) {
+        info->reset(s);
+    }
 }

 void apic_designate_bsp(DeviceState *dev)
hw/intc/openpic_kvm.c

@@ -248,7 +248,6 @@ static void kvm_openpic_realize(DeviceState *dev, Error **errp)
         kvm_irqchip_add_irq_route(kvm_state, i, 0, i);
     }

-    kvm_irqfds_allowed = true;
     kvm_msi_via_irqfd_allowed = true;
     kvm_gsi_routing_allowed = true;
|
|||
}
|
||||
|
||||
kvm_kernel_irqchip = true;
|
||||
kvm_irqfds_allowed = true;
|
||||
kvm_msi_via_irqfd_allowed = true;
|
||||
kvm_gsi_direct_mapping = true;
|
||||
|
||||
|
|
|
vfio PCI code  (file name not preserved in this mirror view)

@@ -406,7 +406,7 @@ static void vfio_enable_intx_kvm(VFIODevice *vdev)
     if (!VFIO_ALLOW_KVM_INTX || !kvm_irqfds_enabled() ||
         vdev->intx.route.mode != PCI_INTX_ENABLED ||
-        !kvm_check_extension(kvm_state, KVM_CAP_IRQFD_RESAMPLE)) {
+        !kvm_resamplefds_enabled()) {
         return;
     }

@@ -568,8 +568,7 @@ static int vfio_enable_intx(VFIODevice *vdev)
      * Only conditional to avoid generating error messages on platforms
      * where we won't actually use the result anyway.
      */
-    if (kvm_irqfds_enabled() &&
-        kvm_check_extension(kvm_state, KVM_CAP_IRQFD_RESAMPLE)) {
+    if (kvm_irqfds_enabled() && kvm_resamplefds_enabled()) {
         vdev->intx.route = pci_device_route_intx_to_irq(&vdev->pdev,
                                                         vdev->intx.pin);
     }
hw/scsi/lsi53c895a.c

@@ -781,7 +781,7 @@ static void lsi_do_command(LSIState *s)
     assert(s->current == NULL);
-    s->current = g_malloc0(sizeof(lsi_request));
+    s->current = g_new0(lsi_request, 1);
     s->current->tag = s->select_tag;
     s->current->req = scsi_req_new(dev, s->current->tag, s->current_lun, buf,
                                    s->current);
hw/scsi/megasas.c

@@ -1018,8 +1018,7 @@ static int megasas_pd_get_info_submit(SCSIDevice *sdev, int lun,
     size_t len, resid;

     if (!cmd->iov_buf) {
-        cmd->iov_buf = g_malloc(dcmd_size);
-        memset(cmd->iov_buf, 0, dcmd_size);
+        cmd->iov_buf = g_malloc0(dcmd_size);
         info = cmd->iov_buf;
         info->inquiry_data[0] = 0x7f;   /* Force PQual 0x3, PType 0x1f */
         info->vpd_page83[0] = 0x7f;

@@ -1221,8 +1220,7 @@ static int megasas_ld_get_info_submit(SCSIDevice *sdev, int lun,
     uint64_t ld_size;

     if (!cmd->iov_buf) {
-        cmd->iov_buf = g_malloc(dcmd_size);
-        memset(cmd->iov_buf, 0x0, dcmd_size);
+        cmd->iov_buf = g_malloc0(dcmd_size);
         info = cmd->iov_buf;
         megasas_setup_inquiry(cdb, 0x83, sizeof(info->vpd_page83));
         req = scsi_req_new(sdev, cmd->index, lun, cdb, cmd);
hw/scsi/scsi-disk.c

@@ -49,6 +49,7 @@ do { printf("scsi-disk: " fmt , ## __VA_ARGS__); } while (0)
 #define DEFAULT_DISCARD_GRANULARITY 4096
 #define DEFAULT_MAX_UNMAP_SIZE      (1 << 30)   /* 1 GB */
+#define DEFAULT_MAX_IO_SIZE         INT_MAX     /* 2 GB - 1 block */

 typedef struct SCSIDiskState SCSIDiskState;

@@ -79,6 +80,7 @@ struct SCSIDiskState
     uint64_t port_wwn;
     uint16_t port_index;
     uint64_t max_unmap_size;
+    uint64_t max_io_size;
     QEMUBH *bh;
     char *version;
     char *serial;

@@ -635,6 +637,8 @@ static int scsi_disk_emulate_inquiry(SCSIRequest *req, uint8_t *outbuf)
         s->qdev.conf.opt_io_size / s->qdev.blocksize;
     unsigned int max_unmap_sectors =
         s->max_unmap_size / s->qdev.blocksize;
+    unsigned int max_io_sectors =
+        s->max_io_size / s->qdev.blocksize;

     if (s->qdev.type == TYPE_ROM) {
         DPRINTF("Inquiry (EVPD[%02X] not supported for CDROM\n",

@@ -651,6 +655,12 @@ static int scsi_disk_emulate_inquiry(SCSIRequest *req, uint8_t *outbuf)
     outbuf[6] = (min_io_size >> 8) & 0xff;
     outbuf[7] = min_io_size & 0xff;

+    /* maximum transfer length */
+    outbuf[8] = (max_io_sectors >> 24) & 0xff;
+    outbuf[9] = (max_io_sectors >> 16) & 0xff;
+    outbuf[10] = (max_io_sectors >> 8) & 0xff;
+    outbuf[11] = max_io_sectors & 0xff;
+
     /* optimal transfer length */
     outbuf[12] = (opt_io_size >> 24) & 0xff;
     outbuf[13] = (opt_io_size >> 16) & 0xff;

@@ -674,6 +684,17 @@ static int scsi_disk_emulate_inquiry(SCSIRequest *req, uint8_t *outbuf)
     outbuf[29] = (unmap_sectors >> 16) & 0xff;
     outbuf[30] = (unmap_sectors >> 8) & 0xff;
     outbuf[31] = unmap_sectors & 0xff;
+
+    /* max write same size */
+    outbuf[36] = 0;
+    outbuf[37] = 0;
+    outbuf[38] = 0;
+    outbuf[39] = 0;
+
+    outbuf[40] = (max_io_sectors >> 24) & 0xff;
+    outbuf[41] = (max_io_sectors >> 16) & 0xff;
+    outbuf[42] = (max_io_sectors >> 8) & 0xff;
+    outbuf[43] = max_io_sectors & 0xff;
     break;
 }
 case 0xb2: /* thin provisioning */

@@ -2579,6 +2600,8 @@ static Property scsi_hd_properties[] = {
     DEFINE_PROP_UINT16("port_index", SCSIDiskState, port_index, 0),
     DEFINE_PROP_UINT64("max_unmap_size", SCSIDiskState, max_unmap_size,
                        DEFAULT_MAX_UNMAP_SIZE),
+    DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size,
+                       DEFAULT_MAX_IO_SIZE),
     DEFINE_BLOCK_CHS_PROPERTIES(SCSIDiskState, qdev.conf),
     DEFINE_PROP_END_OF_LIST(),
 };

@@ -2625,6 +2648,8 @@ static Property scsi_cd_properties[] = {
     DEFINE_PROP_UINT64("wwn", SCSIDiskState, wwn, 0),
     DEFINE_PROP_UINT64("port_wwn", SCSIDiskState, port_wwn, 0),
     DEFINE_PROP_UINT16("port_index", SCSIDiskState, port_index, 0),
+    DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size,
+                       DEFAULT_MAX_IO_SIZE),
     DEFINE_PROP_END_OF_LIST(),
 };

@@ -2690,6 +2715,8 @@ static Property scsi_disk_properties[] = {
     DEFINE_PROP_UINT16("port_index", SCSIDiskState, port_index, 0),
     DEFINE_PROP_UINT64("max_unmap_size", SCSIDiskState, max_unmap_size,
                        DEFAULT_MAX_UNMAP_SIZE),
+    DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size,
+                       DEFAULT_MAX_IO_SIZE),
     DEFINE_PROP_END_OF_LIST(),
 };
|
|||
#endif
|
||||
|
||||
if (r->req.cmd.xfer == 0) {
|
||||
if (r->buf != NULL)
|
||||
g_free(r->buf);
|
||||
g_free(r->buf);
|
||||
r->buflen = 0;
|
||||
r->buf = NULL;
|
||||
/* The request is used as the AIO opaque value, so add a ref. */
|
||||
|
@ -314,8 +313,7 @@ static int32_t scsi_send_command(SCSIRequest *req, uint8_t *cmd)
|
|||
}
|
||||
|
||||
if (r->buflen != r->req.cmd.xfer) {
|
||||
if (r->buf != NULL)
|
||||
g_free(r->buf);
|
||||
g_free(r->buf);
|
||||
r->buf = g_malloc(r->req.cmd.xfer);
|
||||
r->buflen = r->req.cmd.xfer;
|
||||
}
|
||||
|
|
|
hw/scsi/virtio-scsi.c

@@ -829,7 +829,7 @@ void virtio_scsi_common_realize(DeviceState *dev, Error **errp,
         virtio_cleanup(vdev);
         return;
     }
-    s->cmd_vqs = g_malloc0(s->conf.num_queues * sizeof(VirtQueue *));
+    s->cmd_vqs = g_new0(VirtQueue *, s->conf.num_queues);
     s->sense_size = VIRTIO_SCSI_SENSE_SIZE;
     s->cdb_size = VIRTIO_SCSI_CDB_SIZE;
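The scsi allocation cleanups above (lsi53c895a, megasas, scsi-generic, virtio-scsi) all follow the same pattern: g_new0(Type, n) or g_malloc0(size) instead of g_malloc followed by memset, and unconditional g_free instead of a NULL check (g_free(NULL) is a no-op). g_new0 also keeps the element type and count visible and, in current GLib, checks the size multiplication for overflow. A minimal before/after sketch with an illustrative type:

    #include <glib.h>
    #include <string.h>

    typedef struct { int tag; void *req; } request_like;   /* stand-in type */

    static void example(void)
    {
        /* before: size computed by hand, zeroing done separately */
        request_like *a = g_malloc(sizeof(request_like));
        memset(a, 0, sizeof(request_like));

        /* after: one call, typed, zero-initialized */
        request_like *b = g_new0(request_like, 1);

        g_free(a);
        g_free(b);
    }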
hw/sd/sdhci.c  (209 lines changed)

@@ -74,10 +74,10 @@
 /* Maximum clock frequency for SDclock in MHz
  * value in range 10-63 MHz, 0 - not defined */
-#define SDHC_CAPAB_BASECLKFREQ 0ul
+#define SDHC_CAPAB_BASECLKFREQ 52ul
 #define SDHC_CAPAB_TOUNIT 1ul /* Timeout clock unit 0 - kHz, 1 - MHz */
 /* Timeout clock frequency 1-63, 0 - not defined */
-#define SDHC_CAPAB_TOCLKFREQ 0ul
+#define SDHC_CAPAB_TOCLKFREQ 52ul

 /* Now check all parameters and calculate CAPABILITIES REGISTER value */
 #if SDHC_CAPAB_64BITBUS > 1 || SDHC_CAPAB_18V > 1 || SDHC_CAPAB_30V > 1 || \

@@ -198,12 +198,7 @@ static void sdhci_reset(SDHCIState *s)
     s->stopped_state = sdhc_not_stopped;
 }

-static void sdhci_do_data_transfer(void *opaque)
-{
-    SDHCIState *s = (SDHCIState *)opaque;
-
-    SDHCI_GET_CLASS(s)->data_transfer(s);
-}
+static void sdhci_data_transfer(void *opaque);

 static void sdhci_send_command(SDHCIState *s)
 {

@@ -261,7 +256,7 @@ static void sdhci_send_command(SDHCIState *s)
     if (s->blksize && (s->cmdreg & SDHC_CMD_DATA_PRESENT)) {
         s->data_count = 0;
-        sdhci_do_data_transfer(s);
+        sdhci_data_transfer(s);
     }
 }

@@ -367,9 +362,9 @@ static uint32_t sdhci_read_dataport(SDHCIState *s, unsigned size)
         /* stop at gap request */
         (s->stopped_state == sdhc_gap_read &&
          !(s->prnsts & SDHC_DAT_LINE_ACTIVE))) {
-        SDHCI_GET_CLASS(s)->end_data_transfer(s);
+        sdhci_end_transfer(s);
     } else { /* if there are more data, read next block from card */
-        SDHCI_GET_CLASS(s)->read_block_from_card(s);
+        sdhci_read_block_from_card(s);
     }
     break;

@@ -410,7 +405,7 @@ static void sdhci_write_block_to_card(SDHCIState *s)
-        SDHCI_GET_CLASS(s)->end_data_transfer(s);
+        sdhci_end_transfer(s);
@@ -422,7 +417,7 @@ static void sdhci_write_block_to_card(SDHCIState *s)
-        SDHCI_GET_CLASS(s)->end_data_transfer(s);
+        sdhci_end_transfer(s);
@@ -537,7 +532,7 @@ static void sdhci_sdma_transfer_multi_blocks(SDHCIState *s)
-        SDHCI_GET_CLASS(s)->end_data_transfer(s);
+        sdhci_end_transfer(s);
@@ -571,7 +566,7 @@ static void sdhci_sdma_transfer_single_block(SDHCIState *s)
-    SDHCI_GET_CLASS(s)->end_data_transfer(s);
+    sdhci_end_transfer(s);
@@ -758,7 +753,7 @@ static void sdhci_do_adma(SDHCIState *s)
-        SDHCI_GET_CLASS(s)->end_data_transfer(s);
+        sdhci_end_transfer(s);

@@ -771,9 +766,9 @@
 /* Perform data transfer according to controller configuration */
-static void sdhci_data_transfer(SDHCIState *s)
+static void sdhci_data_transfer(void *opaque)
 {
-    SDHCIClass *k = SDHCI_GET_CLASS(s);
+    SDHCIState *s = (SDHCIState *)opaque;

     if (s->trnmod & SDHC_TRNS_DMA) {
         switch (SDHC_DMA_TYPE(s->hostctl)) {

@@ -784,9 +779,9 @@ static void sdhci_data_transfer(void *opaque)
             if ((s->blkcnt == 1) || !(s->trnmod & SDHC_TRNS_MULTI)) {
-                k->do_sdma_single(s);
+                sdhci_sdma_transfer_single_block(s);
             } else {
-                k->do_sdma_multi(s);
+                sdhci_sdma_transfer_multi_blocks(s);
             }
             break;

@@ -796,7 +791,7 @@
-            k->do_adma(s);
+            sdhci_do_adma(s);
@@ -804,7 +799,7 @@
-            k->do_adma(s);
+            sdhci_do_adma(s);
@@ -813,7 +808,7 @@
-            k->do_adma(s);
+            sdhci_do_adma(s);

@@ -823,11 +818,11 @@ static void sdhci_data_transfer(void *opaque)
         if ((s->trnmod & SDHC_TRNS_READ) && sd_data_ready(s->card)) {
             s->prnsts |= SDHC_DOING_READ | SDHC_DATA_INHIBIT |
                          SDHC_DAT_LINE_ACTIVE;
-            SDHCI_GET_CLASS(s)->read_block_from_card(s);
+            sdhci_read_block_from_card(s);
         } else {
             s->prnsts |= SDHC_DOING_WRITE | SDHC_DAT_LINE_ACTIVE |
                          SDHC_SPACE_AVAILABLE | SDHC_DATA_INHIBIT;
-            SDHCI_GET_CLASS(s)->write_block_to_card(s);
+            sdhci_write_block_to_card(s);
         }
     }
 }

@@ -858,8 +853,9 @@ sdhci_buff_access_is_sequential(SDHCIState *s, unsigned byte_num)
-static uint32_t sdhci_read(SDHCIState *s, unsigned int offset, unsigned size)
+static uint64_t sdhci_read(void *opaque, hwaddr offset, unsigned size)
 {
+    SDHCIState *s = (SDHCIState *)opaque;
     uint32_t ret = 0;

     switch (offset & ~0x3) {

@@ -880,8 +876,8 @@
     case SDHC_BDATA:
         if (sdhci_buff_access_is_sequential(s, offset - SDHC_BDATA)) {
-            ret = SDHCI_GET_CLASS(s)->bdata_read(s, size);
-            DPRINT_L2("read %ub: addr[0x%04x] -> %u(0x%x)\n", size, offset,
+            ret = sdhci_read_dataport(s, size);
+            DPRINT_L2("read %ub: addr[0x%04x] -> %u(0x%x)\n", size, (int)offset,
                       ret, ret);
             return ret;
         }

@@ -927,13 +923,13 @@
     default:
-        ERRPRINT("bad %ub read: addr[0x%04x]\n", size, offset);
+        ERRPRINT("bad %ub read: addr[0x%04x]\n", size, (int)offset);
         break;
     }

     ret >>= (offset & 0x3) * 8;
     ret &= (1ULL << (size * 8)) - 1;
-    DPRINT_L2("read %ub: addr[0x%04x] -> %u(0x%x)\n", size, offset, ret, ret);
+    DPRINT_L2("read %ub: addr[0x%04x] -> %u(0x%x)\n", size, (int)offset, ret, ret);
     return ret;
 }

@@ -948,10 +944,10 @@ static inline void sdhci_blkgap_write(SDHCIState *s, uint8_t value)
         (s->blkgap & SDHC_STOP_AT_GAP_REQ) == 0) {
         if (s->stopped_state == sdhc_gap_read) {
             s->prnsts |= SDHC_DAT_LINE_ACTIVE | SDHC_DOING_READ;
-            SDHCI_GET_CLASS(s)->read_block_from_card(s);
+            sdhci_read_block_from_card(s);
         } else {
             s->prnsts |= SDHC_DAT_LINE_ACTIVE | SDHC_DOING_WRITE;
-            SDHCI_GET_CLASS(s)->write_block_to_card(s);
+            sdhci_write_block_to_card(s);
         }
         s->stopped_state = sdhc_not_stopped;
     } else if (!s->stopped_state && (value & SDHC_STOP_AT_GAP_REQ)) {

@@ -967,7 +963,7 @@ static inline void sdhci_reset_write(SDHCIState *s, uint8_t value)
     switch (value) {
     case SDHC_RESET_ALL:
-        DEVICE_GET_CLASS(s)->reset(DEVICE(s));
+        sdhci_reset(s);
         break;
     case SDHC_RESET_CMD:
         s->prnsts &= ~SDHC_CMD_INHIBIT;

@@ -987,10 +983,12 @@
 static void
-sdhci_write(SDHCIState *s, unsigned int offset, uint32_t value, unsigned size)
+sdhci_write(void *opaque, hwaddr offset, uint64_t val, unsigned size)
 {
+    SDHCIState *s = (SDHCIState *)opaque;
     unsigned shift = 8 * (offset & 0x3);
     uint32_t mask = ~(((1ULL << (size * 8)) - 1) << shift);
+    uint32_t value = val;
     value <<= shift;

     switch (offset & ~0x3) {

@@ -1000,7 +998,7 @@
     /* Writing to last byte of sdmasysad might trigger transfer */
     if (!(mask & 0xFF000000) && TRANSFERRING_DATA(s->prnsts) && s->blkcnt &&
         s->blksize && SDHC_DMA_TYPE(s->hostctl) == SDHC_CTRL_SDMA) {
-        SDHCI_GET_CLASS(s)->do_sdma_multi(s);
+        sdhci_sdma_transfer_multi_blocks(s);
     }
     break;
 case SDHC_BLKSIZE:

@@ -1022,15 +1020,15 @@
     MASKED_WRITE(s->cmdreg, mask >> 16, value >> 16);

     /* Writing to the upper byte of CMDREG triggers SD command generation */
-    if ((mask & 0xFF000000) || !SDHCI_GET_CLASS(s)->can_issue_command(s)) {
+    if ((mask & 0xFF000000) || !sdhci_can_issue_command(s)) {
         break;
     }

-    SDHCI_GET_CLASS(s)->send_command(s);
+    sdhci_send_command(s);
     break;
 case SDHC_BDATA:
     if (sdhci_buff_access_is_sequential(s, offset - SDHC_BDATA)) {
-        SDHCI_GET_CLASS(s)->bdata_write(s, value >> shift, size);
+        sdhci_write_dataport(s, value >> shift, size);
     }
     break;
 case SDHC_HOSTCTL:

@@ -1111,32 +1109,16 @@
     default:
         ERRPRINT("bad %ub write offset: addr[0x%04x] <- %u(0x%x)\n",
-                 size, offset, value >> shift, value >> shift);
+                 size, (int)offset, value >> shift, value >> shift);
         break;
     }
     DPRINT_L2("write %ub: addr[0x%04x] <- %u(0x%x)\n",
-              size, offset, value >> shift, value >> shift);
-}
-
-static uint64_t
-sdhci_readfn(void *opaque, hwaddr offset, unsigned size)
-{
-    SDHCIState *s = (SDHCIState *)opaque;
-
-    return SDHCI_GET_CLASS(s)->mem_read(s, offset, size);
-}
-
-static void
-sdhci_writefn(void *opaque, hwaddr off, uint64_t val, unsigned sz)
-{
-    SDHCIState *s = (SDHCIState *)opaque;
-
-    SDHCI_GET_CLASS(s)->mem_write(s, off, val, sz);
+              size, (int)offset, value >> shift, value >> shift);
 }

 static const MemoryRegionOps sdhci_mmio_ops = {
-    .read = sdhci_readfn,
-    .write = sdhci_writefn,
+    .read = sdhci_read,
+    .write = sdhci_write,
     .valid = {
         .min_access_size = 1,
         .max_access_size = 4,

@@ -1160,9 +1142,8 @@ static inline unsigned int sdhci_get_fifolen(SDHCIState *s)
     }
 }

-static void sdhci_initfn(Object *obj)
+static void sdhci_initfn(SDHCIState *s)
 {
-    SDHCIState *s = SDHCI(obj);
     DriveInfo *di;

     di = drive_get_next(IF_SD);

@@ -1175,13 +1156,11 @@
     sd_set_cb(s->card, s->ro_cb, s->eject_cb);

     s->insert_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, sdhci_raise_insertion_irq, s);
-    s->transfer_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, sdhci_do_data_transfer, s);
+    s->transfer_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, sdhci_data_transfer, s);
 }

-static void sdhci_uninitfn(Object *obj)
+static void sdhci_uninitfn(SDHCIState *s)
 {
-    SDHCIState *s = SDHCI(obj);
-
     timer_del(s->insert_timer);
     timer_free(s->insert_timer);
     timer_del(s->transfer_timer);

@@ -1241,9 +1220,64 @@ static Property sdhci_properties[] = {
     DEFINE_PROP_END_OF_LIST(),
 };

-static void sdhci_realize(DeviceState *dev, Error ** errp)
+static int sdhci_pci_init(PCIDevice *dev)
 {
-    SDHCIState *s = SDHCI(dev);
+    SDHCIState *s = PCI_SDHCI(dev);
+    dev->config[PCI_CLASS_PROG] = 0x01; /* Standard Host supported DMA */
+    dev->config[PCI_INTERRUPT_PIN] = 0x01; /* interrupt pin A */
+    sdhci_initfn(s);
     s->buf_maxsz = sdhci_get_fifolen(s);
     s->fifo_buffer = g_malloc0(s->buf_maxsz);
+    s->irq = pci_allocate_irq(dev);
+    memory_region_init_io(&s->iomem, OBJECT(s), &sdhci_mmio_ops, s, "sdhci",
+                          SDHC_REGISTERS_MAP_SIZE);
+    pci_register_bar(dev, 0, 0, &s->iomem);
+    return 0;
+}
+
+static void sdhci_pci_exit(PCIDevice *dev)
+{
+    SDHCIState *s = PCI_SDHCI(dev);
+    sdhci_uninitfn(s);
+}
+
+static void sdhci_pci_class_init(ObjectClass *klass, void *data)
+{
+    DeviceClass *dc = DEVICE_CLASS(klass);
+    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
+
+    k->init = sdhci_pci_init;
+    k->exit = sdhci_pci_exit;
+    k->vendor_id = PCI_VENDOR_ID_REDHAT;
+    k->device_id = PCI_DEVICE_ID_REDHAT_SDHCI;
+    k->class_id = PCI_CLASS_SYSTEM_SDHCI;
+    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
+    dc->vmsd = &sdhci_vmstate;
+    dc->props = sdhci_properties;
+}
+
+static const TypeInfo sdhci_pci_info = {
+    .name = TYPE_PCI_SDHCI,
+    .parent = TYPE_PCI_DEVICE,
+    .instance_size = sizeof(SDHCIState),
+    .class_init = sdhci_pci_class_init,
+};
+
+static void sdhci_sysbus_init(Object *obj)
+{
+    SDHCIState *s = SYSBUS_SDHCI(obj);
+    sdhci_initfn(s);
+}
+
+static void sdhci_sysbus_finalize(Object *obj)
+{
+    SDHCIState *s = SYSBUS_SDHCI(obj);
+    sdhci_uninitfn(s);
+}
+
+static void sdhci_sysbus_realize(DeviceState *dev, Error ** errp)
+{
+    SDHCIState *s = SYSBUS_SDHCI(dev);
     SysBusDevice *sbd = SYS_BUS_DEVICE(dev);

     s->buf_maxsz = sdhci_get_fifolen(s);

@@ -1254,51 +1288,28 @@
     sysbus_init_mmio(sbd, &s->iomem);
 }

-static void sdhci_generic_reset(DeviceState *ds)
-{
-    SDHCIState *s = SDHCI(ds);
-    SDHCI_GET_CLASS(s)->reset(s);
-}
-
-static void sdhci_class_init(ObjectClass *klass, void *data)
+static void sdhci_sysbus_class_init(ObjectClass *klass, void *data)
 {
     DeviceClass *dc = DEVICE_CLASS(klass);
-    SDHCIClass *k = SDHCI_CLASS(klass);

     dc->vmsd = &sdhci_vmstate;
     dc->props = sdhci_properties;
-    dc->reset = sdhci_generic_reset;
-    dc->realize = sdhci_realize;
-
-    k->reset = sdhci_reset;
-    k->mem_read = sdhci_read;
-    k->mem_write = sdhci_write;
-    k->send_command = sdhci_send_command;
-    k->can_issue_command = sdhci_can_issue_command;
-    k->data_transfer = sdhci_data_transfer;
-    k->end_data_transfer = sdhci_end_transfer;
-    k->do_sdma_single = sdhci_sdma_transfer_single_block;
-    k->do_sdma_multi = sdhci_sdma_transfer_multi_blocks;
-    k->do_adma = sdhci_do_adma;
-    k->read_block_from_card = sdhci_read_block_from_card;
-    k->write_block_to_card = sdhci_write_block_to_card;
-    k->bdata_read = sdhci_read_dataport;
-    k->bdata_write = sdhci_write_dataport;
+    dc->realize = sdhci_sysbus_realize;
 }

-static const TypeInfo sdhci_type_info = {
-    .name = TYPE_SDHCI,
+static const TypeInfo sdhci_sysbus_info = {
+    .name = TYPE_SYSBUS_SDHCI,
     .parent = TYPE_SYS_BUS_DEVICE,
     .instance_size = sizeof(SDHCIState),
-    .instance_init = sdhci_initfn,
-    .instance_finalize = sdhci_uninitfn,
-    .class_init = sdhci_class_init,
-    .class_size = sizeof(SDHCIClass)
+    .instance_init = sdhci_sysbus_init,
+    .instance_finalize = sdhci_sysbus_finalize,
+    .class_init = sdhci_sysbus_class_init,
 };

 static void sdhci_register_types(void)
 {
-    type_register_static(&sdhci_type_info);
+    type_register_static(&sdhci_pci_info);
+    type_register_static(&sdhci_sysbus_info);
 }

 type_init(sdhci_register_types)
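With the PCI front end above, the same SDHCIState core is exposed either as the existing sysbus device ("generic-sdhci") or as a PCI function using the newly reserved ID (vendor 0x1b36, device 0x0006, class 0x0805). On a PC machine the PCI variant would presumably be instantiated along the lines of "-device sdhci-pci" together with an if=sd drive for the card, since sdhci_initfn() still picks up the card via drive_get_next(IF_SD); the exact command line is an assumption, not something stated in this commit.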
hw/sd/sdhci.h

@@ -26,6 +26,7 @@
 #define SDHCI_H

 #include "qemu-common.h"
+#include "hw/pci/pci.h"
 #include "hw/sysbus.h"
 #include "hw/sd.h"

@@ -232,7 +233,10 @@ enum {
 /* SD/MMC host controller state */
 typedef struct SDHCIState {
-    SysBusDevice busdev;
+    union {
+        PCIDevice pcidev;
+        SysBusDevice busdev;
+    };
     SDState *card;
     MemoryRegion iomem;

@@ -279,34 +283,13 @@ typedef struct SDHCIState {
     /* RO Host Controller Version Register always reads as 0x2401 */
 } SDHCIState;

-typedef struct SDHCIClass {
-    SysBusDeviceClass busdev_class;
-
-    void (*reset)(SDHCIState *s);
-    uint32_t (*mem_read)(SDHCIState *s, unsigned int offset, unsigned size);
-    void (*mem_write)(SDHCIState *s, unsigned int offset, uint32_t value,
-                      unsigned size);
-    void (*send_command)(SDHCIState *s);
-    bool (*can_issue_command)(SDHCIState *s);
-    void (*data_transfer)(SDHCIState *s);
-    void (*end_data_transfer)(SDHCIState *s);
-    void (*do_sdma_single)(SDHCIState *s);
-    void (*do_sdma_multi)(SDHCIState *s);
-    void (*do_adma)(SDHCIState *s);
-    void (*read_block_from_card)(SDHCIState *s);
-    void (*write_block_to_card)(SDHCIState *s);
-    uint32_t (*bdata_read)(SDHCIState *s, unsigned size);
-    void (*bdata_write)(SDHCIState *s, uint32_t value, unsigned size);
-} SDHCIClass;
-
 extern const VMStateDescription sdhci_vmstate;

-#define TYPE_SDHCI "generic-sdhci"
-#define SDHCI(obj) \
-     OBJECT_CHECK(SDHCIState, (obj), TYPE_SDHCI)
-#define SDHCI_CLASS(klass) \
-     OBJECT_CLASS_CHECK(SDHCIClass, (klass), TYPE_SDHCI)
-#define SDHCI_GET_CLASS(obj) \
-     OBJECT_GET_CLASS(SDHCIClass, (obj), TYPE_SDHCI)
+#define TYPE_PCI_SDHCI "sdhci-pci"
+#define PCI_SDHCI(obj) OBJECT_CHECK(SDHCIState, (obj), TYPE_PCI_SDHCI)
+
+#define TYPE_SYSBUS_SDHCI "generic-sdhci"
+#define SYSBUS_SDHCI(obj) \
+     OBJECT_CHECK(SDHCIState, (obj), TYPE_SYSBUS_SDHCI)

 #endif /* SDHCI_H */
include/exec/exec-all.h

@@ -145,6 +145,7 @@ struct TranslationBlock {
     uint16_t cflags;    /* compile flags */
 #define CF_COUNT_MASK  0x7fff
 #define CF_LAST_IO     0x8000 /* Last insn may be an IO access.  */
+#define CF_NOCACHE     0x10000 /* To be freed after execution */

     void *tc_ptr;    /* pointer to the translated code */
     /* next matching tb for physical address. */
include/hw/i386/apic_internal.h

@@ -89,6 +89,7 @@ typedef struct APICCommonClass
     void (*external_nmi)(APICCommonState *s);
     void (*pre_save)(APICCommonState *s);
     void (*post_load)(APICCommonState *s);
+    void (*reset)(APICCommonState *s);
 } APICCommonClass;

 struct APICCommonState {
include/hw/pci/pci.h

@@ -88,6 +88,7 @@
 #define PCI_DEVICE_ID_REDHAT_SERIAL2     0x0003
 #define PCI_DEVICE_ID_REDHAT_SERIAL4     0x0004
 #define PCI_DEVICE_ID_REDHAT_TEST        0x0005
+#define PCI_DEVICE_ID_REDHAT_SDHCI       0x0006
 #define PCI_DEVICE_ID_REDHAT_QXL         0x0100

 #define FMT_PCIBUS                      PRIx64
include/hw/pci/pci_ids.h

@@ -31,6 +31,7 @@
 #define PCI_CLASS_MEMORY_RAM             0x0500

+#define PCI_CLASS_SYSTEM_SDHCI           0x0805
 #define PCI_CLASS_SYSTEM_OTHER           0x0880

 #define PCI_CLASS_SERIAL_USB             0x0c03
|
@@ -36,12 +36,20 @@
* is suspended, and it will reflect system time changes the host may
* undergo (e.g. due to NTP). The host clock has the same precision as
* the virtual clock.
*
* @QEMU_CLOCK_VIRTUAL_RT: realtime clock used for icount warp
*
* Outside icount mode, this clock is the same as @QEMU_CLOCK_VIRTUAL.
* In icount mode, this clock counts nanoseconds while the virtual
* machine is running. It is used to increase @QEMU_CLOCK_VIRTUAL
* while the CPUs are sleeping and thus not executing instructions.
*/

typedef enum {
QEMU_CLOCK_REALTIME = 0,
QEMU_CLOCK_VIRTUAL = 1,
QEMU_CLOCK_HOST = 2,
QEMU_CLOCK_VIRTUAL_RT = 3,
QEMU_CLOCK_MAX
} QEMUClockType;

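A minimal sketch of how the new clock type is meant to be read. Only qemu_clock_get_ns() and QEMU_CLOCK_VIRTUAL_RT come from this series; the helper name below is made up for illustration. Because QEMU_CLOCK_VIRTUAL_RT keeps advancing in real time while the guest runs, the difference between two reads bounds how far the icount warp logic may push QEMU_CLOCK_VIRTUAL forward during an idle period.

    /* Sketch: measure real idle time with the new clock.
     * icount_warp_span() is an illustrative helper, not a QEMU function. */
    #include "qemu/timer.h"

    static int64_t icount_warp_span(int64_t warp_start_ns)
    {
        int64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT);

        return now - warp_start_ns;   /* nanoseconds of wall-clock idling */
    }
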
@@ -743,6 +751,7 @@ static inline int64_t get_clock(void)
#endif

/* icount */
int64_t cpu_get_icount_raw(void);
int64_t cpu_get_icount(void);
int64_t cpu_get_clock(void);
int64_t cpu_get_clock_offset(void);

@@ -45,6 +45,7 @@ extern bool kvm_async_interrupts_allowed;
extern bool kvm_halt_in_kernel_allowed;
extern bool kvm_eventfds_allowed;
extern bool kvm_irqfds_allowed;
extern bool kvm_resamplefds_allowed;
extern bool kvm_msi_via_irqfd_allowed;
extern bool kvm_gsi_routing_allowed;
extern bool kvm_gsi_direct_mapping;

@@ -101,6 +102,15 @@ extern bool kvm_readonly_mem_allowed;
*/
#define kvm_irqfds_enabled() (kvm_irqfds_allowed)

/**
* kvm_resamplefds_enabled:
*
* Returns: true if we can use resamplefds to inject interrupts into
* a KVM CPU (ie the kernel supports resamplefds and we are running
* with a configuration where it is meaningful to use them).
*/
#define kvm_resamplefds_enabled() (kvm_resamplefds_allowed)

/**
* kvm_msi_via_irqfd_enabled:
*

kvm-all.c (11 changed lines)

@@ -120,6 +120,7 @@ bool kvm_async_interrupts_allowed;
bool kvm_halt_in_kernel_allowed;
bool kvm_eventfds_allowed;
bool kvm_irqfds_allowed;
bool kvm_resamplefds_allowed;
bool kvm_msi_via_irqfd_allowed;
bool kvm_gsi_routing_allowed;
bool kvm_gsi_direct_mapping;

@@ -416,7 +417,7 @@ static int kvm_physical_sync_dirty_bitmap(MemoryRegionSection *section)
{
KVMState *s = kvm_state;
unsigned long size, allocated_size = 0;
KVMDirtyLog d;
KVMDirtyLog d = {};
KVMSlot *mem;
int ret = 0;
hwaddr start_addr = section->offset_within_address_space;

@@ -1276,7 +1277,7 @@ static int kvm_irqchip_assign_irqfd(KVMState *s, int fd, int rfd, int virq,

int kvm_irqchip_add_adapter_route(KVMState *s, AdapterInfo *adapter)
{
struct kvm_irq_routing_entry kroute;
struct kvm_irq_routing_entry kroute = {};
int virq;

if (!kvm_gsi_routing_enabled()) {

@@ -1584,6 +1585,12 @@ static int kvm_init(MachineState *ms)
kvm_eventfds_allowed =
(kvm_check_extension(s, KVM_CAP_IOEVENTFD) > 0);

kvm_irqfds_allowed =
(kvm_check_extension(s, KVM_CAP_IRQFD) > 0);

kvm_resamplefds_allowed =
(kvm_check_extension(s, KVM_CAP_IRQFD_RESAMPLE) > 0);

ret = kvm_arch_init(s);
if (ret < 0) {
goto err;

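The `= {}` initializers above belong to the valgrind/KVM part of this pull: structs whose address is handed to a KVM ioctl are now fully zeroed first, so padding and currently unused fields are defined before the kernel (or valgrind) looks at them. A reduced sketch of the pattern, written against the kernel's own struct kvm_dirty_log rather than QEMU's wrappers:

    /* Sketch only (not QEMU code): zero-initialize the ioctl argument so
     * no stack garbage reaches the kernel; struct kvm_dirty_log and
     * KVM_GET_DIRTY_LOG come from <linux/kvm.h>. */
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    static int get_dirty_log(int vm_fd, int slot, void *bitmap)
    {
        struct kvm_dirty_log d = {};   /* fields and padding all start at 0 */

        d.slot = slot;
        d.dirty_bitmap = bitmap;
        return ioctl(vm_fd, KVM_GET_DIRTY_LOG, &d);
    }
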
Binary file not shown.
@@ -76,7 +76,31 @@ boot_kernel:

copy_kernel:
/* Compute initrd address */
/* Read info block in low memory (0x10000 or 0x90000) */
read_fw FW_CFG_SETUP_ADDR
shr $4, %eax
mov %eax, %es
xor %edi, %edi
read_fw_blob_addr32_edi(FW_CFG_SETUP)

cmpw $0x203, %es:0x206 // if protocol >= 0x203
jae 1f // have initrd_max
movl $0x37ffffff, %es:0x22c // else assume 0x37ffffff
1:

/* Check if using kernel-specified initrd address */
read_fw FW_CFG_INITRD_ADDR
mov %eax, %edi // (load_kernel wants it in %edi)
read_fw FW_CFG_INITRD_SIZE // find end of initrd
add %edi, %eax
xor %es:0x22c, %eax // if it matches es:0x22c
and $-4096, %eax // (apart from padding for page)
jz load_kernel // then initrd is not at top
// of memory

/* pc.c placed the initrd at end of memory. Compute a better
* initrd address based on e801 data.
*/
mov $0xe801, %ax
xor %cx, %cx
xor %dx, %dx

@@ -107,7 +131,9 @@ copy_kernel:
read_fw FW_CFG_INITRD_SIZE
subl %eax, %edi
andl $-4096, %edi /* EDI = start of initrd */
movl %edi, %es:0x218 /* put it in the header */

load_kernel:
/* We need to load the kernel into memory we can't access in 16 bit
mode, so let's get into 32 bit mode, write the kernel and jump
back again. */

@@ -139,19 +165,10 @@ copy_kernel:
/* We're now running in 16-bit CS, but 32-bit ES! */

/* Load kernel and initrd */
pushl %edi
read_fw_blob_addr32_edi(FW_CFG_INITRD)
read_fw_blob_addr32(FW_CFG_KERNEL)
read_fw_blob_addr32(FW_CFG_CMDLINE)

read_fw FW_CFG_SETUP_ADDR
mov %eax, %edi
mov %eax, %ebx
read_fw_blob_addr32_edi(FW_CFG_SETUP)

/* Update the header with the initrd address we chose above */
popl %es:0x218(%ebx)

/* And now jump into Linux! */
mov $0, %eax
mov %eax, %cr0

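The comments above describe the new initrd placement for old kernels: read the setup header, assume an initrd_max of 0x37ffffff when the boot protocol is older than 2.03, and if pc.c parked the initrd at the very top of memory, recompute a lower address from the e801 memory data. A rough C restatement of the address arithmetic, illustrative only since the real code is 16-bit assembly:

    /* Illustrative only: pick the highest page-aligned address that keeps
     * the whole initrd below both the usable-memory top and initrd_max. */
    #include <stdint.h>

    static uint32_t pick_initrd_addr(uint32_t mem_top, uint32_t initrd_max,
                                     uint32_t initrd_size)
    {
        uint32_t limit = mem_top < initrd_max ? mem_top : initrd_max;

        return (limit - initrd_size) & ~0xfffu;   /* round down to 4 KiB */
    }
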
@@ -573,6 +573,8 @@ int64_t qemu_clock_get_ns(QEMUClockType type)
notifier_list_notify(&clock->reset_notifiers, &now);
}
return now;
case QEMU_CLOCK_VIRTUAL_RT:
return cpu_get_clock();
}
}

@@ -78,9 +78,7 @@ static int x86_64_write_elf64_note(WriteCoreDumpFunction f,
descsz = sizeof(x86_64_elf_prstatus);
note_size = ((sizeof(Elf64_Nhdr) + 3) / 4 + (name_size + 3) / 4 +
(descsz + 3) / 4) * 4;
note = g_malloc(note_size);

memset(note, 0, note_size);
note = g_malloc0(note_size);
note->n_namesz = cpu_to_le32(name_size);
note->n_descsz = cpu_to_le32(descsz);
note->n_type = cpu_to_le32(NT_PRSTATUS);

@@ -159,9 +157,7 @@ static int x86_write_elf64_note(WriteCoreDumpFunction f, CPUX86State *env,
descsz = sizeof(x86_elf_prstatus);
note_size = ((sizeof(Elf64_Nhdr) + 3) / 4 + (name_size + 3) / 4 +
(descsz + 3) / 4) * 4;
note = g_malloc(note_size);

memset(note, 0, note_size);
note = g_malloc0(note_size);
note->n_namesz = cpu_to_le32(name_size);
note->n_descsz = cpu_to_le32(descsz);
note->n_type = cpu_to_le32(NT_PRSTATUS);

@@ -216,9 +212,7 @@ int x86_cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cs,
descsz = sizeof(x86_elf_prstatus);
note_size = ((sizeof(Elf32_Nhdr) + 3) / 4 + (name_size + 3) / 4 +
(descsz + 3) / 4) * 4;
note = g_malloc(note_size);

memset(note, 0, note_size);
note = g_malloc0(note_size);
note->n_namesz = cpu_to_le32(name_size);
note->n_descsz = cpu_to_le32(descsz);
note->n_type = cpu_to_le32(NT_PRSTATUS);

@@ -345,9 +339,7 @@ static inline int cpu_write_qemu_note(WriteCoreDumpFunction f,
}
note_size = ((note_head_size + 3) / 4 + (name_size + 3) / 4 +
(descsz + 3) / 4) * 4;
note = g_malloc(note_size);

memset(note, 0, note_size);
note = g_malloc0(note_size);
if (type == 0) {
note32 = note;
note32->n_namesz = cpu_to_le32(name_size);

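All four hunks above are the same malloc/free cleanup: a g_malloc() immediately followed by memset(..., 0, ...) becomes a single g_malloc0(). A minimal stand-alone illustration of the idiom, GLib only and not the dump code itself:

    /* Before: buf = g_malloc(size); memset(buf, 0, size);
     * After:  one call that both allocates and zeroes. */
    #include <glib.h>

    static void *alloc_note(gsize note_size)
    {
        return g_malloc0(note_size);
    }
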
@@ -274,6 +274,17 @@ static const char *cpuid_apm_edx_feature_name[] = {
NULL, NULL, NULL, NULL,
};

static const char *cpuid_xsave_feature_name[] = {
"xsaveopt", "xsavec", "xgetbv1", "xsaves",
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
};

#define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
#define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)

@@ -391,6 +402,13 @@ static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
.tcg_features = TCG_APM_FEATURES,
.unmigratable_flags = CPUID_APM_INVTSC,
},
[FEAT_XSAVE] = {
.feat_names = cpuid_xsave_feature_name,
.cpuid_eax = 0xd,
.cpuid_needs_ecx = true, .cpuid_ecx = 1,
.cpuid_reg = R_EAX,
.tcg_features = 0,
},
};

typedef struct X86RegisterInfo32 {

@@ -742,9 +760,9 @@ static X86CPUDefinition builtin_x86_defs[] = {
.family = 15,
.model = 6,
.stepping = 1,
/* Missing: CPUID_VME, CPUID_HT */
/* Missing: CPUID_HT */
.features[FEAT_1_EDX] =
PPRO_FEATURES |
PPRO_FEATURES | CPUID_VME |
CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
CPUID_PSE36,
/* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */

@@ -784,7 +802,7 @@ static X86CPUDefinition builtin_x86_defs[] = {
.model = 6,
.stepping = 1,
.features[FEAT_1_EDX] =
PPRO_FEATURES |
PPRO_FEATURES | CPUID_VME |
CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
.features[FEAT_1_ECX] =
CPUID_EXT_SSE3,

@@ -910,7 +928,7 @@ static X86CPUDefinition builtin_x86_defs[] = {
.model = 15,
.stepping = 3,
.features[FEAT_1_EDX] =
CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |

@@ -932,7 +950,7 @@ static X86CPUDefinition builtin_x86_defs[] = {
.model = 23,
.stepping = 3,
.features[FEAT_1_EDX] =
CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |

@@ -955,7 +973,7 @@ static X86CPUDefinition builtin_x86_defs[] = {
.model = 26,
.stepping = 3,
.features[FEAT_1_EDX] =
CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |

@@ -978,7 +996,7 @@ static X86CPUDefinition builtin_x86_defs[] = {
.model = 44,
.stepping = 1,
.features[FEAT_1_EDX] =
CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |

@@ -1002,7 +1020,7 @@ static X86CPUDefinition builtin_x86_defs[] = {
.model = 42,
.stepping = 1,
.features[FEAT_1_EDX] =
CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |

@@ -1018,9 +1036,43 @@ static X86CPUDefinition builtin_x86_defs[] = {
CPUID_EXT2_SYSCALL,
.features[FEAT_8000_0001_ECX] =
CPUID_EXT3_LAHF_LM,
.features[FEAT_XSAVE] =
CPUID_XSAVE_XSAVEOPT,
.xlevel = 0x8000000A,
.model_id = "Intel Xeon E312xx (Sandy Bridge)",
},
{
.name = "IvyBridge",
.level = 0xd,
.vendor = CPUID_VENDOR_INTEL,
.family = 6,
.model = 58,
.stepping = 9,
.features[FEAT_1_EDX] =
CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
CPUID_DE | CPUID_FP87,
.features[FEAT_1_ECX] =
CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
.features[FEAT_7_0_EBX] =
CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
CPUID_7_0_EBX_ERMS,
.features[FEAT_8000_0001_EDX] =
CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
CPUID_EXT2_SYSCALL,
.features[FEAT_8000_0001_ECX] =
CPUID_EXT3_LAHF_LM,
.features[FEAT_XSAVE] =
CPUID_XSAVE_XSAVEOPT,
.xlevel = 0x8000000A,
.model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)",
},
{
.name = "Haswell",
.level = 0xd,

@@ -1029,7 +1081,7 @@ static X86CPUDefinition builtin_x86_defs[] = {
.model = 60,
.stepping = 1,
.features[FEAT_1_EDX] =
CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |

@@ -1040,7 +1092,7 @@ static X86CPUDefinition builtin_x86_defs[] = {
CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
CPUID_EXT_PCID,
CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
.features[FEAT_8000_0001_EDX] =
CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
CPUID_EXT2_SYSCALL,

@@ -1051,6 +1103,8 @@ static X86CPUDefinition builtin_x86_defs[] = {
CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
CPUID_7_0_EBX_RTM,
.features[FEAT_XSAVE] =
CPUID_XSAVE_XSAVEOPT,
.xlevel = 0x8000000A,
.model_id = "Intel Core Processor (Haswell)",
},

@@ -1062,7 +1116,7 @@ static X86CPUDefinition builtin_x86_defs[] = {
.model = 61,
.stepping = 2,
.features[FEAT_1_EDX] =
CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |

@@ -1073,7 +1127,7 @@ static X86CPUDefinition builtin_x86_defs[] = {
CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
CPUID_EXT_PCID,
CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
.features[FEAT_8000_0001_EDX] =
CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
CPUID_EXT2_SYSCALL,

@@ -1085,6 +1139,8 @@ static X86CPUDefinition builtin_x86_defs[] = {
CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
CPUID_7_0_EBX_SMAP,
.features[FEAT_XSAVE] =
CPUID_XSAVE_XSAVEOPT,
.xlevel = 0x8000000A,
.model_id = "Intel Core Processor (Broadwell)",
},

@@ -1096,7 +1152,7 @@ static X86CPUDefinition builtin_x86_defs[] = {
.model = 6,
.stepping = 1,
.features[FEAT_1_EDX] =
CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |

@@ -1121,7 +1177,7 @@ static X86CPUDefinition builtin_x86_defs[] = {
.model = 6,
.stepping = 1,
.features[FEAT_1_EDX] =
CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |

@@ -1149,7 +1205,7 @@ static X86CPUDefinition builtin_x86_defs[] = {
.model = 6,
.stepping = 1,
.features[FEAT_1_EDX] =
CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |

@@ -1179,7 +1235,7 @@ static X86CPUDefinition builtin_x86_defs[] = {
.model = 1,
.stepping = 2,
.features[FEAT_1_EDX] =
CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |

@@ -1202,6 +1258,7 @@ static X86CPUDefinition builtin_x86_defs[] = {
CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
CPUID_EXT3_LAHF_LM,
/* no xsaveopt! */
.xlevel = 0x8000001A,
.model_id = "AMD Opteron 62xx class CPU",
},

@@ -1213,7 +1270,7 @@ static X86CPUDefinition builtin_x86_defs[] = {
.model = 2,
.stepping = 0,
.features[FEAT_1_EDX] =
CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |

@@ -1236,6 +1293,7 @@ static X86CPUDefinition builtin_x86_defs[] = {
CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
CPUID_EXT3_LAHF_LM,
/* no xsaveopt! */
.xlevel = 0x8000001A,
.model_id = "AMD Opteron 63xx class CPU",
},

@@ -1530,7 +1588,7 @@ static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
CPUX86State *env = &cpu->env;
char *value;

value = (char *)g_malloc(CPUID_VENDOR_SZ + 1);
value = g_malloc(CPUID_VENDOR_SZ + 1);
x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
env->cpuid_vendor3);
return value;

@@ -2377,7 +2435,7 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
*eax |= kvm_mask & (XSTATE_FP | XSTATE_SSE);
*ebx = *ecx;
} else if (count == 1) {
*eax = kvm_arch_get_supported_cpuid(s, 0xd, 1, R_EAX);
*eax = env->features[FEAT_XSAVE];
} else if (count < ARRAY_SIZE(ext_save_areas)) {
const ExtSaveArea *esa = &ext_save_areas[count];
if ((env->features[esa->feature] & esa->bits) == esa->bits &&

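The new FEAT_XSAVE word mirrors CPUID leaf 0xD, sub-leaf 1, register EAX, whose low bits are xsaveopt/xsavec/xgetbv1/xsaves, and it is now what the guest sees in that leaf. As a side illustration of what the feature word corresponds to (not QEMU code), the same leaf can be inspected on the host with the GCC/Clang <cpuid.h> helper:

    /* Host-side peek at CPUID.(EAX=0xD,ECX=1).EAX; __get_cpuid_count()
     * is provided by GCC/Clang's <cpuid.h>. */
    #include <cpuid.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0;

        if (__get_cpuid_count(0xd, 1, &eax, &ebx, &ecx, &edx)) {
            printf("xsaveopt=%u xsavec=%u xgetbv1=%u xsaves=%u\n",
                   eax & 1, (eax >> 1) & 1, (eax >> 2) & 1, (eax >> 3) & 1);
        }
        return 0;
    }
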
@@ -28,6 +28,9 @@
#define TARGET_LONG_BITS 32
#endif

/* Maximum instruction code size */
#define TARGET_MAX_INSN_SIZE 16

/* target supports implicit self modifying code */
#define TARGET_HAS_SMC
/* support for self modifying code even if the modified instruction is

@@ -389,6 +392,7 @@
#define MSR_VM_HSAVE_PA 0xc0010117

#define MSR_IA32_BNDCFGS 0x00000d90
#define MSR_IA32_XSS 0x00000da0

#define XSTATE_FP (1ULL << 0)
#define XSTATE_SSE (1ULL << 1)

@@ -411,6 +415,7 @@ typedef enum FeatureWord {
FEAT_C000_0001_EDX, /* CPUID[C000_0001].EDX */
FEAT_KVM, /* CPUID[4000_0001].EAX (KVM_CPUID_FEATURES) */
FEAT_SVM, /* CPUID[8000_000A].EDX */
FEAT_XSAVE, /* CPUID[EAX=0xd,ECX=1].EAX */
FEATURE_WORDS,
} FeatureWord;

@@ -571,6 +576,11 @@ typedef uint32_t FeatureWordArray[FEATURE_WORDS];
#define CPUID_7_0_EBX_AVX512ER (1U << 27) /* AVX-512 Exponential and Reciprocal */
#define CPUID_7_0_EBX_AVX512CD (1U << 28) /* AVX-512 Conflict Detection */

#define CPUID_XSAVE_XSAVEOPT (1U << 0)
#define CPUID_XSAVE_XSAVEC (1U << 1)
#define CPUID_XSAVE_XGETBV1 (1U << 2)
#define CPUID_XSAVE_XSAVES (1U << 3)

/* CPUID[0x80000007].EDX flags: */
#define CPUID_APM_INVTSC (1U << 8)

@@ -1019,6 +1029,7 @@ typedef struct CPUX86State {
uint64_t xstate_bv;

uint64_t xcr0;
uint64_t xss;

TPRAccess tpr_access_type;
} CPUX86State;

@@ -80,6 +80,7 @@ static bool has_msr_hv_hypercall;
static bool has_msr_hv_vapic;
static bool has_msr_hv_tsc;
static bool has_msr_mtrr;
static bool has_msr_xss;

static bool has_msr_architectural_pmu;
static uint32_t num_architectural_pmu_counters;

@@ -95,7 +96,7 @@ static struct kvm_cpuid2 *try_get_cpuid(KVMState *s, int max)
int r, size;

size = sizeof(*cpuid) + max * sizeof(*cpuid->entries);
cpuid = (struct kvm_cpuid2 *)g_malloc0(size);
cpuid = g_malloc0(size);
cpuid->nent = max;
r = kvm_ioctl(s, KVM_GET_SUPPORTED_CPUID, cpuid);
if (r == 0 && cpuid->nent >= max) {

@@ -277,7 +278,7 @@ static void kvm_hwpoison_page_add(ram_addr_t ram_addr)
return;
}
}
page = g_malloc(sizeof(HWPoisonPage));
page = g_new(HWPoisonPage, 1);
page->ram_addr = ram_addr;
QLIST_INSERT_HEAD(&hwpoison_page_list, page, list);
}

@@ -826,6 +827,10 @@ static int kvm_get_supported_msrs(KVMState *s)
has_msr_bndcfgs = true;
continue;
}
if (kvm_msr_list->indices[i] == MSR_IA32_XSS) {
has_msr_xss = true;
continue;
}
}
}

@@ -1085,7 +1090,7 @@ static int kvm_put_xsave(X86CPU *cpu)
static int kvm_put_xcrs(X86CPU *cpu)
{
CPUX86State *env = &cpu->env;
struct kvm_xcrs xcrs;
struct kvm_xcrs xcrs = {};

if (!kvm_has_xcrs()) {
return 0;

@@ -1152,6 +1157,7 @@ static void kvm_msr_entry_set(struct kvm_msr_entry *entry,
uint32_t index, uint64_t value)
{
entry->index = index;
entry->reserved = 0;
entry->data = value;
}

@@ -1170,7 +1176,9 @@ static int kvm_put_tscdeadline_msr(X86CPU *cpu)

kvm_msr_entry_set(&msrs[0], MSR_IA32_TSCDEADLINE, env->tsc_deadline);

msr_data.info.nmsrs = 1;
msr_data.info = (struct kvm_msrs) {
.nmsrs = 1,
};

return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, &msr_data);
}

@@ -1190,7 +1198,11 @@ static int kvm_put_msr_feature_control(X86CPU *cpu)

kvm_msr_entry_set(&msr_data.entry, MSR_IA32_FEATURE_CONTROL,
cpu->env.msr_ia32_feature_control);
msr_data.info.nmsrs = 1;

msr_data.info = (struct kvm_msrs) {
.nmsrs = 1,
};

return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, &msr_data);
}

@@ -1224,6 +1236,9 @@ static int kvm_put_msrs(X86CPU *cpu, int level)
if (has_msr_bndcfgs) {
kvm_msr_entry_set(&msrs[n++], MSR_IA32_BNDCFGS, env->msr_bndcfgs);
}
if (has_msr_xss) {
kvm_msr_entry_set(&msrs[n++], MSR_IA32_XSS, env->xss);
}
#ifdef TARGET_X86_64
if (lm_capable_kernel) {
kvm_msr_entry_set(&msrs[n++], MSR_CSTAR, env->cstar);

@@ -1339,7 +1354,9 @@ static int kvm_put_msrs(X86CPU *cpu, int level)
}
}

msr_data.info.nmsrs = n;
msr_data.info = (struct kvm_msrs) {
.nmsrs = n,
};

return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, &msr_data);

@@ -1570,6 +1587,10 @@ static int kvm_get_msrs(X86CPU *cpu)
if (has_msr_bndcfgs) {
msrs[n++].index = MSR_IA32_BNDCFGS;
}
if (has_msr_xss) {
msrs[n++].index = MSR_IA32_XSS;
}

if (!env->tsc_valid) {
msrs[n++].index = MSR_IA32_TSC;

@@ -1646,7 +1667,10 @@ static int kvm_get_msrs(X86CPU *cpu)
}
}

msr_data.info.nmsrs = n;
msr_data.info = (struct kvm_msrs) {
.nmsrs = n,
};

ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MSRS, &msr_data);
if (ret < 0) {
return ret;

@@ -1717,6 +1741,9 @@ static int kvm_get_msrs(X86CPU *cpu)
case MSR_IA32_BNDCFGS:
env->msr_bndcfgs = msrs[i].data;
break;
case MSR_IA32_XSS:
env->xss = msrs[i].data;
break;
default:
if (msrs[i].index >= MSR_MC0_CTL &&
msrs[i].index < MSR_MC0_CTL + (env->mcg_cap & 0xff) * 4) {

@@ -1872,7 +1899,7 @@ static int kvm_put_apic(X86CPU *cpu)
static int kvm_put_vcpu_events(X86CPU *cpu, int level)
{
CPUX86State *env = &cpu->env;
struct kvm_vcpu_events events;
struct kvm_vcpu_events events = {};

if (!kvm_has_vcpu_events()) {
return 0;

@@ -2563,7 +2590,6 @@ void kvm_arch_init_irq_routing(KVMState *s)
* irqchip, so we can use irqfds, and on x86 we know
* we can use msi via irqfd and GSI routing.
*/
kvm_irqfds_allowed = true;
kvm_msi_via_irqfd_allowed = true;
kvm_gsi_routing_allowed = true;
}

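The repeated `msr_data.info = (struct kvm_msrs) { .nmsrs = n };` assignments replace the plain `msr_data.info.nmsrs = n;`, so the rest of the header is cleared as well before the buffer goes to KVM_SET_MSRS/KVM_GET_MSRS, in the same spirit as the other `= {}` changes in this pull. A reduced sketch of the idiom on a stand-in struct (field names illustrative, not the kernel layout):

    /* Compound-literal assignment zeroes every member that is not named
     * (and, in practice, the bytes around it). */
    #include <stdint.h>
    #include <stdio.h>

    struct msrs_header {
        uint32_t nmsrs;
        uint32_t pad;          /* left at 0 by the compound literal */
    };

    int main(void)
    {
        struct msrs_header info;

        info = (struct msrs_header) {
            .nmsrs = 3,
        };
        printf("nmsrs=%u pad=%u\n", info.nmsrs, info.pad);
        return 0;
    }
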
@@ -687,6 +687,24 @@ static const VMStateDescription vmstate_avx512 = {
}
};

static bool xss_needed(void *opaque)
{
X86CPU *cpu = opaque;
CPUX86State *env = &cpu->env;

return env->xss != 0;
}

static const VMStateDescription vmstate_xss = {
.name = "cpu/xss",
.version_id = 1,
.minimum_version_id = 1,
.fields = (VMStateField[]) {
VMSTATE_UINT64(env.xss, X86CPU),
VMSTATE_END_OF_LIST()
}
};

VMStateDescription vmstate_x86_cpu = {
.name = "cpu",
.version_id = 12,

@@ -832,6 +850,9 @@ VMStateDescription vmstate_x86_cpu = {
}, {
.vmsd = &vmstate_avx512,
.needed = avx512_needed,
}, {
.vmsd = &vmstate_xss,
.needed = xss_needed,
} , {
/* empty */
}

@@ -2228,7 +2228,7 @@ void glue(helper_aesdeclast, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)
Reg rk = *s;

for (i = 0; i < 16; i++) {
d->B(i) = rk.B(i) ^ (AES_Td4[st.B(AES_ishifts[i])] & 0xff);
d->B(i) = rk.B(i) ^ (AES_isbox[st.B(AES_ishifts[i])]);
}
}

@@ -2253,7 +2253,7 @@ void glue(helper_aesenclast, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)
Reg rk = *s;

for (i = 0; i < 16; i++) {
d->B(i) = rk.B(i) ^ (AES_Te4[st.B(AES_shifts[i])] & 0xff);
d->B(i) = rk.B(i) ^ (AES_sbox[st.B(AES_shifts[i])]);
}
}

@@ -2264,10 +2264,10 @@ void glue(helper_aesimc, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)
Reg tmp = *s;

for (i = 0 ; i < 4 ; i++) {
d->L(i) = bswap32(AES_Td0[AES_Te4[tmp.B(4*i+0)] & 0xff] ^
AES_Td1[AES_Te4[tmp.B(4*i+1)] & 0xff] ^
AES_Td2[AES_Te4[tmp.B(4*i+2)] & 0xff] ^
AES_Td3[AES_Te4[tmp.B(4*i+3)] & 0xff]);
d->L(i) = bswap32(AES_imc[tmp.B(4*i+0)][0] ^
AES_imc[tmp.B(4*i+1)][1] ^
AES_imc[tmp.B(4*i+2)][2] ^
AES_imc[tmp.B(4*i+3)][3]);
}
}

@@ -2278,8 +2278,8 @@ void glue(helper_aeskeygenassist, SUFFIX)(CPUX86State *env, Reg *d, Reg *s,
Reg tmp = *s;

for (i = 0 ; i < 4 ; i++) {
d->B(i) = AES_Te4[tmp.B(i + 4)] & 0xff;
d->B(i + 8) = AES_Te4[tmp.B(i + 12)] & 0xff;
d->B(i) = AES_sbox[tmp.B(i + 4)];
d->B(i + 8) = AES_sbox[tmp.B(i + 12)];
}
d->L(1) = (d->L(0) << 24 | d->L(0) >> 8) ^ ctrl;
d->L(3) = (d->L(2) << 24 | d->L(2) >> 8) ^ ctrl;

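The AES hunks above drop the `AES_Te4[x] & 0xff` and `AES_Td4[x] & 0xff` expressions in favour of the plain S-box tables. In the usual rijndael table layout, Te4[x] is sbox[x] replicated into every byte of the 32-bit word, so masking its low byte yields the same value (likewise Td4 versus the inverse S-box). A small self-check of that identity with locally built stand-in tables; the byte values are arbitrary, only the layout matters:

    /* Stand-in tables built the way rijndael's Te4 is built from its
     * S-box: the masked lookup and the direct lookup must agree. */
    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint8_t sbox[256];
        uint32_t te4[256];

        for (int x = 0; x < 256; x++) {
            sbox[x] = (uint8_t)(x * 31 + 7);   /* arbitrary byte values */
            te4[x] = 0x01010101u * sbox[x];    /* byte replicated 4 times */
        }
        for (int x = 0; x < 256; x++) {
            assert((te4[x] & 0xff) == sbox[x]);
        }
        return 0;
    }
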
@@ -8034,6 +8034,20 @@ static inline void gen_intermediate_code_internal(X86CPU *cpu,
gen_eob(dc);
break;
}
/* Do not cross the boundary of the pages in icount mode,
it can cause an exception. Do it only when boundary is
crossed by the first instruction in the block.
If current instruction already crossed the bound - it's ok,
because an exception hasn't stopped this code.
*/
if (use_icount
&& ((pc_ptr & TARGET_PAGE_MASK)
!= ((pc_ptr + TARGET_MAX_INSN_SIZE - 1) & TARGET_PAGE_MASK)
|| (pc_ptr & ~TARGET_PAGE_MASK) == 0)) {
gen_jmp_im(pc_ptr - dc->cs_base);
gen_eob(dc);
break;
}
/* if too long translation, stop generation too */
if (tcg_ctx.gen_opc_ptr >= gen_opc_end ||
(pc_ptr - pc_start) >= (TARGET_PAGE_SIZE - 32) ||

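The comment in the hunk explains the constraint: in icount (record/replay) mode a translation block must not begin an instruction that could spill into the next page, because the fetch could fault. The test itself is simple address arithmetic; a stand-alone sketch with QEMU's 4 KiB target pages and the 16-byte x86 maximum instruction size:

    /* Does an instruction starting at pc possibly cross a 4 KiB page? */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_MASK (~(uint64_t)0xfff)   /* 4 KiB target pages */
    #define MAX_INSN  16                   /* TARGET_MAX_INSN_SIZE on x86 */

    static bool may_cross_page(uint64_t pc)
    {
        return (pc & PAGE_MASK) != ((pc + MAX_INSN - 1) & PAGE_MASK);
    }

    int main(void)
    {
        printf("%d %d\n", may_cross_page(0x1ff8), may_cross_page(0x1f00));
        return 0;   /* prints "1 0" */
    }
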
@@ -439,7 +439,7 @@ static void kvm_mips_update_state(void *opaque, int running, RunState state)
}
} else {
/* Set clock restore time to now */
count_resume = get_clock();
count_resume = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
ret = kvm_mips_put_one_reg64(cs, KVM_REG_MIPS_COUNT_RESUME,
&count_resume);
if (ret < 0) {

@@ -208,7 +208,7 @@ int kvm_arch_put_registers(CPUState *cs, int level)
CPUS390XState *env = &cpu->env;
struct kvm_sregs sregs;
struct kvm_regs regs;
struct kvm_fpu fpu;
struct kvm_fpu fpu = {};
int r;
int i;

@@ -1294,7 +1294,6 @@ void kvm_arch_init_irq_routing(KVMState *s)
* have to override the common code kvm_halt_in_kernel_allowed setting.
*/
if (kvm_check_extension(s, KVM_CAP_IRQ_ROUTING)) {
kvm_irqfds_allowed = true;
kvm_gsi_routing_allowed = true;
kvm_halt_in_kernel_allowed = false;
}

@@ -264,6 +264,12 @@ bool cpu_restore_state(CPUState *cpu, uintptr_t retaddr)
tb = tb_find_pc(retaddr);
if (tb) {
cpu_restore_state_from_tb(cpu, tb, retaddr);
if (tb->cflags & CF_NOCACHE) {
/* one-shot translation, invalidate it immediately */
cpu->current_tb = NULL;
tb_phys_invalidate(tb, -1);
tb_free(tb);
}
return true;
}
return false;