mirror of https://gitee.com/openkylin/qemu.git
ppc patch queue for 2016-02-01
Currently accumulated patches for target-ppc, pseries machine type and related devices.

* Cleanup of error handling code in spapr
* A number of fixes for Macintosh devices for the benefit of MacOS 9 and X
* Remove some abuses of the RTAS memory access functions in spapr
* Fixes for the gdbstub (and monitor debug) for VMX and VSX extensions.
* Fix pseries machine hotplug memory under TCG
* Clean up and extend handling of multiple page sizes with 64-bit hash MMUs
* Fix to the TCG implementation of mcrfs

-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1

iQIcBAABAgAGBQJWrsLSAAoJEGw4ysog2bOS8/AQAIFdKPrWUbtWi5Wrt8odLiIh
BDYi01pbWuU8BdzcTS3yBP8TIl8/FTo68Cpd42PmBtseT3M+jS7RdgKrdfzx9T+j
ebJSoIf08zoR1Y57rr8CYX7p23w1N5MqlOTrJ7JYExWoIHokO0AegB4EfTFO5tfE
gNK4Z5oZigVz3sZmm6juXRtpYxLWit8WOJxZ+CwVH26RfiI2pV922KYbed8t0oih
vv07crJgRUptiarjf9woU9HW7J2fzrOXIY6QfyjdVxc8SUBFfCjvRekMBFuU2bex
+QQOiI2u5QPNNO8fi5MXAlYF2uUEnG6tWKqpw2tWojPX8/xnscvFVEwDWrn0txl5
VTbvpKToicX+xLjSWiHIawwCbaFkw9jYc7B8/xuKCo8g/eMz6xMbjw1j7YCS+yGk
98cEwe+JNOgsk3QRttp3t7Vo76XL8HtA0AK7SkYCMfb6SlHHeghQq4zWWq76S5Bp
DYeA2wRjeBkKAnbSDYIKbVLuZj10QYJTl1Jr3At8YHqEm7Fdth5Dk5VqqR3KZQF+
OyU9+GT2WEAu8t1aYTNGKNFJVcEPZ5BfrkjLK1YJTQk56V+M1wRs51voVJkxlKMO
8JEYefxf1OCXwA1/DPKJgAtwCjtGzHrsW6UEhAvmuzHXx3bE1Zjy5SCeR3I6ib0F
xwPcSf+/eS5lFKtE3etg
=F5Nl
-----END PGP SIGNATURE-----

Merge remote-tracking branch 'remotes/dgibson/tags/ppc-for-2.6-20160201' into staging

ppc patch queue for 2016-02-01

Currently accumulated patches for target-ppc, pseries machine type and related devices.

* Cleanup of error handling code in spapr
* A number of fixes for Macintosh devices for the benefit of MacOS 9 and X
* Remove some abuses of the RTAS memory access functions in spapr
* Fixes for the gdbstub (and monitor debug) for VMX and VSX extensions.
* Fix pseries machine hotplug memory under TCG
* Clean up and extend handling of multiple page sizes with 64-bit hash MMUs
* Fix to the TCG implementation of mcrfs

# gpg: Signature made Mon 01 Feb 2016 02:28:34 GMT using RSA key ID 20D9B392
# gpg: Good signature from "David Gibson <david@gibson.dropbear.id.au>"
# gpg:                 aka "David Gibson (Red Hat) <dgibson@redhat.com>"
# gpg:                 aka "David Gibson (ozlabs.org) <dgibson@ozlabs.org>"
# gpg: WARNING: This key is not certified with sufficiently trusted signatures!
# gpg:          It is not certain that the signature belongs to the owner.
# Primary key fingerprint: 75F4 6586 AE61 A66C C44E 87DC 6C38 CACA 20D9 B392

* remotes/dgibson/tags/ppc-for-2.6-20160201: (40 commits)
  target-ppc: mcrfs should always update FEX/VX and only clear exception bits
  target-ppc: Make every FPSCR_ macro have a corresponding FP_ macro
  target-ppc: Allow more page sizes for POWER7 & POWER8 in TCG
  target-ppc: Helper to determine page size information from hpte alone
  target-ppc: Add new TLB invalidate by HPTE call for hash64 MMUs
  target-ppc: Split 44x tlbiva from ppc_tlb_invalidate_one()
  target-ppc: Remove unused mmu models from ppc_tlb_invalidate_one
  target-ppc: Use actual page size encodings from HPTE
  target-ppc: Rework SLB page size lookup
  target-ppc: Rework ppc_store_slb
  target-ppc: Convert mmu-hash{32,64}.[ch] from CPUPPCState to PowerPCCPU
  target-ppc: Remove unused kvmppc_read_segment_page_sizes() stub
  uninorth.c: add support for UniNorth kMacRISCPCIAddressSelect (0x48) register
  cuda.c: return error for unknown commands
  pseries: Allow TCG h_enter to work with hotplugged memory
  target-ppc: gdbstub: Add VSX support
  target-ppc: gdbstub: fix spe registers for little-endian guests
  target-ppc: gdbstub: fix altivec registers for little-endian guests
  target-ppc: gdbstub: introduce avr_need_swap()
  target-ppc: gdbstub: fix float registers for little-endian guests
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
commit 10ae9d7638

@@ -5702,20 +5702,20 @@ case "$target_name" in
   ppc64)
     TARGET_BASE_ARCH=ppc
     TARGET_ABI_DIR=ppc
-    gdb_xml_files="power64-core.xml power-fpu.xml power-altivec.xml power-spe.xml"
+    gdb_xml_files="power64-core.xml power-fpu.xml power-altivec.xml power-spe.xml power-vsx.xml"
   ;;
   ppc64le)
     TARGET_ARCH=ppc64
     TARGET_BASE_ARCH=ppc
     TARGET_ABI_DIR=ppc
-    gdb_xml_files="power64-core.xml power-fpu.xml power-altivec.xml power-spe.xml"
+    gdb_xml_files="power64-core.xml power-fpu.xml power-altivec.xml power-spe.xml power-vsx.xml"
   ;;
   ppc64abi32)
     TARGET_ARCH=ppc64
     TARGET_BASE_ARCH=ppc
     TARGET_ABI_DIR=ppc
     echo "TARGET_ABI32=y" >> $config_target_mak
-    gdb_xml_files="power64-core.xml power-fpu.xml power-altivec.xml power-spe.xml"
+    gdb_xml_files="power64-core.xml power-fpu.xml power-altivec.xml power-spe.xml power-vsx.xml"
   ;;
   sh4|sh4eb)
     TARGET_ARCH=sh4

power-vsx.xml (new file)

@@ -0,0 +1,44 @@
+<?xml version="1.0"?>
+<!-- Copyright (C) 2008-2015 Free Software Foundation, Inc.
+
+     Copying and distribution of this file, with or without modification,
+     are permitted in any medium without royalty provided the copyright
+     notice and this notice are preserved. -->
+
+<!-- POWER7 VSX registers that do not overlap existing FP and VMX
+     registers. -->
+<!DOCTYPE feature SYSTEM "gdb-target.dtd">
+<feature name="org.gnu.gdb.power.vsx">
+  <reg name="vs0h" bitsize="64" type="uint64"/>
+  <reg name="vs1h" bitsize="64" type="uint64"/>
+  <reg name="vs2h" bitsize="64" type="uint64"/>
+  <reg name="vs3h" bitsize="64" type="uint64"/>
+  <reg name="vs4h" bitsize="64" type="uint64"/>
+  <reg name="vs5h" bitsize="64" type="uint64"/>
+  <reg name="vs6h" bitsize="64" type="uint64"/>
+  <reg name="vs7h" bitsize="64" type="uint64"/>
+  <reg name="vs8h" bitsize="64" type="uint64"/>
+  <reg name="vs9h" bitsize="64" type="uint64"/>
+  <reg name="vs10h" bitsize="64" type="uint64"/>
+  <reg name="vs11h" bitsize="64" type="uint64"/>
+  <reg name="vs12h" bitsize="64" type="uint64"/>
+  <reg name="vs13h" bitsize="64" type="uint64"/>
+  <reg name="vs14h" bitsize="64" type="uint64"/>
+  <reg name="vs15h" bitsize="64" type="uint64"/>
+  <reg name="vs16h" bitsize="64" type="uint64"/>
+  <reg name="vs17h" bitsize="64" type="uint64"/>
+  <reg name="vs18h" bitsize="64" type="uint64"/>
+  <reg name="vs19h" bitsize="64" type="uint64"/>
+  <reg name="vs20h" bitsize="64" type="uint64"/>
+  <reg name="vs21h" bitsize="64" type="uint64"/>
+  <reg name="vs22h" bitsize="64" type="uint64"/>
+  <reg name="vs23h" bitsize="64" type="uint64"/>
+  <reg name="vs24h" bitsize="64" type="uint64"/>
+  <reg name="vs25h" bitsize="64" type="uint64"/>
+  <reg name="vs26h" bitsize="64" type="uint64"/>
+  <reg name="vs27h" bitsize="64" type="uint64"/>
+  <reg name="vs28h" bitsize="64" type="uint64"/>
+  <reg name="vs29h" bitsize="64" type="uint64"/>
+  <reg name="vs30h" bitsize="64" type="uint64"/>
+  <reg name="vs31h" bitsize="64" type="uint64"/>
+</feature>

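For context, a feature XML like this only takes effect once it is registered with QEMU's gdbstub from the target's CPU setup code. The fragment below is a hedged sketch of that pattern, not code taken from this patch queue; the callback names (gdb_get_vsx_regs/gdb_set_vsx_regs) and the count of 32 registers are assumptions made for the illustration.

    #include "exec/gdbstub.h"

    /* Assumed names for the per-register marshalling callbacks; each one
     * copies a single 64-bit "vsNh" half into/out of mem_buf. */
    static int gdb_get_vsx_regs(CPUPPCState *env, uint8_t *mem_buf, int n);
    static int gdb_set_vsx_regs(CPUPPCState *env, uint8_t *mem_buf, int n);

    static void ppc_gdb_register_vsx(CPUState *cs)
    {
        /* 32 registers described by power-vsx.xml (vs0h..vs31h); the XML
         * must also be listed in configure's gdb_xml_files, as in the
         * hunk above, so it gets built into the binary. */
        gdb_register_coprocessor(cs, gdb_get_vsx_regs, gdb_set_vsx_regs,
                                 32, "power-vsx.xml", 0);
    }
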
@@ -120,8 +120,8 @@ static void pmac_dma_read(BlockBackend *blk,
     MACIO_DPRINTF("--- Block read transfer - sector_num: %" PRIx64 " "
                   "nsector: %x\n", (offset >> 9), (bytes >> 9));
 
-    m->aiocb = blk_aio_readv(blk, (offset >> 9), &io->iov, (bytes >> 9),
-                             cb, io);
+    s->bus->dma->aiocb = blk_aio_readv(blk, (offset >> 9), &io->iov,
+                                       (bytes >> 9), cb, io);
 }
 
 static void pmac_dma_write(BlockBackend *blk,
@@ -205,8 +205,8 @@ static void pmac_dma_write(BlockBackend *blk,
     MACIO_DPRINTF("--- Block write transfer - sector_num: %" PRIx64 " "
                   "nsector: %x\n", (offset >> 9), (bytes >> 9));
 
-    m->aiocb = blk_aio_writev(blk, (offset >> 9), &io->iov, (bytes >> 9),
-                              cb, io);
+    s->bus->dma->aiocb = blk_aio_writev(blk, (offset >> 9), &io->iov,
+                                        (bytes >> 9), cb, io);
 }
 
 static void pmac_dma_trim(BlockBackend *blk,
@@ -232,8 +232,8 @@ static void pmac_dma_trim(BlockBackend *blk,
     s->io_buffer_index += io->len;
     io->len = 0;
 
-    m->aiocb = ide_issue_trim(blk, (offset >> 9), &io->iov, (bytes >> 9),
-                              cb, io);
+    s->bus->dma->aiocb = ide_issue_trim(blk, (offset >> 9), &io->iov,
+                                        (bytes >> 9), cb, io);
 }
 
 static void pmac_ide_atapi_transfer_cb(void *opaque, int ret)
@@ -292,6 +292,8 @@ done:
     } else {
         block_acct_done(blk_get_stats(s->blk), &s->acct);
     }
+
+    ide_set_inactive(s, false);
     io->dma_end(opaque);
 }
 
@@ -306,7 +308,6 @@ static void pmac_ide_transfer_cb(void *opaque, int ret)
 
     if (ret < 0) {
         MACIO_DPRINTF("DMA error: %d\n", ret);
-        m->aiocb = NULL;
         ide_dma_error(s);
         goto done;
     }
@@ -357,6 +358,8 @@ done:
             block_acct_done(blk_get_stats(s->blk), &s->acct);
         }
     }
+
+    ide_set_inactive(s, false);
     io->dma_end(opaque);
 }
 
@@ -394,8 +397,9 @@ static void pmac_ide_transfer(DBDMA_io *io)
 static void pmac_ide_flush(DBDMA_io *io)
 {
     MACIOIDEState *m = io->opaque;
+    IDEState *s = idebus_active_if(&m->bus);
 
-    if (m->aiocb) {
+    if (s->bus->dma->aiocb) {
         blk_drain_all();
     }
 }
@@ -513,11 +517,12 @@ static const MemoryRegionOps pmac_ide_ops = {
 
 static const VMStateDescription vmstate_pmac = {
     .name = "ide",
-    .version_id = 3,
+    .version_id = 4,
     .minimum_version_id = 0,
     .fields = (VMStateField[]) {
         VMSTATE_IDE_BUS(bus, MACIOIDEState),
         VMSTATE_IDE_DRIVES(bus.ifs, MACIOIDEState),
+        VMSTATE_BOOL(dma_active, MACIOIDEState),
         VMSTATE_END_OF_LIST()
     }
 };

@@ -606,6 +606,11 @@ static void cuda_receive_packet(CUDAState *s,
         }
         break;
     default:
+        obuf[0] = ERROR_PACKET;
+        obuf[1] = 0x2;
+        obuf[2] = CUDA_PACKET;
+        obuf[3] = data[0];
+        cuda_send_packet_to_host(s, obuf, 4);
         break;
     }
 }
@@ -705,15 +710,17 @@ static const VMStateDescription vmstate_cuda_timer = {
 
 static const VMStateDescription vmstate_cuda = {
     .name = "cuda",
-    .version_id = 2,
-    .minimum_version_id = 2,
+    .version_id = 3,
+    .minimum_version_id = 3,
     .fields = (VMStateField[]) {
         VMSTATE_UINT8(a, CUDAState),
         VMSTATE_UINT8(b, CUDAState),
+        VMSTATE_UINT8(last_b, CUDAState),
         VMSTATE_UINT8(dira, CUDAState),
         VMSTATE_UINT8(dirb, CUDAState),
         VMSTATE_UINT8(sr, CUDAState),
         VMSTATE_UINT8(acr, CUDAState),
+        VMSTATE_UINT8(last_acr, CUDAState),
         VMSTATE_UINT8(pcr, CUDAState),
         VMSTATE_UINT8(ifr, CUDAState),
         VMSTATE_UINT8(ier, CUDAState),
@@ -728,6 +735,7 @@ static const VMStateDescription vmstate_cuda = {
         VMSTATE_STRUCT_ARRAY(timers, CUDAState, 2, 1,
                              vmstate_cuda_timer, CUDATimer),
         VMSTATE_TIMER_PTR(adb_poll_timer, CUDAState),
+        VMSTATE_TIMER_PTR(sr_delay_timer, CUDAState),
         VMSTATE_END_OF_LIST()
     }
 };

@@ -713,20 +713,52 @@ static const MemoryRegionOps dbdma_ops = {
     },
 };
 
-static const VMStateDescription vmstate_dbdma_channel = {
-    .name = "dbdma_channel",
+static const VMStateDescription vmstate_dbdma_io = {
+    .name = "dbdma_io",
     .version_id = 0,
     .minimum_version_id = 0,
     .fields = (VMStateField[]) {
+        VMSTATE_UINT64(addr, struct DBDMA_io),
+        VMSTATE_INT32(len, struct DBDMA_io),
+        VMSTATE_INT32(is_last, struct DBDMA_io),
+        VMSTATE_INT32(is_dma_out, struct DBDMA_io),
+        VMSTATE_BOOL(processing, struct DBDMA_io),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
+static const VMStateDescription vmstate_dbdma_cmd = {
+    .name = "dbdma_cmd",
+    .version_id = 0,
+    .minimum_version_id = 0,
+    .fields = (VMStateField[]) {
+        VMSTATE_UINT16(req_count, dbdma_cmd),
+        VMSTATE_UINT16(command, dbdma_cmd),
+        VMSTATE_UINT32(phy_addr, dbdma_cmd),
+        VMSTATE_UINT32(cmd_dep, dbdma_cmd),
+        VMSTATE_UINT16(res_count, dbdma_cmd),
+        VMSTATE_UINT16(xfer_status, dbdma_cmd),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
+static const VMStateDescription vmstate_dbdma_channel = {
+    .name = "dbdma_channel",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .fields = (VMStateField[]) {
         VMSTATE_UINT32_ARRAY(regs, struct DBDMA_channel, DBDMA_REGS),
+        VMSTATE_STRUCT(io, struct DBDMA_channel, 0, vmstate_dbdma_io, DBDMA_io),
+        VMSTATE_STRUCT(current, struct DBDMA_channel, 0, vmstate_dbdma_cmd,
+                       dbdma_cmd),
         VMSTATE_END_OF_LIST()
     }
 };
 
 static const VMStateDescription vmstate_dbdma = {
     .name = "dbdma",
-    .version_id = 2,
-    .minimum_version_id = 2,
+    .version_id = 3,
+    .minimum_version_id = 3,
     .fields = (VMStateField[]) {
         VMSTATE_STRUCT_ARRAY(channels, DBDMAState, DBDMA_CHANNELS, 1,
                              vmstate_dbdma_channel, DBDMA_channel),

@@ -331,6 +331,15 @@ static void unin_agp_pci_host_realize(PCIDevice *d, Error **errp)
     d->config[0x0C] = 0x08; // cache_line_size
     d->config[0x0D] = 0x10; // latency_timer
     // d->config[0x34] = 0x80; // capabilities_pointer
+    /*
+     * Set kMacRISCPCIAddressSelect (0x48) register to indicate PCI
+     * memory space with base 0x80000000, size 0x10000000 for Apple's
+     * AppleMacRiscPCI driver
+     */
+    d->config[0x48] = 0x0;
+    d->config[0x49] = 0x0;
+    d->config[0x4a] = 0x0;
+    d->config[0x4b] = 0x1;
 }
 
 static void u3_agp_pci_host_realize(PCIDevice *d, Error **errp)

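As a note on the byte layout: the four stores added above fill one naturally aligned config-space dword, so an equivalent formulation with QEMU's generic config-space helper would be the single call sketched below. This is an illustration only, assuming the register is meant to be read as an ordinary little-endian PCI config dword; it is not code from the patch.

    #include "hw/pci/pci.h"

    /* Inside unin_agp_pci_host_realize(): pci_set_long() stores its value
     * little-endian, so this produces config bytes 00 00 00 01 at offsets
     * 0x48..0x4b, exactly like the four assignments above, and the dword
     * reads back as 0x01000000 via pci_get_long(d->config + 0x48). */
    pci_set_long(d->config + 0x48, 0x01000000);
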
@@ -134,7 +134,6 @@ typedef struct MACIOIDEState {
 
     MemoryRegion mem;
     IDEBus bus;
-    BlockAIOCB *aiocb;
     IDEDMA dma;
     void *dbdma;
     bool dma_active;

hw/ppc/spapr.c
|
@ -112,7 +112,7 @@ static XICSState *try_create_xics(const char *type, int nr_servers,
|
|||
}
|
||||
|
||||
static XICSState *xics_system_init(MachineState *machine,
|
||||
int nr_servers, int nr_irqs)
|
||||
int nr_servers, int nr_irqs, Error **errp)
|
||||
{
|
||||
XICSState *icp = NULL;
|
||||
|
||||
|
@ -131,7 +131,7 @@ static XICSState *xics_system_init(MachineState *machine,
|
|||
}
|
||||
|
||||
if (!icp) {
|
||||
icp = try_create_xics(TYPE_XICS, nr_servers, nr_irqs, &error_abort);
|
||||
icp = try_create_xics(TYPE_XICS, nr_servers, nr_irqs, errp);
|
||||
}
|
||||
|
||||
return icp;
|
||||
|
@ -763,6 +763,13 @@ static int spapr_populate_drconf_memory(sPAPRMachineState *spapr, void *fdt)
|
|||
uint32_t *int_buf, *cur_index, buf_len;
|
||||
int nr_nodes = nb_numa_nodes ? nb_numa_nodes : 1;
|
||||
|
||||
/*
|
||||
* Don't create the node if there are no DR LMBs.
|
||||
*/
|
||||
if (!nr_lmbs) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Allocate enough buffer size to fit in ibm,dynamic-memory
|
||||
* or ibm,associativity-lookup-arrays
|
||||
|
@ -869,7 +876,7 @@ int spapr_h_cas_compose_response(sPAPRMachineState *spapr,
|
|||
_FDT((spapr_fixup_cpu_dt(fdt, spapr)));
|
||||
}
|
||||
|
||||
/* Generate memory nodes or ibm,dynamic-reconfiguration-memory node */
|
||||
/* Generate ibm,dynamic-reconfiguration-memory node if required */
|
||||
if (memory_update && smc->dr_lmb_enabled) {
|
||||
_FDT((spapr_populate_drconf_memory(spapr, fdt)));
|
||||
}
|
||||
|
@ -1239,7 +1246,7 @@ static void spapr_rtc_create(sPAPRMachineState *spapr)
|
|||
}
|
||||
|
||||
/* Returns whether we want to use VGA or not */
|
||||
static int spapr_vga_init(PCIBus *pci_bus)
|
||||
static bool spapr_vga_init(PCIBus *pci_bus, Error **errp)
|
||||
{
|
||||
switch (vga_interface_type) {
|
||||
case VGA_NONE:
|
||||
|
@ -1250,9 +1257,9 @@ static int spapr_vga_init(PCIBus *pci_bus)
|
|||
case VGA_VIRTIO:
|
||||
return pci_vga_init(pci_bus) != NULL;
|
||||
default:
|
||||
fprintf(stderr, "This vga model is not supported,"
|
||||
"currently it only supports -vga std\n");
|
||||
exit(0);
|
||||
error_setg(errp,
|
||||
"Unsupported VGA mode, only -vga std or -vga virtio is supported");
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1527,7 +1534,7 @@ static int htab_load(QEMUFile *f, void *opaque, int version_id)
|
|||
int fd = -1;
|
||||
|
||||
if (version_id < 1 || version_id > 1) {
|
||||
fprintf(stderr, "htab_load() bad version\n");
|
||||
error_report("htab_load() bad version");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
|
@ -1548,8 +1555,8 @@ static int htab_load(QEMUFile *f, void *opaque, int version_id)
|
|||
|
||||
fd = kvmppc_get_htab_fd(true);
|
||||
if (fd < 0) {
|
||||
fprintf(stderr, "Unable to open fd to restore KVM hash table: %s\n",
|
||||
strerror(errno));
|
||||
error_report("Unable to open fd to restore KVM hash table: %s",
|
||||
strerror(errno));
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1569,9 +1576,9 @@ static int htab_load(QEMUFile *f, void *opaque, int version_id)
|
|||
if ((index + n_valid + n_invalid) >
|
||||
(HTAB_SIZE(spapr) / HASH_PTE_SIZE_64)) {
|
||||
/* Bad index in stream */
|
||||
fprintf(stderr, "htab_load() bad index %d (%hd+%hd entries) "
|
||||
"in htab stream (htab_shift=%d)\n", index, n_valid, n_invalid,
|
||||
spapr->htab_shift);
|
||||
error_report(
|
||||
"htab_load() bad index %d (%hd+%hd entries) in htab stream (htab_shift=%d)",
|
||||
index, n_valid, n_invalid, spapr->htab_shift);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
|
@ -1618,7 +1625,8 @@ static void spapr_boot_set(void *opaque, const char *boot_device,
|
|||
machine->boot_order = g_strdup(boot_device);
|
||||
}
|
||||
|
||||
static void spapr_cpu_init(sPAPRMachineState *spapr, PowerPCCPU *cpu)
|
||||
static void spapr_cpu_init(sPAPRMachineState *spapr, PowerPCCPU *cpu,
|
||||
Error **errp)
|
||||
{
|
||||
CPUPPCState *env = &cpu->env;
|
||||
|
||||
|
@ -1636,8 +1644,12 @@ static void spapr_cpu_init(sPAPRMachineState *spapr, PowerPCCPU *cpu)
|
|||
}
|
||||
|
||||
if (cpu->max_compat) {
|
||||
if (ppc_set_compat(cpu, cpu->max_compat) < 0) {
|
||||
exit(1);
|
||||
Error *local_err = NULL;
|
||||
|
||||
ppc_set_compat(cpu, cpu->max_compat, &local_err);
|
||||
if (local_err) {
|
||||
error_propagate(errp, local_err);
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1687,27 +1699,34 @@ static void spapr_create_lmb_dr_connectors(sPAPRMachineState *spapr)
|
|||
* to SPAPR_MEMORY_BLOCK_SIZE(256MB), then refuse to start the guest
|
||||
* since we can't support such unaligned sizes with DRCONF_MEMORY.
|
||||
*/
|
||||
static void spapr_validate_node_memory(MachineState *machine)
|
||||
static void spapr_validate_node_memory(MachineState *machine, Error **errp)
|
||||
{
|
||||
int i;
|
||||
|
||||
if (machine->maxram_size % SPAPR_MEMORY_BLOCK_SIZE ||
|
||||
machine->ram_size % SPAPR_MEMORY_BLOCK_SIZE) {
|
||||
error_report("Can't support memory configuration where RAM size "
|
||||
"0x" RAM_ADDR_FMT " or maxmem size "
|
||||
"0x" RAM_ADDR_FMT " isn't aligned to %llu MB",
|
||||
machine->ram_size, machine->maxram_size,
|
||||
SPAPR_MEMORY_BLOCK_SIZE/M_BYTE);
|
||||
exit(EXIT_FAILURE);
|
||||
if (machine->ram_size % SPAPR_MEMORY_BLOCK_SIZE) {
|
||||
error_setg(errp, "Memory size 0x" RAM_ADDR_FMT
|
||||
" is not aligned to %llu MiB",
|
||||
machine->ram_size,
|
||||
SPAPR_MEMORY_BLOCK_SIZE / M_BYTE);
|
||||
return;
|
||||
}
|
||||
|
||||
if (machine->maxram_size % SPAPR_MEMORY_BLOCK_SIZE) {
|
||||
error_setg(errp, "Maximum memory size 0x" RAM_ADDR_FMT
|
||||
" is not aligned to %llu MiB",
|
||||
machine->ram_size,
|
||||
SPAPR_MEMORY_BLOCK_SIZE / M_BYTE);
|
||||
return;
|
||||
}
|
||||
|
||||
for (i = 0; i < nb_numa_nodes; i++) {
|
||||
if (numa_info[i].node_mem % SPAPR_MEMORY_BLOCK_SIZE) {
|
||||
error_report("Can't support memory configuration where memory size"
|
||||
" %" PRIx64 " of node %d isn't aligned to %llu MB",
|
||||
numa_info[i].node_mem, i,
|
||||
SPAPR_MEMORY_BLOCK_SIZE/M_BYTE);
|
||||
exit(EXIT_FAILURE);
|
||||
error_setg(errp,
|
||||
"Node %d memory size 0x%" PRIx64
|
||||
" is not aligned to %llu MiB",
|
||||
i, numa_info[i].node_mem,
|
||||
SPAPR_MEMORY_BLOCK_SIZE / M_BYTE);
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -1770,8 +1789,8 @@ static void ppc_spapr_init(MachineState *machine)
|
|||
}
|
||||
|
||||
if (spapr->rma_size > node0_size) {
|
||||
fprintf(stderr, "Error: Numa node 0 has to span the RMA (%#08"HWADDR_PRIx")\n",
|
||||
spapr->rma_size);
|
||||
error_report("Numa node 0 has to span the RMA (%#08"HWADDR_PRIx")",
|
||||
spapr->rma_size);
|
||||
exit(1);
|
||||
}
|
||||
|
||||
|
@ -1794,10 +1813,10 @@ static void ppc_spapr_init(MachineState *machine)
|
|||
spapr->icp = xics_system_init(machine,
|
||||
DIV_ROUND_UP(max_cpus * kvmppc_smt_threads(),
|
||||
smp_threads),
|
||||
XICS_IRQS);
|
||||
XICS_IRQS, &error_fatal);
|
||||
|
||||
if (smc->dr_lmb_enabled) {
|
||||
spapr_validate_node_memory(machine);
|
||||
spapr_validate_node_memory(machine, &error_fatal);
|
||||
}
|
||||
|
||||
/* init CPUs */
|
||||
|
@ -1807,10 +1826,10 @@ static void ppc_spapr_init(MachineState *machine)
|
|||
for (i = 0; i < smp_cpus; i++) {
|
||||
cpu = cpu_ppc_init(machine->cpu_model);
|
||||
if (cpu == NULL) {
|
||||
fprintf(stderr, "Unable to find PowerPC CPU definition\n");
|
||||
error_report("Unable to find PowerPC CPU definition");
|
||||
exit(1);
|
||||
}
|
||||
spapr_cpu_init(spapr, cpu);
|
||||
spapr_cpu_init(spapr, cpu, &error_fatal);
|
||||
}
|
||||
|
||||
if (kvm_enabled()) {
|
||||
|
@ -1837,10 +1856,10 @@ static void ppc_spapr_init(MachineState *machine)
|
|||
ram_addr_t hotplug_mem_size = machine->maxram_size - machine->ram_size;
|
||||
|
||||
if (machine->ram_slots > SPAPR_MAX_RAM_SLOTS) {
|
||||
error_report("Specified number of memory slots %" PRIu64
|
||||
" exceeds max supported %d",
|
||||
error_report("Specified number of memory slots %"
|
||||
PRIu64" exceeds max supported %d",
|
||||
machine->ram_slots, SPAPR_MAX_RAM_SLOTS);
|
||||
exit(EXIT_FAILURE);
|
||||
exit(1);
|
||||
}
|
||||
|
||||
spapr->hotplug_memory.base = ROUND_UP(machine->ram_size,
|
||||
|
@ -1915,7 +1934,7 @@ static void ppc_spapr_init(MachineState *machine)
|
|||
}
|
||||
|
||||
/* Graphics */
|
||||
if (spapr_vga_init(phb->bus)) {
|
||||
if (spapr_vga_init(phb->bus, &error_fatal)) {
|
||||
spapr->has_graphics = true;
|
||||
machine->usb |= defaults_enabled() && !machine->usb_disabled;
|
||||
}
|
||||
|
@ -1936,8 +1955,9 @@ static void ppc_spapr_init(MachineState *machine)
|
|||
}
|
||||
|
||||
if (spapr->rma_size < (MIN_RMA_SLOF << 20)) {
|
||||
fprintf(stderr, "qemu: pSeries SLOF firmware requires >= "
|
||||
"%ldM guest RMA (Real Mode Area memory)\n", MIN_RMA_SLOF);
|
||||
error_report(
|
||||
"pSeries SLOF firmware requires >= %ldM guest RMA (Real Mode Area memory)",
|
||||
MIN_RMA_SLOF);
|
||||
exit(1);
|
||||
}
|
||||
|
||||
|
@ -1953,8 +1973,8 @@ static void ppc_spapr_init(MachineState *machine)
|
|||
kernel_le = kernel_size > 0;
|
||||
}
|
||||
if (kernel_size < 0) {
|
||||
fprintf(stderr, "qemu: error loading %s: %s\n",
|
||||
kernel_filename, load_elf_strerror(kernel_size));
|
||||
error_report("error loading %s: %s",
|
||||
kernel_filename, load_elf_strerror(kernel_size));
|
||||
exit(1);
|
||||
}
|
||||
|
||||
|
@ -1967,8 +1987,8 @@ static void ppc_spapr_init(MachineState *machine)
|
|||
initrd_size = load_image_targphys(initrd_filename, initrd_base,
|
||||
load_limit - initrd_base);
|
||||
if (initrd_size < 0) {
|
||||
fprintf(stderr, "qemu: could not load initial ram disk '%s'\n",
|
||||
initrd_filename);
|
||||
error_report("could not load initial ram disk '%s'",
|
||||
initrd_filename);
|
||||
exit(1);
|
||||
}
|
||||
} else {
|
||||
|
|
|
@ -38,42 +38,6 @@ static void set_spr(CPUState *cs, int spr, target_ulong value,
|
|||
run_on_cpu(cs, do_spr_sync, &s);
|
||||
}
|
||||
|
||||
static target_ulong compute_tlbie_rb(target_ulong v, target_ulong r,
|
||||
target_ulong pte_index)
|
||||
{
|
||||
target_ulong rb, va_low;
|
||||
|
||||
rb = (v & ~0x7fULL) << 16; /* AVA field */
|
||||
va_low = pte_index >> 3;
|
||||
if (v & HPTE64_V_SECONDARY) {
|
||||
va_low = ~va_low;
|
||||
}
|
||||
/* xor vsid from AVA */
|
||||
if (!(v & HPTE64_V_1TB_SEG)) {
|
||||
va_low ^= v >> 12;
|
||||
} else {
|
||||
va_low ^= v >> 24;
|
||||
}
|
||||
va_low &= 0x7ff;
|
||||
if (v & HPTE64_V_LARGE) {
|
||||
rb |= 1; /* L field */
|
||||
#if 0 /* Disable that P7 specific bit for now */
|
||||
if (r & 0xff000) {
|
||||
/* non-16MB large page, must be 64k */
|
||||
/* (masks depend on page size) */
|
||||
rb |= 0x1000; /* page encoding in LP field */
|
||||
rb |= (va_low & 0x7f) << 16; /* 7b of VA in AVA/LP field */
|
||||
rb |= (va_low & 0xfe); /* AVAL field */
|
||||
}
|
||||
#endif
|
||||
} else {
|
||||
/* 4kB page */
|
||||
rb |= (va_low & 0x7ff) << 12; /* remaining 11b of AVA */
|
||||
}
|
||||
rb |= (v >> 54) & 0x300; /* B field */
|
||||
return rb;
|
||||
}
|
||||
|
||||
static inline bool valid_pte_index(CPUPPCState *env, target_ulong pte_index)
|
||||
{
|
||||
/*
|
||||
|
@ -85,42 +49,44 @@ static inline bool valid_pte_index(CPUPPCState *env, target_ulong pte_index)
|
|||
return true;
|
||||
}
|
||||
|
||||
static bool is_ram_address(sPAPRMachineState *spapr, hwaddr addr)
|
||||
{
|
||||
MachineState *machine = MACHINE(spapr);
|
||||
MemoryHotplugState *hpms = &spapr->hotplug_memory;
|
||||
|
||||
if (addr < machine->ram_size) {
|
||||
return true;
|
||||
}
|
||||
if ((addr >= hpms->base)
|
||||
&& ((addr - hpms->base) < memory_region_size(&hpms->mr))) {
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
static target_ulong h_enter(PowerPCCPU *cpu, sPAPRMachineState *spapr,
|
||||
target_ulong opcode, target_ulong *args)
|
||||
{
|
||||
MachineState *machine = MACHINE(spapr);
|
||||
CPUPPCState *env = &cpu->env;
|
||||
target_ulong flags = args[0];
|
||||
target_ulong pte_index = args[1];
|
||||
target_ulong pteh = args[2];
|
||||
target_ulong ptel = args[3];
|
||||
target_ulong page_shift = 12;
|
||||
unsigned apshift, spshift;
|
||||
target_ulong raddr;
|
||||
target_ulong index;
|
||||
uint64_t token;
|
||||
|
||||
/* only handle 4k and 16M pages for now */
|
||||
if (pteh & HPTE64_V_LARGE) {
|
||||
#if 0 /* We don't support 64k pages yet */
|
||||
if ((ptel & 0xf000) == 0x1000) {
|
||||
/* 64k page */
|
||||
} else
|
||||
#endif
|
||||
if ((ptel & 0xff000) == 0) {
|
||||
/* 16M page */
|
||||
page_shift = 24;
|
||||
/* lowest AVA bit must be 0 for 16M pages */
|
||||
if (pteh & 0x80) {
|
||||
return H_PARAMETER;
|
||||
}
|
||||
} else {
|
||||
return H_PARAMETER;
|
||||
}
|
||||
apshift = ppc_hash64_hpte_page_shift_noslb(cpu, pteh, ptel, &spshift);
|
||||
if (!apshift) {
|
||||
/* Bad page size encoding */
|
||||
return H_PARAMETER;
|
||||
}
|
||||
|
||||
raddr = (ptel & HPTE64_R_RPN) & ~((1ULL << page_shift) - 1);
|
||||
raddr = (ptel & HPTE64_R_RPN) & ~((1ULL << apshift) - 1);
|
||||
|
||||
if (raddr < machine->ram_size) {
|
||||
if (is_ram_address(spapr, raddr)) {
|
||||
/* Regular RAM - should have WIMG=0010 */
|
||||
if ((ptel & HPTE64_R_WIMG) != HPTE64_R_M) {
|
||||
return H_PARAMETER;
|
||||
|
@ -146,7 +112,7 @@ static target_ulong h_enter(PowerPCCPU *cpu, sPAPRMachineState *spapr,
|
|||
pte_index &= ~7ULL;
|
||||
token = ppc_hash64_start_access(cpu, pte_index);
|
||||
for (; index < 8; index++) {
|
||||
if ((ppc_hash64_load_hpte0(env, token, index) & HPTE64_V_VALID) == 0) {
|
||||
if (!(ppc_hash64_load_hpte0(cpu, token, index) & HPTE64_V_VALID)) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
@ -156,14 +122,14 @@ static target_ulong h_enter(PowerPCCPU *cpu, sPAPRMachineState *spapr,
|
|||
}
|
||||
} else {
|
||||
token = ppc_hash64_start_access(cpu, pte_index);
|
||||
if (ppc_hash64_load_hpte0(env, token, 0) & HPTE64_V_VALID) {
|
||||
if (ppc_hash64_load_hpte0(cpu, token, 0) & HPTE64_V_VALID) {
|
||||
ppc_hash64_stop_access(token);
|
||||
return H_PTEG_FULL;
|
||||
}
|
||||
ppc_hash64_stop_access(token);
|
||||
}
|
||||
|
||||
ppc_hash64_store_hpte(env, pte_index + index,
|
||||
ppc_hash64_store_hpte(cpu, pte_index + index,
|
||||
pteh | HPTE64_V_HPTE_DIRTY, ptel);
|
||||
|
||||
args[0] = pte_index + index;
|
||||
|
@ -177,21 +143,22 @@ typedef enum {
|
|||
REMOVE_HW = 3,
|
||||
} RemoveResult;
|
||||
|
||||
static RemoveResult remove_hpte(CPUPPCState *env, target_ulong ptex,
|
||||
static RemoveResult remove_hpte(PowerPCCPU *cpu, target_ulong ptex,
|
||||
target_ulong avpn,
|
||||
target_ulong flags,
|
||||
target_ulong *vp, target_ulong *rp)
|
||||
{
|
||||
CPUPPCState *env = &cpu->env;
|
||||
uint64_t token;
|
||||
target_ulong v, r, rb;
|
||||
target_ulong v, r;
|
||||
|
||||
if (!valid_pte_index(env, ptex)) {
|
||||
return REMOVE_PARM;
|
||||
}
|
||||
|
||||
token = ppc_hash64_start_access(ppc_env_get_cpu(env), ptex);
|
||||
v = ppc_hash64_load_hpte0(env, token, 0);
|
||||
r = ppc_hash64_load_hpte1(env, token, 0);
|
||||
token = ppc_hash64_start_access(cpu, ptex);
|
||||
v = ppc_hash64_load_hpte0(cpu, token, 0);
|
||||
r = ppc_hash64_load_hpte1(cpu, token, 0);
|
||||
ppc_hash64_stop_access(token);
|
||||
|
||||
if ((v & HPTE64_V_VALID) == 0 ||
|
||||
|
@ -201,22 +168,20 @@ static RemoveResult remove_hpte(CPUPPCState *env, target_ulong ptex,
|
|||
}
|
||||
*vp = v;
|
||||
*rp = r;
|
||||
ppc_hash64_store_hpte(env, ptex, HPTE64_V_HPTE_DIRTY, 0);
|
||||
rb = compute_tlbie_rb(v, r, ptex);
|
||||
ppc_tlb_invalidate_one(env, rb);
|
||||
ppc_hash64_store_hpte(cpu, ptex, HPTE64_V_HPTE_DIRTY, 0);
|
||||
ppc_hash64_tlb_flush_hpte(cpu, ptex, v, r);
|
||||
return REMOVE_SUCCESS;
|
||||
}
|
||||
|
||||
static target_ulong h_remove(PowerPCCPU *cpu, sPAPRMachineState *spapr,
|
||||
target_ulong opcode, target_ulong *args)
|
||||
{
|
||||
CPUPPCState *env = &cpu->env;
|
||||
target_ulong flags = args[0];
|
||||
target_ulong pte_index = args[1];
|
||||
target_ulong avpn = args[2];
|
||||
RemoveResult ret;
|
||||
|
||||
ret = remove_hpte(env, pte_index, avpn, flags,
|
||||
ret = remove_hpte(cpu, pte_index, avpn, flags,
|
||||
&args[0], &args[1]);
|
||||
|
||||
switch (ret) {
|
||||
|
@ -257,7 +222,6 @@ static target_ulong h_remove(PowerPCCPU *cpu, sPAPRMachineState *spapr,
|
|||
static target_ulong h_bulk_remove(PowerPCCPU *cpu, sPAPRMachineState *spapr,
|
||||
target_ulong opcode, target_ulong *args)
|
||||
{
|
||||
CPUPPCState *env = &cpu->env;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < H_BULK_REMOVE_MAX_BATCH; i++) {
|
||||
|
@ -279,7 +243,7 @@ static target_ulong h_bulk_remove(PowerPCCPU *cpu, sPAPRMachineState *spapr,
|
|||
return H_PARAMETER;
|
||||
}
|
||||
|
||||
ret = remove_hpte(env, *tsh & H_BULK_REMOVE_PTEX, tsl,
|
||||
ret = remove_hpte(cpu, *tsh & H_BULK_REMOVE_PTEX, tsl,
|
||||
(*tsh & H_BULK_REMOVE_FLAGS) >> 26,
|
||||
&v, &r);
|
||||
|
||||
|
@ -309,15 +273,15 @@ static target_ulong h_protect(PowerPCCPU *cpu, sPAPRMachineState *spapr,
|
|||
target_ulong pte_index = args[1];
|
||||
target_ulong avpn = args[2];
|
||||
uint64_t token;
|
||||
target_ulong v, r, rb;
|
||||
target_ulong v, r;
|
||||
|
||||
if (!valid_pte_index(env, pte_index)) {
|
||||
return H_PARAMETER;
|
||||
}
|
||||
|
||||
token = ppc_hash64_start_access(cpu, pte_index);
|
||||
v = ppc_hash64_load_hpte0(env, token, 0);
|
||||
r = ppc_hash64_load_hpte1(env, token, 0);
|
||||
v = ppc_hash64_load_hpte0(cpu, token, 0);
|
||||
r = ppc_hash64_load_hpte1(cpu, token, 0);
|
||||
ppc_hash64_stop_access(token);
|
||||
|
||||
if ((v & HPTE64_V_VALID) == 0 ||
|
||||
|
@ -330,12 +294,11 @@ static target_ulong h_protect(PowerPCCPU *cpu, sPAPRMachineState *spapr,
|
|||
r |= (flags << 55) & HPTE64_R_PP0;
|
||||
r |= (flags << 48) & HPTE64_R_KEY_HI;
|
||||
r |= flags & (HPTE64_R_PP | HPTE64_R_N | HPTE64_R_KEY_LO);
|
||||
rb = compute_tlbie_rb(v, r, pte_index);
|
||||
ppc_hash64_store_hpte(env, pte_index,
|
||||
ppc_hash64_store_hpte(cpu, pte_index,
|
||||
(v & ~HPTE64_V_VALID) | HPTE64_V_HPTE_DIRTY, 0);
|
||||
ppc_tlb_invalidate_one(env, rb);
|
||||
ppc_hash64_tlb_flush_hpte(cpu, pte_index, v, r);
|
||||
/* Don't need a memory barrier, due to qemu's global lock */
|
||||
ppc_hash64_store_hpte(env, pte_index, v | HPTE64_V_HPTE_DIRTY, r);
|
||||
ppc_hash64_store_hpte(cpu, pte_index, v | HPTE64_V_HPTE_DIRTY, r);
|
||||
return H_SUCCESS;
|
||||
}
|
||||
|
||||
|
@ -838,7 +801,7 @@ static target_ulong cas_get_option_vector(int vector, target_ulong table)
|
|||
typedef struct {
|
||||
PowerPCCPU *cpu;
|
||||
uint32_t cpu_version;
|
||||
int ret;
|
||||
Error *err;
|
||||
} SetCompatState;
|
||||
|
||||
static void do_set_compat(void *arg)
|
||||
|
@ -846,7 +809,7 @@ static void do_set_compat(void *arg)
|
|||
SetCompatState *s = arg;
|
||||
|
||||
cpu_synchronize_state(CPU(s->cpu));
|
||||
s->ret = ppc_set_compat(s->cpu, s->cpu_version);
|
||||
ppc_set_compat(s->cpu, s->cpu_version, &s->err);
|
||||
}
|
||||
|
||||
#define get_compat_level(cpuver) ( \
|
||||
|
@ -862,7 +825,8 @@ static target_ulong h_client_architecture_support(PowerPCCPU *cpu_,
|
|||
target_ulong opcode,
|
||||
target_ulong *args)
|
||||
{
|
||||
target_ulong list = args[0], ov_table;
|
||||
target_ulong list = ppc64_phys_to_real(args[0]);
|
||||
target_ulong ov_table, ov5;
|
||||
PowerPCCPUClass *pcc_ = POWERPC_CPU_GET_CLASS(cpu_);
|
||||
CPUState *cs;
|
||||
bool cpu_match = false, cpu_update = true, memory_update = false;
|
||||
|
@ -876,9 +840,9 @@ static target_ulong h_client_architecture_support(PowerPCCPU *cpu_,
|
|||
for (counter = 0; counter < 512; ++counter) {
|
||||
uint32_t pvr, pvr_mask;
|
||||
|
||||
pvr_mask = rtas_ld(list, 0);
|
||||
pvr_mask = ldl_be_phys(&address_space_memory, list);
|
||||
list += 4;
|
||||
pvr = rtas_ld(list, 0);
|
||||
pvr = ldl_be_phys(&address_space_memory, list);
|
||||
list += 4;
|
||||
|
||||
trace_spapr_cas_pvr_try(pvr);
|
||||
|
@ -930,13 +894,13 @@ static target_ulong h_client_architecture_support(PowerPCCPU *cpu_,
|
|||
SetCompatState s = {
|
||||
.cpu = POWERPC_CPU(cs),
|
||||
.cpu_version = cpu_version,
|
||||
.ret = 0
|
||||
.err = NULL,
|
||||
};
|
||||
|
||||
run_on_cpu(cs, do_set_compat, &s);
|
||||
|
||||
if (s.ret < 0) {
|
||||
fprintf(stderr, "Unable to set compatibility mode\n");
|
||||
if (s.err) {
|
||||
error_report_err(s.err);
|
||||
return H_HARDWARE;
|
||||
}
|
||||
}
|
||||
|
@ -949,14 +913,13 @@ static target_ulong h_client_architecture_support(PowerPCCPU *cpu_,
|
|||
/* For the future use: here @ov_table points to the first option vector */
|
||||
ov_table = list;
|
||||
|
||||
list = cas_get_option_vector(5, ov_table);
|
||||
if (!list) {
|
||||
ov5 = cas_get_option_vector(5, ov_table);
|
||||
if (!ov5) {
|
||||
return H_SUCCESS;
|
||||
}
|
||||
|
||||
/* @list now points to OV 5 */
|
||||
list += 2;
|
||||
ov5_byte2 = rtas_ld(list, 0) >> 24;
|
||||
ov5_byte2 = ldub_phys(&address_space_memory, ov5 + 2);
|
||||
if (ov5_byte2 & OV5_DRCONF_MEMORY) {
|
||||
memory_update = true;
|
||||
}
|
||||
|
|
|
@ -229,6 +229,19 @@ static void rtas_stop_self(PowerPCCPU *cpu, sPAPRMachineState *spapr,
|
|||
env->msr = 0;
|
||||
}
|
||||
|
||||
static inline int sysparm_st(target_ulong addr, target_ulong len,
|
||||
const void *val, uint16_t vallen)
|
||||
{
|
||||
hwaddr phys = ppc64_phys_to_real(addr);
|
||||
|
||||
if (len < 2) {
|
||||
return RTAS_OUT_SYSPARM_PARAM_ERROR;
|
||||
}
|
||||
stw_be_phys(&address_space_memory, phys, vallen);
|
||||
cpu_physical_memory_write(phys + 2, val, MIN(len - 2, vallen));
|
||||
return RTAS_OUT_SUCCESS;
|
||||
}
|
||||
|
||||
static void rtas_ibm_get_system_parameter(PowerPCCPU *cpu,
|
||||
sPAPRMachineState *spapr,
|
||||
uint32_t token, uint32_t nargs,
|
||||
|
@ -238,7 +251,7 @@ static void rtas_ibm_get_system_parameter(PowerPCCPU *cpu,
|
|||
target_ulong parameter = rtas_ld(args, 0);
|
||||
target_ulong buffer = rtas_ld(args, 1);
|
||||
target_ulong length = rtas_ld(args, 2);
|
||||
target_ulong ret = RTAS_OUT_SUCCESS;
|
||||
target_ulong ret;
|
||||
|
||||
switch (parameter) {
|
||||
case RTAS_SYSPARM_SPLPAR_CHARACTERISTICS: {
|
||||
|
@ -250,18 +263,18 @@ static void rtas_ibm_get_system_parameter(PowerPCCPU *cpu,
|
|||
current_machine->ram_size / M_BYTE,
|
||||
smp_cpus,
|
||||
max_cpus);
|
||||
rtas_st_buffer(buffer, length, (uint8_t *)param_val, strlen(param_val));
|
||||
ret = sysparm_st(buffer, length, param_val, strlen(param_val) + 1);
|
||||
g_free(param_val);
|
||||
break;
|
||||
}
|
||||
case RTAS_SYSPARM_DIAGNOSTICS_RUN_MODE: {
|
||||
uint8_t param_val = DIAGNOSTICS_RUN_MODE_DISABLED;
|
||||
|
||||
rtas_st_buffer(buffer, length, ¶m_val, sizeof(param_val));
|
||||
ret = sysparm_st(buffer, length, ¶m_val, sizeof(param_val));
|
||||
break;
|
||||
}
|
||||
case RTAS_SYSPARM_UUID:
|
||||
rtas_st_buffer(buffer, length, qemu_uuid, (qemu_uuid_set ? 16 : 0));
|
||||
ret = sysparm_st(buffer, length, qemu_uuid, (qemu_uuid_set ? 16 : 0));
|
||||
break;
|
||||
default:
|
||||
ret = RTAS_OUT_NOT_SUPPORTED;
|
||||
|
@ -493,6 +506,13 @@ out:
|
|||
#define CC_VAL_DATA_OFFSET ((CC_IDX_PROP_DATA_OFFSET + 1) * 4)
|
||||
#define CC_WA_LEN 4096
|
||||
|
||||
static void configure_connector_st(target_ulong addr, target_ulong offset,
|
||||
const void *buf, size_t len)
|
||||
{
|
||||
cpu_physical_memory_write(ppc64_phys_to_real(addr + offset),
|
||||
buf, MIN(len, CC_WA_LEN - offset));
|
||||
}
|
||||
|
||||
static void rtas_ibm_configure_connector(PowerPCCPU *cpu,
|
||||
sPAPRMachineState *spapr,
|
||||
uint32_t token, uint32_t nargs,
|
||||
|
@ -558,8 +578,7 @@ static void rtas_ibm_configure_connector(PowerPCCPU *cpu,
|
|||
/* provide the name of the next OF node */
|
||||
wa_offset = CC_VAL_DATA_OFFSET;
|
||||
rtas_st(wa_addr, CC_IDX_NODE_NAME_OFFSET, wa_offset);
|
||||
rtas_st_buffer_direct(wa_addr + wa_offset, CC_WA_LEN - wa_offset,
|
||||
(uint8_t *)name, strlen(name) + 1);
|
||||
configure_connector_st(wa_addr, wa_offset, name, strlen(name) + 1);
|
||||
resp = SPAPR_DR_CC_RESPONSE_NEXT_CHILD;
|
||||
break;
|
||||
case FDT_END_NODE:
|
||||
|
@ -584,8 +603,7 @@ static void rtas_ibm_configure_connector(PowerPCCPU *cpu,
|
|||
/* provide the name of the next OF property */
|
||||
wa_offset = CC_VAL_DATA_OFFSET;
|
||||
rtas_st(wa_addr, CC_IDX_PROP_NAME_OFFSET, wa_offset);
|
||||
rtas_st_buffer_direct(wa_addr + wa_offset, CC_WA_LEN - wa_offset,
|
||||
(uint8_t *)name, strlen(name) + 1);
|
||||
configure_connector_st(wa_addr, wa_offset, name, strlen(name) + 1);
|
||||
|
||||
/* provide the length and value of the OF property. data gets
|
||||
* placed immediately after NULL terminator of the OF property's
|
||||
|
@ -594,9 +612,7 @@ static void rtas_ibm_configure_connector(PowerPCCPU *cpu,
|
|||
wa_offset += strlen(name) + 1,
|
||||
rtas_st(wa_addr, CC_IDX_PROP_LEN, prop_len);
|
||||
rtas_st(wa_addr, CC_IDX_PROP_DATA_OFFSET, wa_offset);
|
||||
rtas_st_buffer_direct(wa_addr + wa_offset, CC_WA_LEN - wa_offset,
|
||||
(uint8_t *)((struct fdt_property *)prop)->data,
|
||||
prop_len);
|
||||
configure_connector_st(wa_addr, wa_offset, prop->data, prop_len);
|
||||
resp = SPAPR_DR_CC_RESPONSE_NEXT_PROPERTY;
|
||||
break;
|
||||
case FDT_END:
|
||||
|
@ -649,17 +665,11 @@ target_ulong spapr_rtas_call(PowerPCCPU *cpu, sPAPRMachineState *spapr,
|
|||
|
||||
void spapr_rtas_register(int token, const char *name, spapr_rtas_fn fn)
|
||||
{
|
||||
if (!((token >= RTAS_TOKEN_BASE) && (token < RTAS_TOKEN_MAX))) {
|
||||
fprintf(stderr, "RTAS invalid token 0x%x\n", token);
|
||||
exit(1);
|
||||
}
|
||||
assert((token >= RTAS_TOKEN_BASE) && (token < RTAS_TOKEN_MAX));
|
||||
|
||||
token -= RTAS_TOKEN_BASE;
|
||||
if (rtas_table[token].name) {
|
||||
fprintf(stderr, "RTAS call \"%s\" is registered already as 0x%x\n",
|
||||
rtas_table[token].name, token);
|
||||
exit(1);
|
||||
}
|
||||
|
||||
assert(!rtas_table[token].name);
|
||||
|
||||
rtas_table[token].name = name;
|
||||
rtas_table[token].fn = fn;
|
||||
|
|
|
@ -408,14 +408,15 @@ int spapr_allocate_irq_block(int num, bool lsi, bool msi);
|
|||
#define RTAS_SLOT_PERM_ERR_LOG 2
|
||||
|
||||
/* RTAS return codes */
|
||||
#define RTAS_OUT_SUCCESS 0
|
||||
#define RTAS_OUT_NO_ERRORS_FOUND 1
|
||||
#define RTAS_OUT_HW_ERROR -1
|
||||
#define RTAS_OUT_BUSY -2
|
||||
#define RTAS_OUT_PARAM_ERROR -3
|
||||
#define RTAS_OUT_NOT_SUPPORTED -3
|
||||
#define RTAS_OUT_NO_SUCH_INDICATOR -3
|
||||
#define RTAS_OUT_NOT_AUTHORIZED -9002
|
||||
#define RTAS_OUT_SUCCESS 0
|
||||
#define RTAS_OUT_NO_ERRORS_FOUND 1
|
||||
#define RTAS_OUT_HW_ERROR -1
|
||||
#define RTAS_OUT_BUSY -2
|
||||
#define RTAS_OUT_PARAM_ERROR -3
|
||||
#define RTAS_OUT_NOT_SUPPORTED -3
|
||||
#define RTAS_OUT_NO_SUCH_INDICATOR -3
|
||||
#define RTAS_OUT_NOT_AUTHORIZED -9002
|
||||
#define RTAS_OUT_SYSPARM_PARAM_ERROR -9999
|
||||
|
||||
/* RTAS tokens */
|
||||
#define RTAS_TOKEN_BASE 0x2000
|
||||
|
@ -505,25 +506,6 @@ static inline void rtas_st(target_ulong phys, int n, uint32_t val)
|
|||
stl_be_phys(&address_space_memory, ppc64_phys_to_real(phys + 4*n), val);
|
||||
}
|
||||
|
||||
static inline void rtas_st_buffer_direct(target_ulong phys,
|
||||
target_ulong phys_len,
|
||||
uint8_t *buffer, uint16_t buffer_len)
|
||||
{
|
||||
cpu_physical_memory_write(ppc64_phys_to_real(phys), buffer,
|
||||
MIN(buffer_len, phys_len));
|
||||
}
|
||||
|
||||
static inline void rtas_st_buffer(target_ulong phys, target_ulong phys_len,
|
||||
uint8_t *buffer, uint16_t buffer_len)
|
||||
{
|
||||
if (phys_len < 2) {
|
||||
return;
|
||||
}
|
||||
stw_be_phys(&address_space_memory,
|
||||
ppc64_phys_to_real(phys), buffer_len);
|
||||
rtas_st_buffer_direct(phys + 2, phys_len - 2, buffer, buffer_len);
|
||||
}
|
||||
|
||||
typedef void (*spapr_rtas_fn)(PowerPCCPU *cpu, sPAPRMachineState *sm,
|
||||
uint32_t token,
|
||||
uint32_t nargs, target_ulong args,
|
||||
|
|
|
@ -1139,10 +1139,10 @@
|
|||
"POWER7 v2.3")
|
||||
POWERPC_DEF("POWER7+_v2.1", CPU_POWERPC_POWER7P_v21, POWER7,
|
||||
"POWER7+ v2.1")
|
||||
POWERPC_DEF("POWER8E_v1.0", CPU_POWERPC_POWER8E_v10, POWER8,
|
||||
"POWER8E v1.0")
|
||||
POWERPC_DEF("POWER8_v1.0", CPU_POWERPC_POWER8_v10, POWER8,
|
||||
"POWER8 v1.0")
|
||||
POWERPC_DEF("POWER8E_v2.1", CPU_POWERPC_POWER8E_v21, POWER8,
|
||||
"POWER8E v2.1")
|
||||
POWERPC_DEF("POWER8_v2.0", CPU_POWERPC_POWER8_v20, POWER8,
|
||||
"POWER8 v2.0")
|
||||
POWERPC_DEF("970_v2.2", CPU_POWERPC_970_v22, 970,
|
||||
"PowerPC 970 v2.2")
|
||||
POWERPC_DEF("970fx_v1.0", CPU_POWERPC_970FX_v10, 970,
|
||||
|
@ -1390,8 +1390,8 @@ PowerPCCPUAlias ppc_cpu_aliases[] = {
|
|||
{ "POWER5gs", "POWER5+_v2.1" },
|
||||
{ "POWER7", "POWER7_v2.3" },
|
||||
{ "POWER7+", "POWER7+_v2.1" },
|
||||
{ "POWER8E", "POWER8E_v1.0" },
|
||||
{ "POWER8", "POWER8_v1.0" },
|
||||
{ "POWER8E", "POWER8E_v2.1" },
|
||||
{ "POWER8", "POWER8_v2.0" },
|
||||
{ "970", "970_v2.2" },
|
||||
{ "970fx", "970fx_v3.1" },
|
||||
{ "970mp", "970mp_v1.1" },
|
||||
|
|
|
@ -557,9 +557,9 @@ enum {
|
|||
CPU_POWERPC_POWER7P_BASE = 0x004A0000,
|
||||
CPU_POWERPC_POWER7P_v21 = 0x004A0201,
|
||||
CPU_POWERPC_POWER8E_BASE = 0x004B0000,
|
||||
CPU_POWERPC_POWER8E_v10 = 0x004B0100,
|
||||
CPU_POWERPC_POWER8E_v21 = 0x004B0201,
|
||||
CPU_POWERPC_POWER8_BASE = 0x004D0000,
|
||||
CPU_POWERPC_POWER8_v10 = 0x004D0100,
|
||||
CPU_POWERPC_POWER8_v20 = 0x004D0200,
|
||||
CPU_POWERPC_970_v22 = 0x00390202,
|
||||
CPU_POWERPC_970FX_v10 = 0x00391100,
|
||||
CPU_POWERPC_970FX_v20 = 0x003C0200,
|
||||
|
|
|
@ -419,6 +419,7 @@ typedef struct ppc_slb_t ppc_slb_t;
|
|||
struct ppc_slb_t {
|
||||
uint64_t esid;
|
||||
uint64_t vsid;
|
||||
const struct ppc_one_seg_page_size *sps;
|
||||
};
|
||||
|
||||
#define MAX_SLB_ENTRIES 64
|
||||
|
@ -686,24 +687,43 @@ enum {
|
|||
|
||||
#define FP_FX (1ull << FPSCR_FX)
|
||||
#define FP_FEX (1ull << FPSCR_FEX)
|
||||
#define FP_OX (1ull << FPSCR_OX)
|
||||
#define FP_OE (1ull << FPSCR_OE)
|
||||
#define FP_UX (1ull << FPSCR_UX)
|
||||
#define FP_UE (1ull << FPSCR_UE)
|
||||
#define FP_XX (1ull << FPSCR_XX)
|
||||
#define FP_XE (1ull << FPSCR_XE)
|
||||
#define FP_ZX (1ull << FPSCR_ZX)
|
||||
#define FP_ZE (1ull << FPSCR_ZE)
|
||||
#define FP_VX (1ull << FPSCR_VX)
|
||||
#define FP_OX (1ull << FPSCR_OX)
|
||||
#define FP_UX (1ull << FPSCR_UX)
|
||||
#define FP_ZX (1ull << FPSCR_ZX)
|
||||
#define FP_XX (1ull << FPSCR_XX)
|
||||
#define FP_VXSNAN (1ull << FPSCR_VXSNAN)
|
||||
#define FP_VXISI (1ull << FPSCR_VXISI)
|
||||
#define FP_VXIMZ (1ull << FPSCR_VXIMZ)
|
||||
#define FP_VXZDZ (1ull << FPSCR_VXZDZ)
|
||||
#define FP_VXIDI (1ull << FPSCR_VXIDI)
|
||||
#define FP_VXZDZ (1ull << FPSCR_VXZDZ)
|
||||
#define FP_VXIMZ (1ull << FPSCR_VXIMZ)
|
||||
#define FP_VXVC (1ull << FPSCR_VXVC)
|
||||
#define FP_FR (1ull << FSPCR_FR)
|
||||
#define FP_FI (1ull << FPSCR_FI)
|
||||
#define FP_C (1ull << FPSCR_C)
|
||||
#define FP_FL (1ull << FPSCR_FL)
|
||||
#define FP_FG (1ull << FPSCR_FG)
|
||||
#define FP_FE (1ull << FPSCR_FE)
|
||||
#define FP_FU (1ull << FPSCR_FU)
|
||||
#define FP_FPCC (FP_FL | FP_FG | FP_FE | FP_FU)
|
||||
#define FP_FPRF (FP_C | FP_FL | FP_FG | FP_FE | FP_FU)
|
||||
#define FP_VXSOFT (1ull << FPSCR_VXSOFT)
|
||||
#define FP_VXSQRT (1ull << FPSCR_VXSQRT)
|
||||
#define FP_VXCVI (1ull << FPSCR_VXCVI)
|
||||
#define FP_VE (1ull << FPSCR_VE)
|
||||
#define FP_FI (1ull << FPSCR_FI)
|
||||
#define FP_OE (1ull << FPSCR_OE)
|
||||
#define FP_UE (1ull << FPSCR_UE)
|
||||
#define FP_ZE (1ull << FPSCR_ZE)
|
||||
#define FP_XE (1ull << FPSCR_XE)
|
||||
#define FP_NI (1ull << FPSCR_NI)
|
||||
#define FP_RN1 (1ull << FPSCR_RN1)
|
||||
#define FP_RN (1ull << FPSCR_RN)
|
||||
|
||||
/* the exception bits which can be cleared by mcrfs - includes FX */
|
||||
#define FP_EX_CLEAR_BITS (FP_FX | FP_OX | FP_UX | FP_ZX | \
|
||||
FP_XX | FP_VXSNAN | FP_VXISI | FP_VXIDI | \
|
||||
FP_VXZDZ | FP_VXIMZ | FP_VXVC | FP_VXSOFT | \
|
||||
FP_VXSQRT | FP_VXCVI)
|
||||
|
||||
/*****************************************************************************/
|
||||
/* Vector status and control register */
|
||||
|
@ -1210,7 +1230,7 @@ void ppc_store_msr (CPUPPCState *env, target_ulong value);
|
|||
|
||||
void ppc_cpu_list (FILE *f, fprintf_function cpu_fprintf);
|
||||
int ppc_get_compat_smt_threads(PowerPCCPU *cpu);
|
||||
int ppc_set_compat(PowerPCCPU *cpu, uint32_t cpu_version);
|
||||
void ppc_set_compat(PowerPCCPU *cpu, uint32_t cpu_version, Error **errp);
|
||||
|
||||
/* Time-base and decrementer management */
|
||||
#ifndef NO_CPU_IO_DEFS
|
||||
|
@ -2355,4 +2375,5 @@ int ppc_get_vcpu_dt_id(PowerPCCPU *cpu);
|
|||
*/
|
||||
PowerPCCPU *ppc_get_vcpu_by_dt_id(int cpu_dt_id);
|
||||
|
||||
void ppc_maybe_bswap_register(CPUPPCState *env, uint8_t *mem_buf, int len);
|
||||
#endif /* !defined (__CPU_PPC_H__) */
|
||||
|
|
|
@ -88,7 +88,7 @@ static int ppc_gdb_register_len(int n)
|
|||
the proper ordering for the binary, and cannot be changed.
|
||||
For system mode, TARGET_WORDS_BIGENDIAN is always set, and we must check
|
||||
the current mode of the chip to see if we're running in little-endian. */
|
||||
static void maybe_bswap_register(CPUPPCState *env, uint8_t *mem_buf, int len)
|
||||
void ppc_maybe_bswap_register(CPUPPCState *env, uint8_t *mem_buf, int len)
|
||||
{
|
||||
#ifndef CONFIG_USER_ONLY
|
||||
if (!msr_le) {
|
||||
|
@ -158,7 +158,7 @@ int ppc_cpu_gdb_read_register(CPUState *cs, uint8_t *mem_buf, int n)
|
|||
break;
|
||||
}
|
||||
}
|
||||
maybe_bswap_register(env, mem_buf, r);
|
||||
ppc_maybe_bswap_register(env, mem_buf, r);
|
||||
return r;
|
||||
}
|
||||
|
||||
|
@ -214,7 +214,7 @@ int ppc_cpu_gdb_read_register_apple(CPUState *cs, uint8_t *mem_buf, int n)
|
|||
break;
|
||||
}
|
||||
}
|
||||
maybe_bswap_register(env, mem_buf, r);
|
||||
ppc_maybe_bswap_register(env, mem_buf, r);
|
||||
return r;
|
||||
}
|
||||
|
||||
|
@ -227,7 +227,7 @@ int ppc_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
|
|||
if (!r) {
|
||||
return r;
|
||||
}
|
||||
maybe_bswap_register(env, mem_buf, r);
|
||||
ppc_maybe_bswap_register(env, mem_buf, r);
|
||||
if (n < 32) {
|
||||
/* gprs */
|
||||
env->gpr[n] = ldtul_p(mem_buf);
|
||||
|
@ -277,7 +277,7 @@ int ppc_cpu_gdb_write_register_apple(CPUState *cs, uint8_t *mem_buf, int n)
|
|||
if (!r) {
|
||||
return r;
|
||||
}
|
||||
maybe_bswap_register(env, mem_buf, r);
|
||||
ppc_maybe_bswap_register(env, mem_buf, r);
|
||||
if (n < 32) {
|
||||
/* gprs */
|
||||
env->gpr[n] = ldq_p(mem_buf);
|
||||
|
|
|
@ -544,6 +544,7 @@ DEF_HELPER_2(74xx_tlbd, void, env, tl)
|
|||
DEF_HELPER_2(74xx_tlbi, void, env, tl)
|
||||
DEF_HELPER_FLAGS_1(tlbia, TCG_CALL_NO_RWG, void, env)
|
||||
DEF_HELPER_FLAGS_2(tlbie, TCG_CALL_NO_RWG, void, env, tl)
|
||||
DEF_HELPER_FLAGS_2(tlbiva, TCG_CALL_NO_RWG, void, env, tl)
|
||||
#if defined(TARGET_PPC64)
|
||||
DEF_HELPER_FLAGS_3(store_slb, TCG_CALL_NO_RWG, void, env, tl, tl)
|
||||
DEF_HELPER_2(load_slb_esid, tl, env, tl)
|
||||
|
|
|
@ -650,8 +650,13 @@ static int kvm_put_fp(CPUState *cs)
|
|||
for (i = 0; i < 32; i++) {
|
||||
uint64_t vsr[2];
|
||||
|
||||
#ifdef HOST_WORDS_BIGENDIAN
|
||||
vsr[0] = float64_val(env->fpr[i]);
|
||||
vsr[1] = env->vsr[i];
|
||||
#else
|
||||
vsr[0] = env->vsr[i];
|
||||
vsr[1] = float64_val(env->fpr[i]);
|
||||
#endif
|
||||
reg.addr = (uintptr_t) &vsr;
|
||||
reg.id = vsx ? KVM_REG_PPC_VSR(i) : KVM_REG_PPC_FPR(i);
|
||||
|
||||
|
@ -721,10 +726,17 @@ static int kvm_get_fp(CPUState *cs)
|
|||
vsx ? "VSR" : "FPR", i, strerror(errno));
|
||||
return ret;
|
||||
} else {
|
||||
#ifdef HOST_WORDS_BIGENDIAN
|
||||
env->fpr[i] = vsr[0];
|
||||
if (vsx) {
|
||||
env->vsr[i] = vsr[1];
|
||||
}
|
||||
#else
|
||||
env->fpr[i] = vsr[1];
|
||||
if (vsx) {
|
||||
env->vsr[i] = vsr[0];
|
||||
}
|
||||
#endif
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -1193,7 +1205,7 @@ int kvm_arch_get_registers(CPUState *cs)
|
|||
* Only restore valid entries
|
||||
*/
|
||||
if (rb & SLB_ESID_V) {
|
||||
ppc_store_slb(env, rb, rs);
|
||||
ppc_store_slb(cpu, rb & 0xfff, rb & ~0xfffULL, rs);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
|
|
@ -98,11 +98,6 @@ static inline int kvmppc_get_hypercall(CPUPPCState *env, uint8_t *buf, int buf_l
|
|||
return -1;
|
||||
}
|
||||
|
||||
static inline int kvmppc_read_segment_page_sizes(uint32_t *prop, int maxcells)
|
||||
{
|
||||
return -1;
|
||||
}
|
||||
|
||||
static inline int kvmppc_set_interrupt(PowerPCCPU *cpu, int irq, int level)
|
||||
{
|
||||
return -1;
|
||||
|
|
|
@ -3,6 +3,7 @@
|
|||
#include "hw/boards.h"
|
||||
#include "sysemu/kvm.h"
|
||||
#include "helper_regs.h"
|
||||
#include "mmu-hash64.h"
|
||||
|
||||
static int cpu_load_old(QEMUFile *f, void *opaque, int version_id)
|
||||
{
|
||||
|
@ -169,7 +170,7 @@ static int cpu_post_load(void *opaque, int version_id)
|
|||
env->spr[SPR_PVR] = env->spr_cb[SPR_PVR].default_value;
|
||||
env->lr = env->spr[SPR_LR];
|
||||
env->ctr = env->spr[SPR_CTR];
|
||||
env->xer = env->spr[SPR_XER];
|
||||
cpu_write_xer(env, env->spr[SPR_XER]);
|
||||
#if defined(TARGET_PPC64)
|
||||
env->cfar = env->spr[SPR_CFAR];
|
||||
#endif
|
||||
|
@ -353,11 +354,30 @@ static bool slb_needed(void *opaque)
|
|||
return (cpu->env.mmu_model & POWERPC_MMU_64);
|
||||
}
|
||||
|
||||
static int slb_post_load(void *opaque, int version_id)
|
||||
{
|
||||
PowerPCCPU *cpu = opaque;
|
||||
CPUPPCState *env = &cpu->env;
|
||||
int i;
|
||||
|
||||
/* We've pulled in the raw esid and vsid values from the migration
|
||||
* stream, but we need to recompute the page size pointers */
|
||||
for (i = 0; i < env->slb_nr; i++) {
|
||||
if (ppc_store_slb(cpu, i, env->slb[i].esid, env->slb[i].vsid) < 0) {
|
||||
/* Migration source had bad values in its SLB */
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const VMStateDescription vmstate_slb = {
|
||||
.name = "cpu/slb",
|
||||
.version_id = 1,
|
||||
.minimum_version_id = 1,
|
||||
.needed = slb_needed,
|
||||
.post_load = slb_post_load,
|
||||
.fields = (VMStateField[]) {
|
||||
VMSTATE_INT32_EQUAL(env.slb_nr, PowerPCCPU),
|
||||
VMSTATE_SLB_ARRAY(env.slb, PowerPCCPU, MAX_SLB_ENTRIES),
|
||||
|
|
|
@ -84,9 +84,10 @@ static int ppc_hash32_pp_prot(int key, int pp, int nx)
|
|||
return prot;
|
||||
}
|
||||
|
||||
static int ppc_hash32_pte_prot(CPUPPCState *env,
|
||||
static int ppc_hash32_pte_prot(PowerPCCPU *cpu,
|
||||
target_ulong sr, ppc_hash_pte32_t pte)
|
||||
{
|
||||
CPUPPCState *env = &cpu->env;
|
||||
unsigned pp, key;
|
||||
|
||||
key = !!(msr_pr ? (sr & SR32_KP) : (sr & SR32_KS));
|
||||
|
@ -95,9 +96,11 @@ static int ppc_hash32_pte_prot(CPUPPCState *env,
|
|||
return ppc_hash32_pp_prot(key, pp, !!(sr & SR32_NX));
|
||||
}
|
||||
|
||||
static target_ulong hash32_bat_size(CPUPPCState *env,
|
||||
static target_ulong hash32_bat_size(PowerPCCPU *cpu,
|
||||
target_ulong batu, target_ulong batl)
|
||||
{
|
||||
CPUPPCState *env = &cpu->env;
|
||||
|
||||
if ((msr_pr && !(batu & BATU32_VP))
|
||||
|| (!msr_pr && !(batu & BATU32_VS))) {
|
||||
return 0;
|
||||
|
@ -106,7 +109,7 @@ static target_ulong hash32_bat_size(CPUPPCState *env,
|
|||
return BATU32_BEPI & ~((batu & BATU32_BL) << 15);
|
||||
}
|
||||
|
||||
static int hash32_bat_prot(CPUPPCState *env,
|
||||
static int hash32_bat_prot(PowerPCCPU *cpu,
|
||||
target_ulong batu, target_ulong batl)
|
||||
{
|
||||
int pp, prot;
|
||||
|
@ -122,7 +125,7 @@ static int hash32_bat_prot(CPUPPCState *env,
|
|||
return prot;
|
||||
}
|
||||
|
||||
static target_ulong hash32_bat_601_size(CPUPPCState *env,
|
||||
static target_ulong hash32_bat_601_size(PowerPCCPU *cpu,
|
||||
target_ulong batu, target_ulong batl)
|
||||
{
|
||||
if (!(batl & BATL32_601_V)) {
|
||||
|
@ -132,9 +135,10 @@ static target_ulong hash32_bat_601_size(CPUPPCState *env,
|
|||
return BATU32_BEPI & ~((batl & BATL32_601_BL) << 17);
|
||||
}
|
||||
|
||||
static int hash32_bat_601_prot(CPUPPCState *env,
|
||||
static int hash32_bat_601_prot(PowerPCCPU *cpu,
|
||||
target_ulong batu, target_ulong batl)
|
||||
{
|
||||
CPUPPCState *env = &cpu->env;
|
||||
int key, pp;
|
||||
|
||||
pp = batu & BATU32_601_PP;
|
||||
|
@ -146,9 +150,10 @@ static int hash32_bat_601_prot(CPUPPCState *env,
|
|||
return ppc_hash32_pp_prot(key, pp, 0);
|
||||
}
|
||||
|
||||
static hwaddr ppc_hash32_bat_lookup(CPUPPCState *env, target_ulong ea, int rwx,
|
||||
static hwaddr ppc_hash32_bat_lookup(PowerPCCPU *cpu, target_ulong ea, int rwx,
|
||||
int *prot)
|
||||
{
|
||||
CPUPPCState *env = &cpu->env;
|
||||
target_ulong *BATlt, *BATut;
|
||||
int i;
|
||||
|
||||
|
@ -167,9 +172,9 @@ static hwaddr ppc_hash32_bat_lookup(CPUPPCState *env, target_ulong ea, int rwx,
|
|||
target_ulong mask;
|
||||
|
||||
if (unlikely(env->mmu_model == POWERPC_MMU_601)) {
|
||||
mask = hash32_bat_601_size(env, batu, batl);
|
||||
mask = hash32_bat_601_size(cpu, batu, batl);
|
||||
} else {
|
||||
mask = hash32_bat_size(env, batu, batl);
|
||||
mask = hash32_bat_size(cpu, batu, batl);
|
||||
}
|
||||
LOG_BATS("%s: %cBAT%d v " TARGET_FMT_lx " BATu " TARGET_FMT_lx
|
||||
" BATl " TARGET_FMT_lx "\n", __func__,
|
||||
|
@ -179,9 +184,9 @@ static hwaddr ppc_hash32_bat_lookup(CPUPPCState *env, target_ulong ea, int rwx,
|
|||
hwaddr raddr = (batl & mask) | (ea & ~mask);
|
||||
|
||||
if (unlikely(env->mmu_model == POWERPC_MMU_601)) {
|
||||
*prot = hash32_bat_601_prot(env, batu, batl);
|
||||
*prot = hash32_bat_601_prot(cpu, batu, batl);
|
||||
} else {
|
||||
*prot = hash32_bat_prot(env, batu, batl);
|
||||
*prot = hash32_bat_prot(cpu, batu, batl);
|
||||
}
|
||||
|
||||
return raddr & TARGET_PAGE_MASK;
|
||||
|
@ -210,11 +215,12 @@ static hwaddr ppc_hash32_bat_lookup(CPUPPCState *env, target_ulong ea, int rwx,
|
|||
return -1;
|
||||
}
|
||||
|
||||
static int ppc_hash32_direct_store(CPUPPCState *env, target_ulong sr,
|
||||
static int ppc_hash32_direct_store(PowerPCCPU *cpu, target_ulong sr,
|
||||
target_ulong eaddr, int rwx,
|
||||
hwaddr *raddr, int *prot)
|
||||
{
|
||||
CPUState *cs = CPU(ppc_env_get_cpu(env));
|
||||
CPUState *cs = CPU(cpu);
|
||||
CPUPPCState *env = &cpu->env;
|
||||
int key = !!(msr_pr ? (sr & SR32_KP) : (sr & SR32_KS));
|
||||
|
||||
qemu_log_mask(CPU_LOG_MMU, "direct store...\n");
|
||||
|
@ -294,12 +300,14 @@ static int ppc_hash32_direct_store(CPUPPCState *env, target_ulong sr,
|
|||
}
|
||||
}
|
||||
|
||||
hwaddr get_pteg_offset32(CPUPPCState *env, hwaddr hash)
|
||||
hwaddr get_pteg_offset32(PowerPCCPU *cpu, hwaddr hash)
|
||||
{
|
||||
CPUPPCState *env = &cpu->env;
|
||||
|
||||
return (hash * HASH_PTEG_SIZE_32) & env->htab_mask;
|
||||
}
|
||||
|
||||
static hwaddr ppc_hash32_pteg_search(CPUPPCState *env, hwaddr pteg_off,
|
||||
static hwaddr ppc_hash32_pteg_search(PowerPCCPU *cpu, hwaddr pteg_off,
|
||||
bool secondary, target_ulong ptem,
|
||||
ppc_hash_pte32_t *pte)
|
||||
{
|
||||
|
@ -308,8 +316,8 @@ static hwaddr ppc_hash32_pteg_search(CPUPPCState *env, hwaddr pteg_off,
|
|||
int i;
|
||||
|
||||
for (i = 0; i < HPTES_PER_GROUP; i++) {
|
||||
pte0 = ppc_hash32_load_hpte0(env, pte_offset);
|
||||
pte1 = ppc_hash32_load_hpte1(env, pte_offset);
|
||||
pte0 = ppc_hash32_load_hpte0(cpu, pte_offset);
|
||||
pte1 = ppc_hash32_load_hpte1(cpu, pte_offset);
|
||||
|
||||
if ((pte0 & HPTE32_V_VALID)
|
||||
&& (secondary == !!(pte0 & HPTE32_V_SECONDARY))
|
||||
|
@ -325,10 +333,11 @@ static hwaddr ppc_hash32_pteg_search(CPUPPCState *env, hwaddr pteg_off,
|
|||
return -1;
|
||||
}
|
||||
|
||||
static hwaddr ppc_hash32_htab_lookup(CPUPPCState *env,
|
||||
static hwaddr ppc_hash32_htab_lookup(PowerPCCPU *cpu,
|
||||
target_ulong sr, target_ulong eaddr,
|
||||
ppc_hash_pte32_t *pte)
|
||||
{
|
||||
CPUPPCState *env = &cpu->env;
|
||||
hwaddr pteg_off, pte_offset;
|
||||
hwaddr hash;
|
||||
uint32_t vsid, pgidx, ptem;
|
||||
|
@@ -349,16 +358,16 @@ static hwaddr ppc_hash32_htab_lookup(CPUPPCState *env,
             " vsid=%" PRIx32 " ptem=%" PRIx32
             " hash=" TARGET_FMT_plx "\n",
             env->htab_base, env->htab_mask, vsid, ptem, hash);
-    pteg_off = get_pteg_offset32(env, hash);
-    pte_offset = ppc_hash32_pteg_search(env, pteg_off, 0, ptem, pte);
+    pteg_off = get_pteg_offset32(cpu, hash);
+    pte_offset = ppc_hash32_pteg_search(cpu, pteg_off, 0, ptem, pte);
     if (pte_offset == -1) {
         /* Secondary PTEG lookup */
         qemu_log_mask(CPU_LOG_MMU, "1 htab=" TARGET_FMT_plx "/" TARGET_FMT_plx
                 " vsid=%" PRIx32 " api=%" PRIx32
                 " hash=" TARGET_FMT_plx "\n", env->htab_base,
                 env->htab_mask, vsid, ptem, ~hash);
-        pteg_off = get_pteg_offset32(env, ~hash);
-        pte_offset = ppc_hash32_pteg_search(env, pteg_off, 1, ptem, pte);
+        pteg_off = get_pteg_offset32(cpu, ~hash);
+        pte_offset = ppc_hash32_pteg_search(cpu, pteg_off, 1, ptem, pte);
     }
 
     return pte_offset;
||||
|
@ -400,7 +409,7 @@ int ppc_hash32_handle_mmu_fault(PowerPCCPU *cpu, target_ulong eaddr, int rwx,
|
|||
|
||||
/* 2. Check Block Address Translation entries (BATs) */
|
||||
if (env->nb_BATs != 0) {
|
||||
raddr = ppc_hash32_bat_lookup(env, eaddr, rwx, &prot);
|
||||
raddr = ppc_hash32_bat_lookup(cpu, eaddr, rwx, &prot);
|
||||
if (raddr != -1) {
|
||||
if (need_prot[rwx] & ~prot) {
|
||||
if (rwx == 2) {
|
||||
|
@ -431,7 +440,7 @@ int ppc_hash32_handle_mmu_fault(PowerPCCPU *cpu, target_ulong eaddr, int rwx,
|
|||
|
||||
/* 4. Handle direct store segments */
|
||||
if (sr & SR32_T) {
|
||||
if (ppc_hash32_direct_store(env, sr, eaddr, rwx,
|
||||
if (ppc_hash32_direct_store(cpu, sr, eaddr, rwx,
|
||||
&raddr, &prot) == 0) {
|
||||
tlb_set_page(cs, eaddr & TARGET_PAGE_MASK,
|
||||
raddr & TARGET_PAGE_MASK, prot, mmu_idx,
|
||||
|
@ -450,7 +459,7 @@ int ppc_hash32_handle_mmu_fault(PowerPCCPU *cpu, target_ulong eaddr, int rwx,
|
|||
}
|
||||
|
||||
/* 6. Locate the PTE in the hash table */
|
||||
pte_offset = ppc_hash32_htab_lookup(env, sr, eaddr, &pte);
|
||||
pte_offset = ppc_hash32_htab_lookup(cpu, sr, eaddr, &pte);
|
||||
if (pte_offset == -1) {
|
||||
if (rwx == 2) {
|
||||
cs->exception_index = POWERPC_EXCP_ISI;
|
||||
|
@ -473,7 +482,7 @@ int ppc_hash32_handle_mmu_fault(PowerPCCPU *cpu, target_ulong eaddr, int rwx,
|
|||
|
||||
/* 7. Check access permissions */
|
||||
|
||||
prot = ppc_hash32_pte_prot(env, sr, pte);
|
||||
prot = ppc_hash32_pte_prot(cpu, sr, pte);
|
||||
|
||||
if (need_prot[rwx] & ~prot) {
|
||||
/* Access right violation */
|
||||
|
@ -508,7 +517,7 @@ int ppc_hash32_handle_mmu_fault(PowerPCCPU *cpu, target_ulong eaddr, int rwx,
|
|||
}
|
||||
|
||||
if (new_pte1 != pte.pte1) {
|
||||
ppc_hash32_store_hpte1(env, pte_offset, new_pte1);
|
||||
ppc_hash32_store_hpte1(cpu, pte_offset, new_pte1);
|
||||
}
|
||||
|
||||
/* 9. Determine the real address from the PTE */
|
||||
|
@ -521,8 +530,9 @@ int ppc_hash32_handle_mmu_fault(PowerPCCPU *cpu, target_ulong eaddr, int rwx,
|
|||
return 0;
|
||||
}
|
||||
|
||||
hwaddr ppc_hash32_get_phys_page_debug(CPUPPCState *env, target_ulong eaddr)
|
||||
hwaddr ppc_hash32_get_phys_page_debug(PowerPCCPU *cpu, target_ulong eaddr)
|
||||
{
|
||||
CPUPPCState *env = &cpu->env;
|
||||
target_ulong sr;
|
||||
hwaddr pte_offset;
|
||||
ppc_hash_pte32_t pte;
|
||||
|
@ -534,7 +544,7 @@ hwaddr ppc_hash32_get_phys_page_debug(CPUPPCState *env, target_ulong eaddr)
|
|||
}
|
||||
|
||||
if (env->nb_BATs != 0) {
|
||||
hwaddr raddr = ppc_hash32_bat_lookup(env, eaddr, 0, &prot);
|
||||
hwaddr raddr = ppc_hash32_bat_lookup(cpu, eaddr, 0, &prot);
|
||||
if (raddr != -1) {
|
||||
return raddr;
|
||||
}
|
||||
|
@ -547,7 +557,7 @@ hwaddr ppc_hash32_get_phys_page_debug(CPUPPCState *env, target_ulong eaddr)
|
|||
return -1;
|
||||
}
|
||||
|
||||
pte_offset = ppc_hash32_htab_lookup(env, sr, eaddr, &pte);
|
||||
pte_offset = ppc_hash32_htab_lookup(cpu, sr, eaddr, &pte);
|
||||
if (pte_offset == -1) {
|
||||
return -1;
|
||||
}
|
||||
|
|
|
@ -3,8 +3,8 @@
|
|||
|
||||
#ifndef CONFIG_USER_ONLY
|
||||
|
||||
hwaddr get_pteg_offset32(CPUPPCState *env, hwaddr hash);
|
||||
hwaddr ppc_hash32_get_phys_page_debug(CPUPPCState *env, target_ulong addr);
|
||||
hwaddr get_pteg_offset32(PowerPCCPU *cpu, hwaddr hash);
|
||||
hwaddr ppc_hash32_get_phys_page_debug(PowerPCCPU *cpu, target_ulong addr);
|
||||
int ppc_hash32_handle_mmu_fault(PowerPCCPU *cpu, target_ulong address, int rw,
|
||||
int mmu_idx);
|
||||
|
||||
|
@ -65,40 +65,42 @@ int ppc_hash32_handle_mmu_fault(PowerPCCPU *cpu, target_ulong address, int rw,
|
|||
#define HPTE32_R_WIMG 0x00000078
|
||||
#define HPTE32_R_PP 0x00000003
|
||||
|
||||
static inline target_ulong ppc_hash32_load_hpte0(CPUPPCState *env,
|
||||
static inline target_ulong ppc_hash32_load_hpte0(PowerPCCPU *cpu,
|
||||
hwaddr pte_offset)
|
||||
{
|
||||
CPUState *cs = CPU(ppc_env_get_cpu(env));
|
||||
CPUPPCState *env = &cpu->env;
|
||||
|
||||
assert(!env->external_htab); /* Not supported on 32-bit for now */
|
||||
return ldl_phys(cs->as, env->htab_base + pte_offset);
|
||||
return ldl_phys(CPU(cpu)->as, env->htab_base + pte_offset);
|
||||
}
|
||||
|
||||
static inline target_ulong ppc_hash32_load_hpte1(CPUPPCState *env,
|
||||
static inline target_ulong ppc_hash32_load_hpte1(PowerPCCPU *cpu,
|
||||
hwaddr pte_offset)
|
||||
{
|
||||
CPUState *cs = CPU(ppc_env_get_cpu(env));
|
||||
CPUPPCState *env = &cpu->env;
|
||||
|
||||
assert(!env->external_htab); /* Not supported on 32-bit for now */
|
||||
return ldl_phys(cs->as, env->htab_base + pte_offset + HASH_PTE_SIZE_32/2);
|
||||
return ldl_phys(CPU(cpu)->as,
|
||||
env->htab_base + pte_offset + HASH_PTE_SIZE_32 / 2);
|
||||
}
|
||||
|
||||
static inline void ppc_hash32_store_hpte0(CPUPPCState *env,
|
||||
static inline void ppc_hash32_store_hpte0(PowerPCCPU *cpu,
|
||||
hwaddr pte_offset, target_ulong pte0)
|
||||
{
|
||||
CPUState *cs = CPU(ppc_env_get_cpu(env));
|
||||
CPUPPCState *env = &cpu->env;
|
||||
|
||||
assert(!env->external_htab); /* Not supported on 32-bit for now */
|
||||
stl_phys(cs->as, env->htab_base + pte_offset, pte0);
|
||||
stl_phys(CPU(cpu)->as, env->htab_base + pte_offset, pte0);
|
||||
}
|
||||
|
||||
static inline void ppc_hash32_store_hpte1(CPUPPCState *env,
|
||||
static inline void ppc_hash32_store_hpte1(PowerPCCPU *cpu,
|
||||
hwaddr pte_offset, target_ulong pte1)
|
||||
{
|
||||
CPUState *cs = CPU(ppc_env_get_cpu(env));
|
||||
CPUPPCState *env = &cpu->env;
|
||||
|
||||
assert(!env->external_htab); /* Not supported on 32-bit for now */
|
||||
stl_phys(cs->as, env->htab_base + pte_offset + HASH_PTE_SIZE_32/2, pte1);
|
||||
stl_phys(CPU(cpu)->as,
|
||||
env->htab_base + pte_offset + HASH_PTE_SIZE_32 / 2, pte1);
|
||||
}
|
||||
|
||||
typedef struct {
|
||||
|
|
|
@ -20,7 +20,9 @@
|
|||
#include "qemu/osdep.h"
|
||||
#include "cpu.h"
|
||||
#include "exec/helper-proto.h"
|
||||
#include "qemu/error-report.h"
|
||||
#include "sysemu/kvm.h"
|
||||
#include "qemu/error-report.h"
|
||||
#include "kvm_ppc.h"
|
||||
#include "mmu-hash64.h"
|
||||
|
||||
|
@ -41,8 +43,9 @@ bool kvmppc_kern_htab;
|
|||
* SLB handling
|
||||
*/
|
||||
|
||||
static ppc_slb_t *slb_lookup(CPUPPCState *env, target_ulong eaddr)
|
||||
static ppc_slb_t *slb_lookup(PowerPCCPU *cpu, target_ulong eaddr)
|
||||
{
|
||||
CPUPPCState *env = &cpu->env;
|
||||
uint64_t esid_256M, esid_1T;
|
||||
int n;
|
||||
|
||||
|
@ -70,12 +73,13 @@ static ppc_slb_t *slb_lookup(CPUPPCState *env, target_ulong eaddr)
|
|||
return NULL;
|
||||
}
|
||||
|
||||
void dump_slb(FILE *f, fprintf_function cpu_fprintf, CPUPPCState *env)
|
||||
void dump_slb(FILE *f, fprintf_function cpu_fprintf, PowerPCCPU *cpu)
|
||||
{
|
||||
CPUPPCState *env = &cpu->env;
|
||||
int i;
|
||||
uint64_t slbe, slbv;
|
||||
|
||||
cpu_synchronize_state(CPU(ppc_env_get_cpu(env)));
|
||||
cpu_synchronize_state(CPU(cpu));
|
||||
|
||||
cpu_fprintf(f, "SLB\tESID\t\t\tVSID\n");
|
||||
for (i = 0; i < env->slb_nr; i++) {
|
||||
|
@ -118,7 +122,7 @@ void helper_slbie(CPUPPCState *env, target_ulong addr)
|
|||
PowerPCCPU *cpu = ppc_env_get_cpu(env);
|
||||
ppc_slb_t *slb;
|
||||
|
||||
slb = slb_lookup(env, addr);
|
||||
slb = slb_lookup(cpu, addr);
|
||||
if (!slb) {
|
||||
return;
|
||||
}
|
||||
|
@@ -134,35 +138,62 @@ void helper_slbie(CPUPPCState *env, target_ulong addr)
     }
 }
 
-int ppc_store_slb(CPUPPCState *env, target_ulong rb, target_ulong rs)
+int ppc_store_slb(PowerPCCPU *cpu, target_ulong slot,
+                  target_ulong esid, target_ulong vsid)
 {
-    int slot = rb & 0xfff;
+    CPUPPCState *env = &cpu->env;
     ppc_slb_t *slb = &env->slb[slot];
+    const struct ppc_one_seg_page_size *sps = NULL;
+    int i;
 
-    if (rb & (0x1000 - env->slb_nr)) {
-        return -1; /* Reserved bits set or slot too high */
+    if (slot >= env->slb_nr) {
+        return -1; /* Bad slot number */
     }
-    if (rs & (SLB_VSID_B & ~SLB_VSID_B_1T)) {
+    if (esid & ~(SLB_ESID_ESID | SLB_ESID_V)) {
+        return -1; /* Reserved bits set */
+    }
+    if (vsid & (SLB_VSID_B & ~SLB_VSID_B_1T)) {
         return -1; /* Bad segment size */
     }
-    if ((rs & SLB_VSID_B) && !(env->mmu_model & POWERPC_MMU_1TSEG)) {
+    if ((vsid & SLB_VSID_B) && !(env->mmu_model & POWERPC_MMU_1TSEG)) {
         return -1; /* 1T segment on MMU that doesn't support it */
     }
 
-    /* Mask out the slot number as we store the entry */
-    slb->esid = rb & (SLB_ESID_ESID | SLB_ESID_V);
-    slb->vsid = rs;
+    for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
+        const struct ppc_one_seg_page_size *sps1 = &env->sps.sps[i];
+
+        if (!sps1->page_shift) {
+            break;
+        }
+
+        if ((vsid & SLB_VSID_LLP_MASK) == sps1->slb_enc) {
+            sps = sps1;
+            break;
+        }
+    }
+
+    if (!sps) {
+        error_report("Bad page size encoding in SLB store: slot "TARGET_FMT_lu
+                     " esid 0x"TARGET_FMT_lx" vsid 0x"TARGET_FMT_lx,
+                     slot, esid, vsid);
+        return -1;
+    }
+
+    slb->esid = esid;
+    slb->vsid = vsid;
+    slb->sps = sps;
 
     LOG_SLB("%s: %d " TARGET_FMT_lx " - " TARGET_FMT_lx " => %016" PRIx64
-            " %016" PRIx64 "\n", __func__, slot, rb, rs,
+            " %016" PRIx64 "\n", __func__, slot, esid, vsid,
            slb->esid, slb->vsid);
 
     return 0;
 }
 
-static int ppc_load_slb_esid(CPUPPCState *env, target_ulong rb,
+static int ppc_load_slb_esid(PowerPCCPU *cpu, target_ulong rb,
                              target_ulong *rt)
 {
+    CPUPPCState *env = &cpu->env;
     int slot = rb & 0xfff;
     ppc_slb_t *slb = &env->slb[slot];
||||
|
||||
|
@ -174,9 +205,10 @@ static int ppc_load_slb_esid(CPUPPCState *env, target_ulong rb,
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int ppc_load_slb_vsid(CPUPPCState *env, target_ulong rb,
|
||||
static int ppc_load_slb_vsid(PowerPCCPU *cpu, target_ulong rb,
|
||||
target_ulong *rt)
|
||||
{
|
||||
CPUPPCState *env = &cpu->env;
|
||||
int slot = rb & 0xfff;
|
||||
ppc_slb_t *slb = &env->slb[slot];
|
||||
|
||||
|
@ -190,7 +222,9 @@ static int ppc_load_slb_vsid(CPUPPCState *env, target_ulong rb,
|
|||
|
||||
void helper_store_slb(CPUPPCState *env, target_ulong rb, target_ulong rs)
|
||||
{
|
||||
if (ppc_store_slb(env, rb, rs) < 0) {
|
||||
PowerPCCPU *cpu = ppc_env_get_cpu(env);
|
||||
|
||||
if (ppc_store_slb(cpu, rb & 0xfff, rb & ~0xfffULL, rs) < 0) {
|
||||
helper_raise_exception_err(env, POWERPC_EXCP_PROGRAM,
|
||||
POWERPC_EXCP_INVAL);
|
||||
}
|
||||
|
@ -198,9 +232,10 @@ void helper_store_slb(CPUPPCState *env, target_ulong rb, target_ulong rs)
|
|||
|
||||
target_ulong helper_load_slb_esid(CPUPPCState *env, target_ulong rb)
|
||||
{
|
||||
PowerPCCPU *cpu = ppc_env_get_cpu(env);
|
||||
target_ulong rt = 0;
|
||||
|
||||
if (ppc_load_slb_esid(env, rb, &rt) < 0) {
|
||||
if (ppc_load_slb_esid(cpu, rb, &rt) < 0) {
|
||||
helper_raise_exception_err(env, POWERPC_EXCP_PROGRAM,
|
||||
POWERPC_EXCP_INVAL);
|
||||
}
|
||||
|
@ -209,9 +244,10 @@ target_ulong helper_load_slb_esid(CPUPPCState *env, target_ulong rb)
|
|||
|
||||
target_ulong helper_load_slb_vsid(CPUPPCState *env, target_ulong rb)
|
||||
{
|
||||
PowerPCCPU *cpu = ppc_env_get_cpu(env);
|
||||
target_ulong rt = 0;
|
||||
|
||||
if (ppc_load_slb_vsid(env, rb, &rt) < 0) {
|
||||
if (ppc_load_slb_vsid(cpu, rb, &rt) < 0) {
|
||||
helper_raise_exception_err(env, POWERPC_EXCP_PROGRAM,
|
||||
POWERPC_EXCP_INVAL);
|
||||
}
|
||||
|
@ -222,9 +258,10 @@ target_ulong helper_load_slb_vsid(CPUPPCState *env, target_ulong rb)
|
|||
* 64-bit hash table MMU handling
|
||||
*/
|
||||
|
||||
static int ppc_hash64_pte_prot(CPUPPCState *env,
|
||||
static int ppc_hash64_pte_prot(PowerPCCPU *cpu,
|
||||
ppc_slb_t *slb, ppc_hash_pte64_t pte)
|
||||
{
|
||||
CPUPPCState *env = &cpu->env;
|
||||
unsigned pp, key;
|
||||
/* Some pp bit combinations have undefined behaviour, so default
|
||||
* to no access in those cases */
|
||||
|
@ -274,12 +311,12 @@ static int ppc_hash64_pte_prot(CPUPPCState *env,
|
|||
return prot;
|
||||
}
|
||||
|
||||
static int ppc_hash64_amr_prot(CPUPPCState *env, ppc_hash_pte64_t pte)
|
||||
static int ppc_hash64_amr_prot(PowerPCCPU *cpu, ppc_hash_pte64_t pte)
|
||||
{
|
||||
CPUPPCState *env = &cpu->env;
|
||||
int key, amrbits;
|
||||
int prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
|
||||
|
||||
|
||||
/* Only recent MMUs implement Virtual Page Class Key Protection */
|
||||
if (!(env->mmu_model & POWERPC_MMU_AMR)) {
|
||||
return prot;
|
||||
|
@ -348,23 +385,24 @@ void ppc_hash64_stop_access(uint64_t token)
|
|||
}
|
||||
}
|
||||
|
||||
static hwaddr ppc_hash64_pteg_search(CPUPPCState *env, hwaddr hash,
|
||||
static hwaddr ppc_hash64_pteg_search(PowerPCCPU *cpu, hwaddr hash,
|
||||
bool secondary, target_ulong ptem,
|
||||
ppc_hash_pte64_t *pte)
|
||||
{
|
||||
CPUPPCState *env = &cpu->env;
|
||||
int i;
|
||||
uint64_t token;
|
||||
target_ulong pte0, pte1;
|
||||
target_ulong pte_index;
|
||||
|
||||
pte_index = (hash & env->htab_mask) * HPTES_PER_GROUP;
|
||||
token = ppc_hash64_start_access(ppc_env_get_cpu(env), pte_index);
|
||||
token = ppc_hash64_start_access(cpu, pte_index);
|
||||
if (!token) {
|
||||
return -1;
|
||||
}
|
||||
for (i = 0; i < HPTES_PER_GROUP; i++) {
|
||||
pte0 = ppc_hash64_load_hpte0(env, token, i);
|
||||
pte1 = ppc_hash64_load_hpte1(env, token, i);
|
||||
pte0 = ppc_hash64_load_hpte0(cpu, token, i);
|
||||
pte1 = ppc_hash64_load_hpte1(cpu, token, i);
|
||||
|
||||
if ((pte0 & HPTE64_V_VALID)
|
||||
&& (secondary == !!(pte0 & HPTE64_V_SECONDARY))
|
||||
|
@@ -382,45 +420,31 @@ static hwaddr ppc_hash64_pteg_search(CPUPPCState *env, hwaddr hash,
     return -1;
 }
 
-static uint64_t ppc_hash64_page_shift(ppc_slb_t *slb)
-{
-    uint64_t epnshift;
-
-    /* Page size according to the SLB, which we use to generate the
-     * EPN for hash table lookup.. When we implement more recent MMU
-     * extensions this might be different from the actual page size
-     * encoded in the PTE */
-    if ((slb->vsid & SLB_VSID_LLP_MASK) == SLB_VSID_4K) {
-        epnshift = TARGET_PAGE_BITS;
-    } else if ((slb->vsid & SLB_VSID_LLP_MASK) == SLB_VSID_64K) {
-        epnshift = TARGET_PAGE_BITS_64K;
-    } else {
-        epnshift = TARGET_PAGE_BITS_16M;
-    }
-    return epnshift;
-}
-
-static hwaddr ppc_hash64_htab_lookup(CPUPPCState *env,
+static hwaddr ppc_hash64_htab_lookup(PowerPCCPU *cpu,
                                      ppc_slb_t *slb, target_ulong eaddr,
                                      ppc_hash_pte64_t *pte)
 {
+    CPUPPCState *env = &cpu->env;
     hwaddr pte_offset;
     hwaddr hash;
-    uint64_t vsid, epnshift, epnmask, epn, ptem;
+    uint64_t vsid, epnmask, epn, ptem;
 
-    epnshift = ppc_hash64_page_shift(slb);
-    epnmask = ~((1ULL << epnshift) - 1);
+    /* The SLB store path should prevent any bad page size encodings
+     * getting in there, so: */
+    assert(slb->sps);
+
+    epnmask = ~((1ULL << slb->sps->page_shift) - 1);
 
     if (slb->vsid & SLB_VSID_B) {
         /* 1TB segment */
         vsid = (slb->vsid & SLB_VSID_VSID) >> SLB_VSID_SHIFT_1T;
         epn = (eaddr & ~SEGMENT_MASK_1T) & epnmask;
-        hash = vsid ^ (vsid << 25) ^ (epn >> epnshift);
+        hash = vsid ^ (vsid << 25) ^ (epn >> slb->sps->page_shift);
     } else {
         /* 256M segment */
         vsid = (slb->vsid & SLB_VSID_VSID) >> SLB_VSID_SHIFT;
         epn = (eaddr & ~SEGMENT_MASK_256M) & epnmask;
-        hash = vsid ^ (epn >> epnshift);
+        hash = vsid ^ (epn >> slb->sps->page_shift);
     }
     ptem = (slb->vsid & SLB_VSID_PTEM) | ((epn >> 16) & HPTE64_V_AVPN);
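
Side note: the hash computation above now takes the segment page shift from slb->sps instead of re-deriving it from the LLP bits. A standalone sketch of the two hash formulas (256M and 1T segments) follows; all values are hypothetical and this is not QEMU code.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    unsigned page_shift = 16;                 /* 64K segment page size */
    uint64_t vsid = 0x123456789ULL;           /* hypothetical VSID */
    uint64_t eaddr = 0x0000000010aa0000ULL;   /* hypothetical effective address */
    uint64_t epnmask = ~((1ULL << page_shift) - 1);

    /* 256M segment: page-aligned offset within the segment, folded with vsid */
    uint64_t epn_256m = (eaddr & ((1ULL << 28) - 1)) & epnmask;
    uint64_t hash_256m = vsid ^ (epn_256m >> page_shift);

    /* 1T segment: the vsid is folded in a second time, shifted by 25 */
    uint64_t epn_1t = (eaddr & ((1ULL << 40) - 1)) & epnmask;
    uint64_t hash_1t = vsid ^ (vsid << 25) ^ (epn_1t >> page_shift);

    printf("256M hash 0x%llx, 1T hash 0x%llx\n",
           (unsigned long long)hash_256m, (unsigned long long)hash_1t);
    return 0;
}
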
|
||||
|
||||
|
@ -436,7 +460,7 @@ static hwaddr ppc_hash64_htab_lookup(CPUPPCState *env,
|
|||
" vsid=" TARGET_FMT_lx " ptem=" TARGET_FMT_lx
|
||||
" hash=" TARGET_FMT_plx "\n",
|
||||
env->htab_base, env->htab_mask, vsid, ptem, hash);
|
||||
pte_offset = ppc_hash64_pteg_search(env, hash, 0, ptem, pte);
|
||||
pte_offset = ppc_hash64_pteg_search(cpu, hash, 0, ptem, pte);
|
||||
|
||||
if (pte_offset == -1) {
|
||||
/* Secondary PTEG lookup */
|
||||
|
@ -446,24 +470,82 @@ static hwaddr ppc_hash64_htab_lookup(CPUPPCState *env,
|
|||
" hash=" TARGET_FMT_plx "\n", env->htab_base,
|
||||
env->htab_mask, vsid, ptem, ~hash);
|
||||
|
||||
pte_offset = ppc_hash64_pteg_search(env, ~hash, 1, ptem, pte);
|
||||
pte_offset = ppc_hash64_pteg_search(cpu, ~hash, 1, ptem, pte);
|
||||
}
|
||||
|
||||
return pte_offset;
|
||||
}
|
||||
|
||||
static hwaddr ppc_hash64_pte_raddr(ppc_slb_t *slb, ppc_hash_pte64_t pte,
|
||||
target_ulong eaddr)
|
||||
static unsigned hpte_page_shift(const struct ppc_one_seg_page_size *sps,
|
||||
uint64_t pte0, uint64_t pte1)
|
||||
{
|
||||
hwaddr mask;
|
||||
int target_page_bits;
|
||||
hwaddr rpn = pte.pte1 & HPTE64_R_RPN;
|
||||
int i;
|
||||
|
||||
if (!(pte0 & HPTE64_V_LARGE)) {
|
||||
if (sps->page_shift != 12) {
|
||||
/* 4kiB page in a non 4kiB segment */
|
||||
return 0;
|
||||
}
|
||||
/* Normal 4kiB page */
|
||||
return 12;
|
||||
}
|
||||
|
||||
for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
|
||||
const struct ppc_one_page_size *ps = &sps->enc[i];
|
||||
uint64_t mask;
|
||||
|
||||
if (!ps->page_shift) {
|
||||
break;
|
||||
}
|
||||
|
||||
if (ps->page_shift == 12) {
|
||||
/* L bit is set so this can't be a 4kiB page */
|
||||
continue;
|
||||
}
|
||||
|
||||
mask = ((1ULL << ps->page_shift) - 1) & HPTE64_R_RPN;
|
||||
|
||||
if ((pte1 & mask) == (ps->pte_enc << HPTE64_R_RPN_SHIFT)) {
|
||||
return ps->page_shift;
|
||||
}
|
||||
}
|
||||
|
||||
return 0; /* Bad page size encoding */
|
||||
}
|
||||
|
||||
unsigned ppc_hash64_hpte_page_shift_noslb(PowerPCCPU *cpu,
|
||||
uint64_t pte0, uint64_t pte1,
|
||||
unsigned *seg_page_shift)
|
||||
{
|
||||
CPUPPCState *env = &cpu->env;
|
||||
int i;
|
||||
|
||||
if (!(pte0 & HPTE64_V_LARGE)) {
|
||||
*seg_page_shift = 12;
|
||||
return 12;
|
||||
}
|
||||
|
||||
/*
|
||||
* We support 4K, 64K and 16M now
|
||||
* The encodings in env->sps need to be carefully chosen so that
|
||||
* this gives an unambiguous result.
|
||||
*/
|
||||
target_page_bits = ppc_hash64_page_shift(slb);
|
||||
mask = (1ULL << target_page_bits) - 1;
|
||||
return (rpn & ~mask) | (eaddr & mask);
|
||||
for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
|
||||
const struct ppc_one_seg_page_size *sps = &env->sps.sps[i];
|
||||
unsigned shift;
|
||||
|
||||
if (!sps->page_shift) {
|
||||
break;
|
||||
}
|
||||
|
||||
shift = hpte_page_shift(sps, pte0, pte1);
|
||||
if (shift) {
|
||||
*seg_page_shift = sps->page_shift;
|
||||
return shift;
|
||||
}
|
||||
}
|
||||
|
||||
*seg_page_shift = 0;
|
||||
return 0;
|
||||
}
|
||||
|
||||
int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, target_ulong eaddr,
|
||||
|
@ -472,6 +554,7 @@ int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, target_ulong eaddr,
|
|||
CPUState *cs = CPU(cpu);
|
||||
CPUPPCState *env = &cpu->env;
|
||||
ppc_slb_t *slb;
|
||||
unsigned apshift;
|
||||
hwaddr pte_offset;
|
||||
ppc_hash_pte64_t pte;
|
||||
int pp_prot, amr_prot, prot;
|
||||
|
@ -493,7 +576,7 @@ int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, target_ulong eaddr,
|
|||
}
|
||||
|
||||
/* 2. Translation is on, so look up the SLB */
|
||||
slb = slb_lookup(env, eaddr);
|
||||
slb = slb_lookup(cpu, eaddr);
|
||||
|
||||
if (!slb) {
|
||||
if (rwx == 2) {
|
||||
|
@ -515,7 +598,7 @@ int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, target_ulong eaddr,
|
|||
}
|
||||
|
||||
/* 4. Locate the PTE in the hash table */
|
||||
pte_offset = ppc_hash64_htab_lookup(env, slb, eaddr, &pte);
|
||||
pte_offset = ppc_hash64_htab_lookup(cpu, slb, eaddr, &pte);
|
||||
if (pte_offset == -1) {
|
||||
if (rwx == 2) {
|
||||
cs->exception_index = POWERPC_EXCP_ISI;
|
||||
|
@@ -535,10 +618,22 @@ int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, target_ulong eaddr,
     qemu_log_mask(CPU_LOG_MMU,
                 "found PTE at offset %08" HWADDR_PRIx "\n", pte_offset);
 
+    /* Validate page size encoding */
+    apshift = hpte_page_shift(slb->sps, pte.pte0, pte.pte1);
+    if (!apshift) {
+        error_report("Bad page size encoding in HPTE 0x%"PRIx64" - 0x%"PRIx64
+                     " @ 0x%"HWADDR_PRIx, pte.pte0, pte.pte1, pte_offset);
+        /* Not entirely sure what the right action here, but machine
+         * check seems reasonable */
+        cs->exception_index = POWERPC_EXCP_MCHECK;
+        env->error_code = 0;
+        return 1;
+    }
+
     /* 5. Check access permissions */
 
-    pp_prot = ppc_hash64_pte_prot(env, slb, pte);
-    amr_prot = ppc_hash64_amr_prot(env, pte);
+    pp_prot = ppc_hash64_pte_prot(cpu, slb, pte);
+    amr_prot = ppc_hash64_amr_prot(cpu, pte);
     prot = pp_prot & amr_prot;
 
     if ((need_prot[rwx] & ~prot) != 0) {
|
||||
|
@ -581,49 +676,57 @@ int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, target_ulong eaddr,
|
|||
}
|
||||
|
||||
if (new_pte1 != pte.pte1) {
|
||||
ppc_hash64_store_hpte(env, pte_offset / HASH_PTE_SIZE_64,
|
||||
ppc_hash64_store_hpte(cpu, pte_offset / HASH_PTE_SIZE_64,
|
||||
pte.pte0, new_pte1);
|
||||
}
|
||||
|
||||
/* 7. Determine the real address from the PTE */
|
||||
|
||||
raddr = ppc_hash64_pte_raddr(slb, pte, eaddr);
|
||||
raddr = deposit64(pte.pte1 & HPTE64_R_RPN, 0, apshift, eaddr);
|
||||
|
||||
tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
|
||||
prot, mmu_idx, TARGET_PAGE_SIZE);
|
||||
prot, mmu_idx, 1ULL << apshift);
|
||||
|
||||
return 0;
|
||||
}
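
Side note: once the actual page size has been validated, the real address is formed by depositing the low apshift bits of the effective address into the HPTE's RPN, rather than assuming TARGET_PAGE_SIZE. A standalone sketch of that bit-deposit step follows, with hypothetical values; this is not QEMU code.

#include <stdint.h>
#include <stdio.h>

/* Insert 'len' bits of 'value' into 'base' starting at bit 'start',
 * mirroring what deposit64(pte.pte1 & HPTE64_R_RPN, 0, apshift, eaddr)
 * does in the patch. */
static uint64_t deposit_bits(uint64_t base, unsigned start, unsigned len,
                             uint64_t value)
{
    uint64_t mask = (len < 64 ? (1ULL << len) - 1 : ~0ULL) << start;
    return (base & ~mask) | ((value << start) & mask);
}

int main(void)
{
    uint64_t rpn = 0x0000000020000000ULL;     /* hypothetical RPN from pte1 */
    uint64_t eaddr = 0x0000000010aa1234ULL;   /* faulting effective address */
    unsigned apshift = 16;                    /* 64K actual page size */

    printf("real address: 0x%llx\n",
           (unsigned long long)deposit_bits(rpn, 0, apshift, eaddr));
    return 0;
}
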
|
||||
|
||||
hwaddr ppc_hash64_get_phys_page_debug(CPUPPCState *env, target_ulong addr)
|
||||
hwaddr ppc_hash64_get_phys_page_debug(PowerPCCPU *cpu, target_ulong addr)
|
||||
{
|
||||
CPUPPCState *env = &cpu->env;
|
||||
ppc_slb_t *slb;
|
||||
hwaddr pte_offset;
|
||||
ppc_hash_pte64_t pte;
|
||||
unsigned apshift;
|
||||
|
||||
if (msr_dr == 0) {
|
||||
/* In real mode the top 4 effective address bits are ignored */
|
||||
return addr & 0x0FFFFFFFFFFFFFFFULL;
|
||||
}
|
||||
|
||||
slb = slb_lookup(env, addr);
|
||||
slb = slb_lookup(cpu, addr);
|
||||
if (!slb) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
pte_offset = ppc_hash64_htab_lookup(env, slb, addr, &pte);
|
||||
pte_offset = ppc_hash64_htab_lookup(cpu, slb, addr, &pte);
|
||||
if (pte_offset == -1) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
return ppc_hash64_pte_raddr(slb, pte, addr) & TARGET_PAGE_MASK;
|
||||
apshift = hpte_page_shift(slb->sps, pte.pte0, pte.pte1);
|
||||
if (!apshift) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
return deposit64(pte.pte1 & HPTE64_R_RPN, 0, apshift, addr)
|
||||
& TARGET_PAGE_MASK;
|
||||
}
|
||||
|
||||
void ppc_hash64_store_hpte(CPUPPCState *env,
|
||||
void ppc_hash64_store_hpte(PowerPCCPU *cpu,
|
||||
target_ulong pte_index,
|
||||
target_ulong pte0, target_ulong pte1)
|
||||
{
|
||||
CPUState *cs = CPU(ppc_env_get_cpu(env));
|
||||
CPUPPCState *env = &cpu->env;
|
||||
|
||||
if (kvmppc_kern_htab) {
|
||||
kvmppc_hash64_write_pte(env, pte_index, pte0, pte1);
|
||||
|
@ -633,9 +736,22 @@ void ppc_hash64_store_hpte(CPUPPCState *env,
|
|||
pte_index *= HASH_PTE_SIZE_64;
|
||||
if (env->external_htab) {
|
||||
stq_p(env->external_htab + pte_index, pte0);
|
||||
stq_p(env->external_htab + pte_index + HASH_PTE_SIZE_64/2, pte1);
|
||||
stq_p(env->external_htab + pte_index + HASH_PTE_SIZE_64 / 2, pte1);
|
||||
} else {
|
||||
stq_phys(cs->as, env->htab_base + pte_index, pte0);
|
||||
stq_phys(cs->as, env->htab_base + pte_index + HASH_PTE_SIZE_64/2, pte1);
|
||||
stq_phys(CPU(cpu)->as, env->htab_base + pte_index, pte0);
|
||||
stq_phys(CPU(cpu)->as,
|
||||
env->htab_base + pte_index + HASH_PTE_SIZE_64 / 2, pte1);
|
||||
}
|
||||
}
|
||||
|
||||
void ppc_hash64_tlb_flush_hpte(PowerPCCPU *cpu,
|
||||
target_ulong pte_index,
|
||||
target_ulong pte0, target_ulong pte1)
|
||||
{
|
||||
/*
|
||||
* XXX: given the fact that there are too many segments to
|
||||
* invalidate, and we still don't have a tlb_flush_mask(env, n,
|
||||
* mask) in QEMU, we just invalidate all TLBs
|
||||
*/
|
||||
tlb_flush(CPU(cpu), 1);
|
||||
}
|
||||
|
|
|
@ -4,13 +4,21 @@
|
|||
#ifndef CONFIG_USER_ONLY
|
||||
|
||||
#ifdef TARGET_PPC64
|
||||
void dump_slb(FILE *f, fprintf_function cpu_fprintf, CPUPPCState *env);
|
||||
int ppc_store_slb (CPUPPCState *env, target_ulong rb, target_ulong rs);
|
||||
hwaddr ppc_hash64_get_phys_page_debug(CPUPPCState *env, target_ulong addr);
|
||||
void ppc_hash64_check_page_sizes(PowerPCCPU *cpu, Error **errp);
|
||||
void dump_slb(FILE *f, fprintf_function cpu_fprintf, PowerPCCPU *cpu);
|
||||
int ppc_store_slb(PowerPCCPU *cpu, target_ulong slot,
|
||||
target_ulong esid, target_ulong vsid);
|
||||
hwaddr ppc_hash64_get_phys_page_debug(PowerPCCPU *cpu, target_ulong addr);
|
||||
int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, target_ulong address, int rw,
|
||||
int mmu_idx);
|
||||
void ppc_hash64_store_hpte(CPUPPCState *env, target_ulong index,
|
||||
void ppc_hash64_store_hpte(PowerPCCPU *cpu, target_ulong index,
|
||||
target_ulong pte0, target_ulong pte1);
|
||||
void ppc_hash64_tlb_flush_hpte(PowerPCCPU *cpu,
|
||||
target_ulong pte_index,
|
||||
target_ulong pte0, target_ulong pte1);
|
||||
unsigned ppc_hash64_hpte_page_shift_noslb(PowerPCCPU *cpu,
|
||||
uint64_t pte0, uint64_t pte1,
|
||||
unsigned *seg_page_shift);
|
||||
#endif
|
||||
|
||||
/*
|
||||
|
@ -40,6 +48,8 @@ void ppc_hash64_store_hpte(CPUPPCState *env, target_ulong index,
|
|||
#define SLB_VSID_LLP_MASK (SLB_VSID_L | SLB_VSID_LP)
|
||||
#define SLB_VSID_4K 0x0000000000000000ULL
|
||||
#define SLB_VSID_64K 0x0000000000000110ULL
|
||||
#define SLB_VSID_16M 0x0000000000000100ULL
|
||||
#define SLB_VSID_16G 0x0000000000000120ULL
|
||||
|
||||
/*
|
||||
* Hash page table definitions
|
||||
|
@ -85,31 +95,31 @@ extern bool kvmppc_kern_htab;
|
|||
uint64_t ppc_hash64_start_access(PowerPCCPU *cpu, target_ulong pte_index);
|
||||
void ppc_hash64_stop_access(uint64_t token);
|
||||
|
||||
static inline target_ulong ppc_hash64_load_hpte0(CPUPPCState *env,
|
||||
static inline target_ulong ppc_hash64_load_hpte0(PowerPCCPU *cpu,
|
||||
uint64_t token, int index)
|
||||
{
|
||||
CPUState *cs = CPU(ppc_env_get_cpu(env));
|
||||
CPUPPCState *env = &cpu->env;
|
||||
uint64_t addr;
|
||||
|
||||
addr = token + (index * HASH_PTE_SIZE_64);
|
||||
if (env->external_htab) {
|
||||
return ldq_p((const void *)(uintptr_t)addr);
|
||||
} else {
|
||||
return ldq_phys(cs->as, addr);
|
||||
return ldq_phys(CPU(cpu)->as, addr);
|
||||
}
|
||||
}
|
||||
|
||||
static inline target_ulong ppc_hash64_load_hpte1(CPUPPCState *env,
|
||||
static inline target_ulong ppc_hash64_load_hpte1(PowerPCCPU *cpu,
|
||||
uint64_t token, int index)
|
||||
{
|
||||
CPUState *cs = CPU(ppc_env_get_cpu(env));
|
||||
CPUPPCState *env = &cpu->env;
|
||||
uint64_t addr;
|
||||
|
||||
addr = token + (index * HASH_PTE_SIZE_64) + HASH_PTE_SIZE_64/2;
|
||||
if (env->external_htab) {
|
||||
return ldq_p((const void *)(uintptr_t)addr);
|
||||
} else {
|
||||
return ldq_phys(cs->as, addr);
|
||||
return ldq_phys(CPU(cpu)->as, addr);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -658,32 +658,6 @@ static inline void ppc4xx_tlb_invalidate_all(CPUPPCState *env)
|
|||
tlb_flush(CPU(cpu), 1);
|
||||
}
|
||||
|
||||
static inline void ppc4xx_tlb_invalidate_virt(CPUPPCState *env,
|
||||
target_ulong eaddr, uint32_t pid)
|
||||
{
|
||||
#if !defined(FLUSH_ALL_TLBS)
|
||||
CPUState *cs = CPU(ppc_env_get_cpu(env));
|
||||
ppcemb_tlb_t *tlb;
|
||||
hwaddr raddr;
|
||||
target_ulong page, end;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < env->nb_tlb; i++) {
|
||||
tlb = &env->tlb.tlbe[i];
|
||||
if (ppcemb_tlb_check(env, tlb, &raddr, eaddr, pid, 0, i) == 0) {
|
||||
end = tlb->EPN + tlb->size;
|
||||
for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE) {
|
||||
tlb_flush_page(cs, page);
|
||||
}
|
||||
tlb->prot &= ~PAGE_VALID;
|
||||
break;
|
||||
}
|
||||
}
|
||||
#else
|
||||
ppc4xx_tlb_invalidate_all(env);
|
||||
#endif
|
||||
}
|
||||
|
||||
static int mmu40x_get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx,
|
||||
target_ulong address, int rw,
|
||||
int access_type)
|
||||
|
@ -1298,7 +1272,7 @@ void dump_mmu(FILE *f, fprintf_function cpu_fprintf, CPUPPCState *env)
|
|||
case POWERPC_MMU_2_06a:
|
||||
case POWERPC_MMU_2_07:
|
||||
case POWERPC_MMU_2_07a:
|
||||
dump_slb(f, cpu_fprintf, env);
|
||||
dump_slb(f, cpu_fprintf, ppc_env_get_cpu(env));
|
||||
break;
|
||||
#endif
|
||||
default:
|
||||
|
@ -1440,12 +1414,12 @@ hwaddr ppc_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
|
|||
case POWERPC_MMU_2_06a:
|
||||
case POWERPC_MMU_2_07:
|
||||
case POWERPC_MMU_2_07a:
|
||||
return ppc_hash64_get_phys_page_debug(env, addr);
|
||||
return ppc_hash64_get_phys_page_debug(cpu, addr);
|
||||
#endif
|
||||
|
||||
case POWERPC_MMU_32B:
|
||||
case POWERPC_MMU_601:
|
||||
return ppc_hash32_get_phys_page_debug(env, addr);
|
||||
return ppc_hash32_get_phys_page_debug(cpu, addr);
|
||||
|
||||
default:
|
||||
;
|
||||
|
@ -1511,6 +1485,7 @@ static int cpu_ppc_handle_mmu_fault(CPUPPCState *env, target_ulong address,
|
|||
int rw, int mmu_idx)
|
||||
{
|
||||
CPUState *cs = CPU(ppc_env_get_cpu(env));
|
||||
PowerPCCPU *cpu = POWERPC_CPU(cs);
|
||||
mmu_ctx_t ctx;
|
||||
int access_type;
|
||||
int ret = 0;
|
||||
|
@ -1612,9 +1587,9 @@ static int cpu_ppc_handle_mmu_fault(CPUPPCState *env, target_ulong address,
|
|||
tlb_miss:
|
||||
env->error_code |= ctx.key << 19;
|
||||
env->spr[SPR_HASH1] = env->htab_base +
|
||||
get_pteg_offset32(env, ctx.hash[0]);
|
||||
get_pteg_offset32(cpu, ctx.hash[0]);
|
||||
env->spr[SPR_HASH2] = env->htab_base +
|
||||
get_pteg_offset32(env, ctx.hash[1]);
|
||||
get_pteg_offset32(cpu, ctx.hash[1]);
|
||||
break;
|
||||
case POWERPC_MMU_SOFT_74xx:
|
||||
if (rw == 1) {
|
||||
|
@ -1971,25 +1946,6 @@ void ppc_tlb_invalidate_one(CPUPPCState *env, target_ulong addr)
|
|||
ppc6xx_tlb_invalidate_virt(env, addr, 1);
|
||||
}
|
||||
break;
|
||||
case POWERPC_MMU_SOFT_4xx:
|
||||
case POWERPC_MMU_SOFT_4xx_Z:
|
||||
ppc4xx_tlb_invalidate_virt(env, addr, env->spr[SPR_40x_PID]);
|
||||
break;
|
||||
case POWERPC_MMU_REAL:
|
||||
cpu_abort(CPU(cpu), "No TLB for PowerPC 4xx in real mode\n");
|
||||
break;
|
||||
case POWERPC_MMU_MPC8xx:
|
||||
/* XXX: TODO */
|
||||
cpu_abort(CPU(cpu), "MPC8xx MMU model is not implemented\n");
|
||||
break;
|
||||
case POWERPC_MMU_BOOKE:
|
||||
/* XXX: TODO */
|
||||
cpu_abort(CPU(cpu), "BookE MMU model is not implemented\n");
|
||||
break;
|
||||
case POWERPC_MMU_BOOKE206:
|
||||
/* XXX: TODO */
|
||||
cpu_abort(CPU(cpu), "BookE 2.06 MMU model is not implemented\n");
|
||||
break;
|
||||
case POWERPC_MMU_32B:
|
||||
case POWERPC_MMU_601:
|
||||
/* tlbie invalidate TLBs for all segments */
|
||||
|
@ -2031,9 +1987,8 @@ void ppc_tlb_invalidate_one(CPUPPCState *env, target_ulong addr)
|
|||
break;
|
||||
#endif /* defined(TARGET_PPC64) */
|
||||
default:
|
||||
/* XXX: TODO */
|
||||
cpu_abort(CPU(cpu), "Unknown MMU model\n");
|
||||
break;
|
||||
/* Should never reach here with other MMU models */
|
||||
assert(0);
|
||||
}
|
||||
#else
|
||||
ppc_tlb_invalidate_all(env);
|
||||
|
@ -2088,21 +2043,17 @@ void helper_store_sr(CPUPPCState *env, target_ulong srnum, target_ulong value)
|
|||
(int)srnum, value, env->sr[srnum]);
|
||||
#if defined(TARGET_PPC64)
|
||||
if (env->mmu_model & POWERPC_MMU_64) {
|
||||
uint64_t rb = 0, rs = 0;
|
||||
uint64_t esid, vsid;
|
||||
|
||||
/* ESID = srnum */
|
||||
rb |= ((uint32_t)srnum & 0xf) << 28;
|
||||
/* Set the valid bit */
|
||||
rb |= SLB_ESID_V;
|
||||
/* Index = ESID */
|
||||
rb |= (uint32_t)srnum;
|
||||
esid = ((uint64_t)(srnum & 0xf) << 28) | SLB_ESID_V;
|
||||
|
||||
/* VSID = VSID */
|
||||
rs |= (value & 0xfffffff) << 12;
|
||||
vsid = (value & 0xfffffff) << 12;
|
||||
/* flags = flags */
|
||||
rs |= ((value >> 27) & 0xf) << 8;
|
||||
vsid |= ((value >> 27) & 0xf) << 8;
|
||||
|
||||
ppc_store_slb(env, rb, rs);
|
||||
ppc_store_slb(cpu, srnum, esid, vsid);
|
||||
} else
|
||||
#endif
|
||||
if (env->sr[srnum] != value) {
|
||||
|
@ -2136,6 +2087,16 @@ void helper_tlbie(CPUPPCState *env, target_ulong addr)
|
|||
ppc_tlb_invalidate_one(env, addr);
|
||||
}
|
||||
|
||||
void helper_tlbiva(CPUPPCState *env, target_ulong addr)
|
||||
{
|
||||
PowerPCCPU *cpu = ppc_env_get_cpu(env);
|
||||
|
||||
/* tlbiva instruction only exists on BookE */
|
||||
assert(env->mmu_model == POWERPC_MMU_BOOKE);
|
||||
/* XXX: TODO */
|
||||
cpu_abort(CPU(cpu), "BookE MMU model is not implemented\n");
|
||||
}
|
||||
|
||||
/* Software driven TLBs management */
|
||||
/* PowerPC 602/603 software TLB load instructions helpers */
|
||||
static void do_6xx_tlb(CPUPPCState *env, target_ulong new_EPN, int is_code)
|
||||
|
|
|
@@ -2501,18 +2501,31 @@ static void gen_fmrgow(DisasContext *ctx)
 static void gen_mcrfs(DisasContext *ctx)
 {
     TCGv tmp = tcg_temp_new();
+    TCGv_i32 tmask;
+    TCGv_i64 tnew_fpscr = tcg_temp_new_i64();
     int bfa;
+    int nibble;
+    int shift;
 
     if (unlikely(!ctx->fpu_enabled)) {
         gen_exception(ctx, POWERPC_EXCP_FPU);
         return;
     }
-    bfa = 4 * (7 - crfS(ctx->opcode));
-    tcg_gen_shri_tl(tmp, cpu_fpscr, bfa);
+    bfa = crfS(ctx->opcode);
+    nibble = 7 - bfa;
+    shift = 4 * nibble;
+    tcg_gen_shri_tl(tmp, cpu_fpscr, shift);
     tcg_gen_trunc_tl_i32(cpu_crf[crfD(ctx->opcode)], tmp);
-    tcg_temp_free(tmp);
     tcg_gen_andi_i32(cpu_crf[crfD(ctx->opcode)], cpu_crf[crfD(ctx->opcode)], 0xf);
-    tcg_gen_andi_tl(cpu_fpscr, cpu_fpscr, ~(0xF << bfa));
+    tcg_temp_free(tmp);
+    tcg_gen_extu_tl_i64(tnew_fpscr, cpu_fpscr);
+    /* Only the exception bits (including FX) should be cleared if read */
+    tcg_gen_andi_i64(tnew_fpscr, tnew_fpscr, ~((0xF << shift) & FP_EX_CLEAR_BITS));
+    /* FEX and VX need to be updated, so don't set fpscr directly */
+    tmask = tcg_const_i32(1 << nibble);
+    gen_helper_store_fpscr(cpu_env, tnew_fpscr, tmask);
+    tcg_temp_free_i32(tmask);
+    tcg_temp_free_i64(tnew_fpscr);
 }
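
Side note: mcrfs selects FPSCR nibble 7 - BFA, copies it to the CR field, and must clear only the exception bits it read, leaving FEX and VX to be recomputed by the fpscr store helper. A standalone sketch of the shift/mask arithmetic follows; the FP_EX_CLEAR_BITS value here is illustrative only and this is not QEMU code.

#include <stdint.h>
#include <stdio.h>

/* Illustrative mask of clearable exception bits (excludes FEX/VX on purpose) */
#define FP_EX_CLEAR_BITS 0x9ff80700u

int main(void)
{
    uint32_t fpscr = 0x82004000u;      /* hypothetical FPSCR contents */
    unsigned bfa = 0;                  /* crfS field from the opcode */
    unsigned nibble = 7 - bfa;
    unsigned shift = 4 * nibble;

    /* The nibble copied into the CR field */
    uint32_t cr_field = (fpscr >> shift) & 0xf;
    /* Clear only the exception bits inside that nibble */
    uint32_t new_fpscr = fpscr & ~((0xfu << shift) & FP_EX_CLEAR_BITS);

    printf("CR field: 0x%x, FPSCR after mcrfs: 0x%08x\n", cr_field, new_fpscr);
    return 0;
}
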
|
||||
|
||||
/* mffs */
|
||||
|
@ -5905,7 +5918,7 @@ static void gen_tlbiva(DisasContext *ctx)
|
|||
}
|
||||
t0 = tcg_temp_new();
|
||||
gen_addr_reg_index(ctx, t0);
|
||||
gen_helper_tlbie(cpu_env, cpu_gpr[rB(ctx->opcode)]);
|
||||
gen_helper_tlbiva(cpu_env, cpu_gpr[rB(ctx->opcode)]);
|
||||
tcg_temp_free(t0);
|
||||
#endif
|
||||
}
|
||||
|
|
|
@@ -8105,6 +8105,36 @@ static Property powerpc_servercpu_properties[] = {
     DEFINE_PROP_END_OF_LIST(),
 };
 
+#ifdef CONFIG_SOFTMMU
+static const struct ppc_segment_page_sizes POWER7_POWER8_sps = {
+    .sps = {
+        {
+            .page_shift = 12, /* 4K */
+            .slb_enc = 0,
+            .enc = { { .page_shift = 12, .pte_enc = 0 },
+                     { .page_shift = 16, .pte_enc = 0x7 },
+                     { .page_shift = 24, .pte_enc = 0x38 }, },
+        },
+        {
+            .page_shift = 16, /* 64K */
+            .slb_enc = SLB_VSID_64K,
+            .enc = { { .page_shift = 16, .pte_enc = 0x1 },
+                     { .page_shift = 24, .pte_enc = 0x8 }, },
+        },
+        {
+            .page_shift = 24, /* 16M */
+            .slb_enc = SLB_VSID_16M,
+            .enc = { { .page_shift = 24, .pte_enc = 0 }, },
+        },
+        {
+            .page_shift = 34, /* 16G */
+            .slb_enc = SLB_VSID_16G,
+            .enc = { { .page_shift = 34, .pte_enc = 0x3 }, },
+        },
+    }
+};
+#endif /* CONFIG_SOFTMMU */
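
Side note: the per-segment pte_enc values in this table are what hpte_page_shift() matches against the low RPN bits of a large-page HPTE. A standalone sketch of that decode for a 64K segment follows; the macro values mirror the patch, but the PTE contents are hypothetical and this is not QEMU code.

#include <stdint.h>
#include <stdio.h>

#define HPTE64_V_LARGE      0x0000000000000004ULL
#define HPTE64_R_RPN_SHIFT  12

struct page_enc { unsigned page_shift; uint64_t pte_enc; };

/* Return the actual page shift encoded in an HPTE, given the segment's
 * allowed encodings, or 0 for a bad page size encoding. */
static unsigned decode_page_shift(uint64_t pte0, uint64_t pte1,
                                  const struct page_enc *enc, int n)
{
    if (!(pte0 & HPTE64_V_LARGE)) {
        return 12;                     /* ordinary 4K page */
    }
    for (int i = 0; i < n; i++) {
        uint64_t mask = (1ULL << enc[i].page_shift) - 1;
        if ((pte1 & mask) == (enc[i].pte_enc << HPTE64_R_RPN_SHIFT)) {
            return enc[i].page_shift;
        }
    }
    return 0;
}

int main(void)
{
    /* 64K segment encodings from the table above: 64K -> 0x1, 16M -> 0x8 */
    static const struct page_enc enc64k[] = { { 16, 0x1 }, { 24, 0x8 } };
    uint64_t pte0 = HPTE64_V_LARGE;            /* L bit set */
    uint64_t pte1 = 0x0000000020001000ULL;     /* low bits carry pte_enc 0x1 */

    printf("actual page shift: %u\n", decode_page_shift(pte0, pte1, enc64k, 2));
    return 0;
}
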
|
||||
|
||||
static void init_proc_POWER7 (CPUPPCState *env)
|
||||
{
|
||||
init_proc_book3s_64(env, BOOK3S_CPU_POWER7);
|
||||
|
@ -8168,6 +8198,7 @@ POWERPC_FAMILY(POWER7)(ObjectClass *oc, void *data)
|
|||
pcc->mmu_model = POWERPC_MMU_2_06;
|
||||
#if defined(CONFIG_SOFTMMU)
|
||||
pcc->handle_mmu_fault = ppc_hash64_handle_mmu_fault;
|
||||
pcc->sps = &POWER7_POWER8_sps;
|
||||
#endif
|
||||
pcc->excp_model = POWERPC_EXCP_POWER7;
|
||||
pcc->bus_model = PPC_FLAGS_INPUT_POWER7;
|
||||
|
@ -8248,6 +8279,7 @@ POWERPC_FAMILY(POWER8)(ObjectClass *oc, void *data)
|
|||
pcc->mmu_model = POWERPC_MMU_2_07;
|
||||
#if defined(CONFIG_SOFTMMU)
|
||||
pcc->handle_mmu_fault = ppc_hash64_handle_mmu_fault;
|
||||
pcc->sps = &POWER7_POWER8_sps;
|
||||
#endif
|
||||
pcc->excp_model = POWERPC_EXCP_POWER7;
|
||||
pcc->bus_model = PPC_FLAGS_INPUT_POWER7;
|
||||
|
@@ -8750,14 +8782,25 @@ static void dump_ppc_insns (CPUPPCState *env)
 }
 #endif
 
+static bool avr_need_swap(CPUPPCState *env)
+{
+#ifdef HOST_WORDS_BIGENDIAN
+    return msr_le;
+#else
+    return !msr_le;
+#endif
+}
+
 static int gdb_get_float_reg(CPUPPCState *env, uint8_t *mem_buf, int n)
 {
     if (n < 32) {
         stfq_p(mem_buf, env->fpr[n]);
+        ppc_maybe_bswap_register(env, mem_buf, 8);
         return 8;
     }
     if (n == 32) {
         stl_p(mem_buf, env->fpscr);
+        ppc_maybe_bswap_register(env, mem_buf, 4);
         return 4;
     }
     return 0;
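
Side note: the gdbstub fixes hinge on one rule: a swap is needed exactly when the guest's MSR[LE] setting differs from the endianness the host binary was built for. A standalone sketch of that decision and of the AVR half ordering it implies follows; the names and values are hypothetical stand-ins for HOST_WORDS_BIGENDIAN and the guest state, and this is not QEMU code.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* On a big-endian host the vector halves only need swapping for a
 * little-endian guest; on a little-endian host it is the other way around. */
static bool avr_element_swap(bool host_big_endian, bool msr_le)
{
    return host_big_endian ? msr_le : !msr_le;
}

int main(void)
{
    uint64_t avr_hi = 0x1111111111111111ULL, avr_lo = 0x2222222222222222ULL;
    uint8_t mem_buf[16];
    bool host_be = false, guest_le = false;    /* hypothetical configuration */

    if (!avr_element_swap(host_be, guest_le)) {
        memcpy(mem_buf, &avr_hi, 8);
        memcpy(mem_buf + 8, &avr_lo, 8);
    } else {
        memcpy(mem_buf, &avr_lo, 8);
        memcpy(mem_buf + 8, &avr_hi, 8);
    }
    printf("first element byte: 0x%02x\n", mem_buf[0]);
    return 0;
}
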
|
||||
|
@ -8766,10 +8809,12 @@ static int gdb_get_float_reg(CPUPPCState *env, uint8_t *mem_buf, int n)
|
|||
static int gdb_set_float_reg(CPUPPCState *env, uint8_t *mem_buf, int n)
|
||||
{
|
||||
if (n < 32) {
|
||||
ppc_maybe_bswap_register(env, mem_buf, 8);
|
||||
env->fpr[n] = ldfq_p(mem_buf);
|
||||
return 8;
|
||||
}
|
||||
if (n == 32) {
|
||||
ppc_maybe_bswap_register(env, mem_buf, 4);
|
||||
helper_store_fpscr(env, ldl_p(mem_buf), 0xffffffff);
|
||||
return 4;
|
||||
}
|
||||
|
@ -8779,21 +8824,25 @@ static int gdb_set_float_reg(CPUPPCState *env, uint8_t *mem_buf, int n)
|
|||
static int gdb_get_avr_reg(CPUPPCState *env, uint8_t *mem_buf, int n)
|
||||
{
|
||||
if (n < 32) {
|
||||
#ifdef HOST_WORDS_BIGENDIAN
|
||||
stq_p(mem_buf, env->avr[n].u64[0]);
|
||||
stq_p(mem_buf+8, env->avr[n].u64[1]);
|
||||
#else
|
||||
stq_p(mem_buf, env->avr[n].u64[1]);
|
||||
stq_p(mem_buf+8, env->avr[n].u64[0]);
|
||||
#endif
|
||||
if (!avr_need_swap(env)) {
|
||||
stq_p(mem_buf, env->avr[n].u64[0]);
|
||||
stq_p(mem_buf+8, env->avr[n].u64[1]);
|
||||
} else {
|
||||
stq_p(mem_buf, env->avr[n].u64[1]);
|
||||
stq_p(mem_buf+8, env->avr[n].u64[0]);
|
||||
}
|
||||
ppc_maybe_bswap_register(env, mem_buf, 8);
|
||||
ppc_maybe_bswap_register(env, mem_buf + 8, 8);
|
||||
return 16;
|
||||
}
|
||||
if (n == 32) {
|
||||
stl_p(mem_buf, env->vscr);
|
||||
ppc_maybe_bswap_register(env, mem_buf, 4);
|
||||
return 4;
|
||||
}
|
||||
if (n == 33) {
|
||||
stl_p(mem_buf, (uint32_t)env->spr[SPR_VRSAVE]);
|
||||
ppc_maybe_bswap_register(env, mem_buf, 4);
|
||||
return 4;
|
||||
}
|
||||
return 0;
|
||||
|
@ -8802,20 +8851,24 @@ static int gdb_get_avr_reg(CPUPPCState *env, uint8_t *mem_buf, int n)
|
|||
static int gdb_set_avr_reg(CPUPPCState *env, uint8_t *mem_buf, int n)
|
||||
{
|
||||
if (n < 32) {
|
||||
#ifdef HOST_WORDS_BIGENDIAN
|
||||
env->avr[n].u64[0] = ldq_p(mem_buf);
|
||||
env->avr[n].u64[1] = ldq_p(mem_buf+8);
|
||||
#else
|
||||
env->avr[n].u64[1] = ldq_p(mem_buf);
|
||||
env->avr[n].u64[0] = ldq_p(mem_buf+8);
|
||||
#endif
|
||||
ppc_maybe_bswap_register(env, mem_buf, 8);
|
||||
ppc_maybe_bswap_register(env, mem_buf + 8, 8);
|
||||
if (!avr_need_swap(env)) {
|
||||
env->avr[n].u64[0] = ldq_p(mem_buf);
|
||||
env->avr[n].u64[1] = ldq_p(mem_buf+8);
|
||||
} else {
|
||||
env->avr[n].u64[1] = ldq_p(mem_buf);
|
||||
env->avr[n].u64[0] = ldq_p(mem_buf+8);
|
||||
}
|
||||
return 16;
|
||||
}
|
||||
if (n == 32) {
|
||||
ppc_maybe_bswap_register(env, mem_buf, 4);
|
||||
env->vscr = ldl_p(mem_buf);
|
||||
return 4;
|
||||
}
|
||||
if (n == 33) {
|
||||
ppc_maybe_bswap_register(env, mem_buf, 4);
|
||||
env->spr[SPR_VRSAVE] = (target_ulong)ldl_p(mem_buf);
|
||||
return 4;
|
||||
}
|
||||
|
@ -8827,6 +8880,7 @@ static int gdb_get_spe_reg(CPUPPCState *env, uint8_t *mem_buf, int n)
|
|||
if (n < 32) {
|
||||
#if defined(TARGET_PPC64)
|
||||
stl_p(mem_buf, env->gpr[n] >> 32);
|
||||
ppc_maybe_bswap_register(env, mem_buf, 4);
|
||||
#else
|
||||
stl_p(mem_buf, env->gprh[n]);
|
||||
#endif
|
||||
|
@ -8834,10 +8888,12 @@ static int gdb_get_spe_reg(CPUPPCState *env, uint8_t *mem_buf, int n)
|
|||
}
|
||||
if (n == 32) {
|
||||
stq_p(mem_buf, env->spe_acc);
|
||||
ppc_maybe_bswap_register(env, mem_buf, 8);
|
||||
return 8;
|
||||
}
|
||||
if (n == 33) {
|
||||
stl_p(mem_buf, env->spe_fscr);
|
||||
ppc_maybe_bswap_register(env, mem_buf, 4);
|
||||
return 4;
|
||||
}
|
||||
return 0;
|
||||
|
@ -8848,7 +8904,11 @@ static int gdb_set_spe_reg(CPUPPCState *env, uint8_t *mem_buf, int n)
|
|||
if (n < 32) {
|
||||
#if defined(TARGET_PPC64)
|
||||
target_ulong lo = (uint32_t)env->gpr[n];
|
||||
target_ulong hi = (target_ulong)ldl_p(mem_buf) << 32;
|
||||
target_ulong hi;
|
||||
|
||||
ppc_maybe_bswap_register(env, mem_buf, 4);
|
||||
|
||||
hi = (target_ulong)ldl_p(mem_buf) << 32;
|
||||
env->gpr[n] = lo | hi;
|
||||
#else
|
||||
env->gprh[n] = ldl_p(mem_buf);
|
||||
|
@ -8856,16 +8916,38 @@ static int gdb_set_spe_reg(CPUPPCState *env, uint8_t *mem_buf, int n)
|
|||
return 4;
|
||||
}
|
||||
if (n == 32) {
|
||||
ppc_maybe_bswap_register(env, mem_buf, 8);
|
||||
env->spe_acc = ldq_p(mem_buf);
|
||||
return 8;
|
||||
}
|
||||
if (n == 33) {
|
||||
ppc_maybe_bswap_register(env, mem_buf, 4);
|
||||
env->spe_fscr = ldl_p(mem_buf);
|
||||
return 4;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int gdb_get_vsx_reg(CPUPPCState *env, uint8_t *mem_buf, int n)
|
||||
{
|
||||
if (n < 32) {
|
||||
stq_p(mem_buf, env->vsr[n]);
|
||||
ppc_maybe_bswap_register(env, mem_buf, 8);
|
||||
return 8;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int gdb_set_vsx_reg(CPUPPCState *env, uint8_t *mem_buf, int n)
|
||||
{
|
||||
if (n < 32) {
|
||||
ppc_maybe_bswap_register(env, mem_buf, 8);
|
||||
env->vsr[n] = ldq_p(mem_buf);
|
||||
return 8;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int ppc_fixup_cpu(PowerPCCPU *cpu)
|
||||
{
|
||||
CPUPPCState *env = &cpu->env;
|
||||
|
@ -8971,6 +9053,10 @@ static void ppc_cpu_realizefn(DeviceState *dev, Error **errp)
|
|||
gdb_register_coprocessor(cs, gdb_get_spe_reg, gdb_set_spe_reg,
|
||||
34, "power-spe.xml", 0);
|
||||
}
|
||||
if (pcc->insns_flags2 & PPC2_VSX) {
|
||||
gdb_register_coprocessor(cs, gdb_get_vsx_reg, gdb_set_vsx_reg,
|
||||
32, "power-vsx.xml", 0);
|
||||
}
|
||||
|
||||
qemu_init_vcpu(cs);
|
||||
|
||||
|
@ -9185,7 +9271,7 @@ int ppc_get_compat_smt_threads(PowerPCCPU *cpu)
|
|||
return ret;
|
||||
}
|
||||
|
||||
int ppc_set_compat(PowerPCCPU *cpu, uint32_t cpu_version)
|
||||
void ppc_set_compat(PowerPCCPU *cpu, uint32_t cpu_version, Error **errp)
|
||||
{
|
||||
int ret = 0;
|
||||
CPUPPCState *env = &cpu->env;
|
||||
|
@ -9207,12 +9293,13 @@ int ppc_set_compat(PowerPCCPU *cpu, uint32_t cpu_version)
|
|||
break;
|
||||
}
|
||||
|
||||
if (kvm_enabled() && kvmppc_set_compat(cpu, cpu->cpu_version) < 0) {
|
||||
error_report("Unable to set compatibility mode in KVM");
|
||||
ret = -1;
|
||||
if (kvm_enabled()) {
|
||||
ret = kvmppc_set_compat(cpu, cpu->cpu_version);
|
||||
if (ret < 0) {
|
||||
error_setg_errno(errp, -ret,
|
||||
"Unable to set CPU compatibility mode in KVM");
|
||||
}
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static gint ppc_cpu_compare_class_pvr(gconstpointer a, gconstpointer b)
|
||||
|
|