mirror of https://gitee.com/openkylin/qemu.git
Merge remote-tracking branch 'sstabellini/xen-20121217' into staging
* sstabellini/xen-20121217:
  cpu_ioreq_pio, cpu_ioreq_move: i should be uint32_t rather than int
  cpu_ioreq_pio, cpu_ioreq_move: introduce read_phys_req_item, write_phys_req_item
  Fix compile errors when enabling Xen debug logging.
  xen: fix trivial PCI passthrough MSI-X bug
  xen: implement support for secondary consoles in the console backend

Signed-off-by: Anthony Liguori <aliguori@us.ibm.com>
commit c3a1ecd0fc
@@ -184,7 +184,11 @@ static int con_init(struct XenDevice *xendev)
     /* setup */
     dom = xs_get_domain_path(xenstore, con->xendev.dom);
-    snprintf(con->console, sizeof(con->console), "%s/console", dom);
+    if (!xendev->dev) {
+        snprintf(con->console, sizeof(con->console), "%s/console", dom);
+    } else {
+        snprintf(con->console, sizeof(con->console), "%s/device/console/%d", dom, xendev->dev);
+    }
     free(dom);
 
     type = xenstore_read_str(con->console, "type");
@@ -223,10 +227,16 @@ static int con_initialise(struct XenDevice *xendev)
     if (xenstore_read_int(con->console, "limit", &limit) == 0)
         con->buffer.max_capacity = limit;
 
-    con->sring = xc_map_foreign_range(xen_xc, con->xendev.dom,
-                                      XC_PAGE_SIZE,
-                                      PROT_READ|PROT_WRITE,
-                                      con->ring_ref);
+    if (!xendev->dev) {
+        con->sring = xc_map_foreign_range(xen_xc, con->xendev.dom,
+                                          XC_PAGE_SIZE,
+                                          PROT_READ|PROT_WRITE,
+                                          con->ring_ref);
+    } else {
+        con->sring = xc_gnttab_map_grant_ref(xendev->gnttabdev, con->xendev.dom,
+                                             con->ring_ref,
+                                             PROT_READ|PROT_WRITE);
+    }
     if (!con->sring)
         return -1;
 
@@ -255,7 +265,11 @@ static void con_disconnect(struct XenDevice *xendev)
     xen_be_unbind_evtchn(&con->xendev);
 
     if (con->sring) {
-        munmap(con->sring, XC_PAGE_SIZE);
+        if (!xendev->gnttabdev) {
+            munmap(con->sring, XC_PAGE_SIZE);
+        } else {
+            xc_gnttab_munmap(xendev->gnttabdev, con->sring, 1);
+        }
         con->sring = NULL;
     }
 }
@@ -273,7 +287,7 @@ static void con_event(struct XenDevice *xendev)
 
 struct XenDevOps xen_console_ops = {
     .size       = sizeof(struct XenConsole),
-    .flags      = DEVOPS_FLAG_IGNORE_STATE,
+    .flags      = DEVOPS_FLAG_IGNORE_STATE|DEVOPS_FLAG_NEED_GNTDEV,
     .init       = con_init,
     .initialise = con_initialise,
     .event      = con_event,
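The console backend hunks above follow one convention: the primary console keeps the historical "<dom>/console" xenstore node and maps its ring with xc_map_foreign_range(), while secondary consoles live under "<dom>/device/console/<N>" and map their ring through a grant reference. A minimal standalone sketch of just the path scheme follows; build_console_path() and its arguments are illustrative assumptions, not QEMU code.

#include <stddef.h>
#include <stdio.h>

/* Mirror of the xenstore path convention used by con_init() above:
 * device index 0 is the primary console, anything else is secondary. */
static void build_console_path(char *buf, size_t len, const char *dom, int dev)
{
    if (dev == 0) {
        snprintf(buf, len, "%s/console", dom);                  /* primary */
    } else {
        snprintf(buf, len, "%s/device/console/%d", dom, dev);   /* secondary */
    }
}

int main(void)
{
    char path[128];

    build_console_path(path, sizeof(path), "/local/domain/7", 0);
    printf("%s\n", path);   /* /local/domain/7/console */
    build_console_path(path, sizeof(path), "/local/domain/7", 2);
    printf("%s\n", path);   /* /local/domain/7/device/console/2 */
    return 0;
}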
@@ -671,7 +671,8 @@ static int xen_pt_initfn(PCIDevice *d)
     s->is_virtfn = s->real_device.is_virtfn;
     if (s->is_virtfn) {
         XEN_PT_LOG(d, "%04x:%02x:%02x.%d is a SR-IOV Virtual Function\n",
-                   s->real_device.domain, bus, slot, func);
+                   s->real_device.domain, s->real_device.bus,
+                   s->real_device.dev, s->real_device.func);
     }
 
     /* Initialize virtualized PCI configuration (Extended 256 Bytes) */
@@ -752,7 +753,7 @@ out:
     memory_listener_register(&s->memory_listener, &address_space_memory);
     memory_listener_register(&s->io_listener, &address_space_io);
     XEN_PT_LOG(d, "Real physical device %02x:%02x.%d registered successfuly!\n",
-               bus, slot, func);
+               s->hostaddr.bus, s->hostaddr.slot, s->hostaddr.function);
 
     return 0;
 }
@@ -321,7 +321,7 @@ static int xen_pt_msix_update_one(XenPCIPassthroughState *s, int entry_nr)
 
     pirq = entry->pirq;
 
-    rc = msi_msix_setup(s, entry->data, entry->data, &pirq, true, entry_nr,
+    rc = msi_msix_setup(s, entry->addr, entry->data, &pirq, true, entry_nr,
                         entry->pirq == XEN_PT_UNASSIGNED_PIRQ);
     if (rc) {
         return rc;
xen-all.c
@@ -292,7 +292,8 @@ static int xen_add_to_physmap(XenIOState *state,
         return -1;
 
 go_physmap:
-    DPRINTF("mapping vram to %llx - %llx\n", start_addr, start_addr + size);
+    DPRINTF("mapping vram to %"HWADDR_PRIx" - %"HWADDR_PRIx"\n",
+            start_addr, start_addr + size);
 
     pfn = phys_offset >> TARGET_PAGE_BITS;
     start_gpfn = start_addr >> TARGET_PAGE_BITS;
@@ -365,8 +366,8 @@ static int xen_remove_from_physmap(XenIOState *state,
     phys_offset = physmap->phys_offset;
     size = physmap->size;
 
-    DPRINTF("unmapping vram to %llx - %llx, from %llx\n",
-            phys_offset, phys_offset + size, start_addr);
+    DPRINTF("unmapping vram to %"HWADDR_PRIx" - %"HWADDR_PRIx", from "
+            "%"HWADDR_PRIx"\n", phys_offset, phys_offset + size, start_addr);
 
     size >>= TARGET_PAGE_BITS;
     start_addr >>= TARGET_PAGE_BITS;
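A note on the two DPRINTF fixes above: hwaddr is a typedef whose width depends on the build, so a hard-coded "%llx" conversion can mismatch its argument; QEMU's HWADDR_PRIx macro expands to the right conversion for the type and is spliced into the format string via adjacent-literal concatenation. The standalone sketch below shows the same pattern with the standard <inttypes.h> macros; my_addr_t and MY_ADDR_PRIx are assumed names for illustration only, not QEMU definitions.

#include <inttypes.h>
#include <stdio.h>

/* Stand-in for a build-dependent address type such as hwaddr. */
typedef uint64_t my_addr_t;
/* Format macro matching the typedef, analogous to HWADDR_PRIx. */
#define MY_ADDR_PRIx PRIx64

int main(void)
{
    my_addr_t start = 0xf0000000, size = 0x800000;

    /* Adjacent string literals concatenate, so the macro sits inside
     * the format string without any cast on the arguments. */
    printf("mapping vram to %" MY_ADDR_PRIx " - %" MY_ADDR_PRIx "\n",
           start, start + size);
    return 0;
}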
@@ -682,11 +683,45 @@ static void do_outp(pio_addr_t addr,
     }
 }
 
+/*
+ * Helper functions which read/write an object from/to physical guest
+ * memory, as part of the implementation of an ioreq.
+ *
+ * Equivalent to
+ *   cpu_physical_memory_rw(addr + (req->df ? -1 : +1) * req->size * i,
+ *                          val, req->size, 0/1)
+ * except without the integer overflow problems.
+ */
+static void rw_phys_req_item(hwaddr addr,
+                             ioreq_t *req, uint32_t i, void *val, int rw)
+{
+    /* Do everything unsigned so overflow just results in a truncated result
+     * and accesses to undesired parts of guest memory, which is up
+     * to the guest */
+    hwaddr offset = (hwaddr)req->size * i;
+    if (req->df) {
+        addr -= offset;
+    } else {
+        addr += offset;
+    }
+    cpu_physical_memory_rw(addr, val, req->size, rw);
+}
+
+static inline void read_phys_req_item(hwaddr addr,
+                                      ioreq_t *req, uint32_t i, void *val)
+{
+    rw_phys_req_item(addr, req, i, val, 0);
+}
+static inline void write_phys_req_item(hwaddr addr,
+                                       ioreq_t *req, uint32_t i, void *val)
+{
+    rw_phys_req_item(addr, req, i, val, 1);
+}
+
+
 static void cpu_ioreq_pio(ioreq_t *req)
 {
-    int i, sign;
-
-    sign = req->df ? -1 : 1;
+    uint32_t i;
 
     if (req->dir == IOREQ_READ) {
         if (!req->data_is_ptr) {
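For the hunks that follow, the point of the new rw_phys_req_item() helper is that the per-item offset is computed with unsigned arithmetic and then added to or subtracted from the base address according to req->df, so a large size * i product merely truncates instead of hitting signed-overflow undefined behaviour, as the old (sign * i * (int64_t)req->size) expression could. The standalone sketch below reproduces just that address-stepping logic; the fake_ioreq struct and the uint64_t stand-in for hwaddr are assumptions for illustration, not QEMU definitions.

#include <inttypes.h>
#include <stdio.h>

struct fake_ioreq {
    uint32_t size;   /* bytes per item */
    uint8_t  df;     /* 1: addresses step downwards, 0: upwards */
};

/* Same stepping rule as rw_phys_req_item(): unsigned product, so an
 * overflow wraps to a truncated address instead of being undefined. */
static uint64_t item_addr(uint64_t base, const struct fake_ioreq *req, uint32_t i)
{
    uint64_t offset = (uint64_t)req->size * i;
    return req->df ? base - offset : base + offset;
}

int main(void)
{
    struct fake_ioreq up   = { .size = 4, .df = 0 };
    struct fake_ioreq down = { .size = 4, .df = 1 };

    for (uint32_t i = 0; i < 3; i++) {
        printf("item %" PRIu32 ": up=0x%" PRIx64 " down=0x%" PRIx64 "\n",
               i, item_addr(0x1000, &up, i), item_addr(0x1000, &down, i));
    }
    return 0;
}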
@@ -696,9 +731,7 @@ static void cpu_ioreq_pio(ioreq_t *req)
 
             for (i = 0; i < req->count; i++) {
                 tmp = do_inp(req->addr, req->size);
-                cpu_physical_memory_write(
-                        req->data + (sign * i * (int64_t)req->size),
-                        (uint8_t *) &tmp, req->size);
+                write_phys_req_item(req->data, req, i, &tmp);
             }
         }
     } else if (req->dir == IOREQ_WRITE) {
@@ -708,9 +741,7 @@ static void cpu_ioreq_pio(ioreq_t *req)
             for (i = 0; i < req->count; i++) {
                 uint32_t tmp = 0;
 
-                cpu_physical_memory_read(
-                        req->data + (sign * i * (int64_t)req->size),
-                        (uint8_t*) &tmp, req->size);
+                read_phys_req_item(req->data, req, i, &tmp);
                 do_outp(req->addr, req->size, tmp);
             }
         }
@@ -719,22 +750,16 @@ static void cpu_ioreq_pio(ioreq_t *req)
 
 static void cpu_ioreq_move(ioreq_t *req)
 {
-    int i, sign;
-
-    sign = req->df ? -1 : 1;
+    uint32_t i;
 
     if (!req->data_is_ptr) {
         if (req->dir == IOREQ_READ) {
             for (i = 0; i < req->count; i++) {
-                cpu_physical_memory_read(
-                        req->addr + (sign * i * (int64_t)req->size),
-                        (uint8_t *) &req->data, req->size);
+                read_phys_req_item(req->addr, req, i, &req->data);
             }
         } else if (req->dir == IOREQ_WRITE) {
             for (i = 0; i < req->count; i++) {
-                cpu_physical_memory_write(
-                        req->addr + (sign * i * (int64_t)req->size),
-                        (uint8_t *) &req->data, req->size);
+                write_phys_req_item(req->addr, req, i, &req->data);
             }
         }
     } else {
@@ -742,21 +767,13 @@ static void cpu_ioreq_move(ioreq_t *req)
 
         if (req->dir == IOREQ_READ) {
             for (i = 0; i < req->count; i++) {
-                cpu_physical_memory_read(
-                        req->addr + (sign * i * (int64_t)req->size),
-                        (uint8_t*) &tmp, req->size);
-                cpu_physical_memory_write(
-                        req->data + (sign * i * (int64_t)req->size),
-                        (uint8_t*) &tmp, req->size);
+                read_phys_req_item(req->addr, req, i, &tmp);
+                write_phys_req_item(req->data, req, i, &tmp);
             }
         } else if (req->dir == IOREQ_WRITE) {
             for (i = 0; i < req->count; i++) {
-                cpu_physical_memory_read(
-                        req->data + (sign * i * (int64_t)req->size),
-                        (uint8_t*) &tmp, req->size);
-                cpu_physical_memory_write(
-                        req->addr + (sign * i * (int64_t)req->size),
-                        (uint8_t*) &tmp, req->size);
+                read_phys_req_item(req->data, req, i, &tmp);
+                write_phys_req_item(req->addr, req, i, &tmp);
             }
         }
     }