/*
 * QEMU RISC-V VirtIO Board
 *
 * Copyright (c) 2017 SiFive, Inc.
 *
 * RISC-V machine with 16550a UART and VirtIO MMIO
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qemu/log.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
#include "hw/boards.h"
#include "hw/loader.h"
#include "hw/sysbus.h"
#include "hw/qdev-properties.h"
#include "hw/char/serial.h"
#include "target/riscv/cpu.h"
#include "hw/riscv/riscv_hart.h"
#include "hw/riscv/virt.h"
#include "hw/riscv/boot.h"
#include "hw/riscv/numa.h"
#include "hw/intc/sifive_clint.h"
#include "hw/intc/sifive_plic.h"
#include "hw/misc/sifive_test.h"
#include "chardev/char.h"
#include "sysemu/arch_init.h"
#include "sysemu/device_tree.h"
#include "sysemu/sysemu.h"
#include "hw/pci/pci.h"
#include "hw/pci-host/gpex.h"

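/*
 * The virt machine physical memory map. Both the generated device tree and
 * the devices created in virt_machine_init() derive their addresses from
 * this table.
 */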
static const struct MemmapEntry {
    hwaddr base;
    hwaddr size;
} virt_memmap[] = {
    [VIRT_DEBUG] =       {        0x0,         0x100 },
    [VIRT_MROM] =        {     0x1000,        0xf000 },
    [VIRT_TEST] =        {   0x100000,        0x1000 },
    [VIRT_RTC] =         {   0x101000,        0x1000 },
    [VIRT_CLINT] =       {  0x2000000,       0x10000 },
    [VIRT_PCIE_PIO] =    {  0x3000000,       0x10000 },
    [VIRT_PLIC] =        {  0xc000000, VIRT_PLIC_SIZE(VIRT_CPUS_MAX * 2) },
    [VIRT_UART0] =       { 0x10000000,         0x100 },
    [VIRT_VIRTIO] =      { 0x10001000,        0x1000 },
    [VIRT_FLASH] =       { 0x20000000,     0x4000000 },
    [VIRT_PCIE_ECAM] =   { 0x30000000,    0x10000000 },
    [VIRT_PCIE_MMIO] =   { 0x40000000,    0x40000000 },
    [VIRT_DRAM] =        { 0x80000000,           0x0 },
};

#define VIRT_FLASH_SECTOR_SIZE (256 * KiB)

static PFlashCFI01 *virt_flash_create1(RISCVVirtState *s,
                                       const char *name,
                                       const char *alias_prop_name)
{
    /*
     * Create a single flash device. We use the same parameters as
     * the flash devices on the ARM virt board.
     */
    DeviceState *dev = qdev_new(TYPE_PFLASH_CFI01);

    qdev_prop_set_uint64(dev, "sector-length", VIRT_FLASH_SECTOR_SIZE);
    qdev_prop_set_uint8(dev, "width", 4);
    qdev_prop_set_uint8(dev, "device-width", 2);
    qdev_prop_set_bit(dev, "big-endian", false);
    qdev_prop_set_uint16(dev, "id0", 0x89);
    qdev_prop_set_uint16(dev, "id1", 0x18);
    qdev_prop_set_uint16(dev, "id2", 0x00);
    qdev_prop_set_uint16(dev, "id3", 0x00);
    qdev_prop_set_string(dev, "name", name);

    object_property_add_child(OBJECT(s), name, OBJECT(dev));
    object_property_add_alias(OBJECT(s), alias_prop_name,
                              OBJECT(dev), "drive");

    return PFLASH_CFI01(dev);
}

static void virt_flash_create(RISCVVirtState *s)
{
    s->flash[0] = virt_flash_create1(s, "virt.flash0", "pflash0");
    s->flash[1] = virt_flash_create1(s, "virt.flash1", "pflash1");
}

static void virt_flash_map1(PFlashCFI01 *flash,
                            hwaddr base, hwaddr size,
                            MemoryRegion *sysmem)
{
    DeviceState *dev = DEVICE(flash);

    assert(QEMU_IS_ALIGNED(size, VIRT_FLASH_SECTOR_SIZE));
    assert(size / VIRT_FLASH_SECTOR_SIZE <= UINT32_MAX);
    qdev_prop_set_uint32(dev, "num-blocks", size / VIRT_FLASH_SECTOR_SIZE);
    sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);

    memory_region_add_subregion(sysmem, base,
                                sysbus_mmio_get_region(SYS_BUS_DEVICE(dev),
                                                       0));
}

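/*
 * Map the two flash banks created above: the VIRT_FLASH region is split in
 * half, with flash0 at the base of the region and flash1 immediately after.
 */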
static void virt_flash_map(RISCVVirtState *s,
                           MemoryRegion *sysmem)
{
    hwaddr flashsize = virt_memmap[VIRT_FLASH].size / 2;
    hwaddr flashbase = virt_memmap[VIRT_FLASH].base;

    virt_flash_map1(s->flash[0], flashbase, flashsize,
                    sysmem);
    virt_flash_map1(s->flash[1], flashbase + flashsize, flashsize,
                    sysmem);
}

static void create_pcie_irq_map(void *fdt, char *nodename,
                                uint32_t plic_phandle)
{
    int pin, dev;
    uint32_t
        full_irq_map[GPEX_NUM_IRQS * GPEX_NUM_IRQS * FDT_INT_MAP_WIDTH] = {};
    uint32_t *irq_map = full_irq_map;

    /* This code creates a standard swizzle of interrupts such that
     * each device's first interrupt is based on its PCI_SLOT number.
     * (See pci_swizzle_map_irq_fn())
     *
     * We only need one entry per interrupt in the table (not one per
     * possible slot) since the interrupt-map-mask allows the table
     * to wrap to any number of devices.
     */
    for (dev = 0; dev < GPEX_NUM_IRQS; dev++) {
        int devfn = dev * 0x8;

        for (pin = 0; pin < GPEX_NUM_IRQS; pin++) {
            int irq_nr = PCIE_IRQ + ((pin + PCI_SLOT(devfn)) % GPEX_NUM_IRQS);
            int i = 0;

            irq_map[i] = cpu_to_be32(devfn << 8);

            i += FDT_PCI_ADDR_CELLS;
            irq_map[i] = cpu_to_be32(pin + 1);

            i += FDT_PCI_INT_CELLS;
            irq_map[i++] = cpu_to_be32(plic_phandle);

            i += FDT_PLIC_ADDR_CELLS;
            irq_map[i] = cpu_to_be32(irq_nr);

            irq_map += FDT_INT_MAP_WIDTH;
        }
    }

    qemu_fdt_setprop(fdt, nodename, "interrupt-map",
                     full_irq_map, sizeof(full_irq_map));

    qemu_fdt_setprop_cells(fdt, nodename, "interrupt-map-mask",
                           0x1800, 0, 0, 0x7);
}

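/*
 * Build the guest device tree, or load the one supplied with -dtb: per-socket
 * cpu, memory, CLINT and PLIC nodes, plus the shared virtio-mmio, PCIe,
 * test/reset, UART, RTC and flash nodes.
 */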
static void create_fdt(RISCVVirtState *s, const struct MemmapEntry *memmap,
                       uint64_t mem_size, const char *cmdline, bool is_32_bit)
{
    void *fdt;
    int i, cpu, socket;
    MachineState *mc = MACHINE(s);
    uint64_t addr, size;
    uint32_t *clint_cells, *plic_cells;
    unsigned long clint_addr, plic_addr;
    uint32_t plic_phandle[MAX_NODES];
    uint32_t cpu_phandle, intc_phandle, test_phandle;
    uint32_t phandle = 1, plic_mmio_phandle = 1;
    uint32_t plic_pcie_phandle = 1, plic_virtio_phandle = 1;
    char *mem_name, *cpu_name, *core_name, *intc_name;
    char *name, *clint_name, *plic_name, *clust_name;
    hwaddr flashsize = virt_memmap[VIRT_FLASH].size / 2;
    hwaddr flashbase = virt_memmap[VIRT_FLASH].base;

    if (mc->dtb) {
        fdt = s->fdt = load_device_tree(mc->dtb, &s->fdt_size);
        if (!fdt) {
            error_report("load_device_tree() failed");
            exit(1);
        }
        goto update_bootargs;
    } else {
        fdt = s->fdt = create_device_tree(&s->fdt_size);
        if (!fdt) {
            error_report("create_device_tree() failed");
            exit(1);
        }
    }

    qemu_fdt_setprop_string(fdt, "/", "model", "riscv-virtio,qemu");
    qemu_fdt_setprop_string(fdt, "/", "compatible", "riscv-virtio");
    qemu_fdt_setprop_cell(fdt, "/", "#size-cells", 0x2);
    qemu_fdt_setprop_cell(fdt, "/", "#address-cells", 0x2);

    qemu_fdt_add_subnode(fdt, "/soc");
    qemu_fdt_setprop(fdt, "/soc", "ranges", NULL, 0);
    qemu_fdt_setprop_string(fdt, "/soc", "compatible", "simple-bus");
    qemu_fdt_setprop_cell(fdt, "/soc", "#size-cells", 0x2);
    qemu_fdt_setprop_cell(fdt, "/soc", "#address-cells", 0x2);

    qemu_fdt_add_subnode(fdt, "/cpus");
    qemu_fdt_setprop_cell(fdt, "/cpus", "timebase-frequency",
                          SIFIVE_CLINT_TIMEBASE_FREQ);
    qemu_fdt_setprop_cell(fdt, "/cpus", "#size-cells", 0x0);
    qemu_fdt_setprop_cell(fdt, "/cpus", "#address-cells", 0x1);
    qemu_fdt_add_subnode(fdt, "/cpus/cpu-map");

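    /*
     * Add the per-socket nodes: each socket is a NUMA node with its own cpu
     * cluster, memory range, CLINT and PLIC.
     */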
    for (socket = (riscv_socket_count(mc) - 1); socket >= 0; socket--) {
        clust_name = g_strdup_printf("/cpus/cpu-map/cluster%d", socket);
        qemu_fdt_add_subnode(fdt, clust_name);

        plic_cells = g_new0(uint32_t, s->soc[socket].num_harts * 4);
        clint_cells = g_new0(uint32_t, s->soc[socket].num_harts * 4);

        for (cpu = s->soc[socket].num_harts - 1; cpu >= 0; cpu--) {
            cpu_phandle = phandle++;

            cpu_name = g_strdup_printf("/cpus/cpu@%d",
                s->soc[socket].hartid_base + cpu);
            qemu_fdt_add_subnode(fdt, cpu_name);
            if (is_32_bit) {
                qemu_fdt_setprop_string(fdt, cpu_name, "mmu-type", "riscv,sv32");
            } else {
                qemu_fdt_setprop_string(fdt, cpu_name, "mmu-type", "riscv,sv48");
            }
            name = riscv_isa_string(&s->soc[socket].harts[cpu]);
            qemu_fdt_setprop_string(fdt, cpu_name, "riscv,isa", name);
            g_free(name);
            qemu_fdt_setprop_string(fdt, cpu_name, "compatible", "riscv");
            qemu_fdt_setprop_string(fdt, cpu_name, "status", "okay");
            qemu_fdt_setprop_cell(fdt, cpu_name, "reg",
                s->soc[socket].hartid_base + cpu);
            qemu_fdt_setprop_string(fdt, cpu_name, "device_type", "cpu");
            riscv_socket_fdt_write_id(mc, fdt, cpu_name, socket);
            qemu_fdt_setprop_cell(fdt, cpu_name, "phandle", cpu_phandle);

            intc_name = g_strdup_printf("%s/interrupt-controller", cpu_name);
            qemu_fdt_add_subnode(fdt, intc_name);
            intc_phandle = phandle++;
            qemu_fdt_setprop_cell(fdt, intc_name, "phandle", intc_phandle);
            qemu_fdt_setprop_string(fdt, intc_name, "compatible",
                "riscv,cpu-intc");
            qemu_fdt_setprop(fdt, intc_name, "interrupt-controller", NULL, 0);
            qemu_fdt_setprop_cell(fdt, intc_name, "#interrupt-cells", 1);

            clint_cells[cpu * 4 + 0] = cpu_to_be32(intc_phandle);
            clint_cells[cpu * 4 + 1] = cpu_to_be32(IRQ_M_SOFT);
            clint_cells[cpu * 4 + 2] = cpu_to_be32(intc_phandle);
            clint_cells[cpu * 4 + 3] = cpu_to_be32(IRQ_M_TIMER);

            plic_cells[cpu * 4 + 0] = cpu_to_be32(intc_phandle);
            plic_cells[cpu * 4 + 1] = cpu_to_be32(IRQ_M_EXT);
            plic_cells[cpu * 4 + 2] = cpu_to_be32(intc_phandle);
            plic_cells[cpu * 4 + 3] = cpu_to_be32(IRQ_S_EXT);

            core_name = g_strdup_printf("%s/core%d", clust_name, cpu);
            qemu_fdt_add_subnode(fdt, core_name);
            qemu_fdt_setprop_cell(fdt, core_name, "cpu", cpu_phandle);

            g_free(core_name);
            g_free(intc_name);
            g_free(cpu_name);
        }

        addr = memmap[VIRT_DRAM].base + riscv_socket_mem_offset(mc, socket);
        size = riscv_socket_mem_size(mc, socket);
        mem_name = g_strdup_printf("/memory@%lx", (long)addr);
        qemu_fdt_add_subnode(fdt, mem_name);
        qemu_fdt_setprop_cells(fdt, mem_name, "reg",
            addr >> 32, addr, size >> 32, size);
        qemu_fdt_setprop_string(fdt, mem_name, "device_type", "memory");
        riscv_socket_fdt_write_id(mc, fdt, mem_name, socket);
        g_free(mem_name);

        clint_addr = memmap[VIRT_CLINT].base +
            (memmap[VIRT_CLINT].size * socket);
        clint_name = g_strdup_printf("/soc/clint@%lx", clint_addr);
        qemu_fdt_add_subnode(fdt, clint_name);
        qemu_fdt_setprop_string(fdt, clint_name, "compatible", "riscv,clint0");
        qemu_fdt_setprop_cells(fdt, clint_name, "reg",
            0x0, clint_addr, 0x0, memmap[VIRT_CLINT].size);
        qemu_fdt_setprop(fdt, clint_name, "interrupts-extended",
            clint_cells, s->soc[socket].num_harts * sizeof(uint32_t) * 4);
        riscv_socket_fdt_write_id(mc, fdt, clint_name, socket);
        g_free(clint_name);

        plic_phandle[socket] = phandle++;
        plic_addr = memmap[VIRT_PLIC].base + (memmap[VIRT_PLIC].size * socket);
        plic_name = g_strdup_printf("/soc/plic@%lx", plic_addr);
        qemu_fdt_add_subnode(fdt, plic_name);
        qemu_fdt_setprop_cell(fdt, plic_name,
            "#address-cells", FDT_PLIC_ADDR_CELLS);
        qemu_fdt_setprop_cell(fdt, plic_name,
            "#interrupt-cells", FDT_PLIC_INT_CELLS);
        qemu_fdt_setprop_string(fdt, plic_name, "compatible", "riscv,plic0");
        qemu_fdt_setprop(fdt, plic_name, "interrupt-controller", NULL, 0);
        qemu_fdt_setprop(fdt, plic_name, "interrupts-extended",
            plic_cells, s->soc[socket].num_harts * sizeof(uint32_t) * 4);
        qemu_fdt_setprop_cells(fdt, plic_name, "reg",
            0x0, plic_addr, 0x0, memmap[VIRT_PLIC].size);
        qemu_fdt_setprop_cell(fdt, plic_name, "riscv,ndev", VIRTIO_NDEV);
        riscv_socket_fdt_write_id(mc, fdt, plic_name, socket);
        qemu_fdt_setprop_cell(fdt, plic_name, "phandle", plic_phandle[socket]);
        g_free(plic_name);

        g_free(clint_cells);
        g_free(plic_cells);
        g_free(clust_name);
    }

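    /*
     * Choose interrupt parents for the shared peripherals: socket 0's PLIC
     * serves the MMIO devices, while the virtio and PCIe interrupts move to
     * the PLICs of sockets 1 and 2 when those sockets exist.
     */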
    for (socket = 0; socket < riscv_socket_count(mc); socket++) {
        if (socket == 0) {
            plic_mmio_phandle = plic_phandle[socket];
            plic_virtio_phandle = plic_phandle[socket];
            plic_pcie_phandle = plic_phandle[socket];
        }
        if (socket == 1) {
            plic_virtio_phandle = plic_phandle[socket];
            plic_pcie_phandle = plic_phandle[socket];
        }
        if (socket == 2) {
            plic_pcie_phandle = plic_phandle[socket];
        }
    }

    riscv_socket_fdt_write_distance_matrix(mc, fdt);

    for (i = 0; i < VIRTIO_COUNT; i++) {
        name = g_strdup_printf("/soc/virtio_mmio@%lx",
            (long)(memmap[VIRT_VIRTIO].base + i * memmap[VIRT_VIRTIO].size));
        qemu_fdt_add_subnode(fdt, name);
        qemu_fdt_setprop_string(fdt, name, "compatible", "virtio,mmio");
        qemu_fdt_setprop_cells(fdt, name, "reg",
            0x0, memmap[VIRT_VIRTIO].base + i * memmap[VIRT_VIRTIO].size,
            0x0, memmap[VIRT_VIRTIO].size);
        qemu_fdt_setprop_cell(fdt, name, "interrupt-parent",
            plic_virtio_phandle);
        qemu_fdt_setprop_cell(fdt, name, "interrupts", VIRTIO_IRQ + i);
        g_free(name);
    }

name = g_strdup_printf("/soc/pci@%lx",
|
2018-12-12 06:37:36 +08:00
|
|
|
(long) memmap[VIRT_PCIE_ECAM].base);
|
hw/riscv: virt: Allow creating multiple NUMA sockets
We extend RISC-V virt machine to allow creating a multi-socket
machine. Each RISC-V virt machine socket is a NUMA node having
a set of HARTs, a memory instance, a CLINT instance, and a PLIC
instance. Other devices are shared between all sockets. We also
update the generated device tree accordingly.
By default, NUMA multi-socket support is disabled for RISC-V virt
machine. To enable it, users can use "-numa" command-line options
of QEMU.
Example1: For two NUMA nodes with 2 CPUs each, append following
to command-line options: "-smp 4 -numa node -numa node"
Example2: For two NUMA nodes with 1 and 3 CPUs, append following
to command-line options:
"-smp 4 -numa node -numa node -numa cpu,node-id=0,core-id=0 \
-numa cpu,node-id=1,core-id=1 -numa cpu,node-id=1,core-id=2 \
-numa cpu,node-id=1,core-id=3"
The maximum number of sockets in a RISC-V virt machine is 8
but this limit can be changed in future.
Signed-off-by: Anup Patel <anup.patel@wdc.com>
Reviewed-by: Atish Patra <atish.patra@wdc.com>
Message-Id: <20200616032229.766089-6-anup.patel@wdc.com>
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
2020-05-15 17:28:50 +08:00
|
|
|
qemu_fdt_add_subnode(fdt, name);
|
|
|
|
qemu_fdt_setprop_cell(fdt, name, "#address-cells", FDT_PCI_ADDR_CELLS);
|
|
|
|
qemu_fdt_setprop_cell(fdt, name, "#interrupt-cells", FDT_PCI_INT_CELLS);
|
|
|
|
qemu_fdt_setprop_cell(fdt, name, "#size-cells", 0x2);
|
|
|
|
qemu_fdt_setprop_string(fdt, name, "compatible", "pci-host-ecam-generic");
|
|
|
|
qemu_fdt_setprop_string(fdt, name, "device_type", "pci");
|
|
|
|
qemu_fdt_setprop_cell(fdt, name, "linux,pci-domain", 0);
|
|
|
|
qemu_fdt_setprop_cells(fdt, name, "bus-range", 0,
|
|
|
|
memmap[VIRT_PCIE_ECAM].size / PCIE_MMCFG_SIZE_MIN - 1);
|
|
|
|
qemu_fdt_setprop(fdt, name, "dma-coherent", NULL, 0);
|
|
|
|
qemu_fdt_setprop_cells(fdt, name, "reg", 0,
|
|
|
|
memmap[VIRT_PCIE_ECAM].base, 0, memmap[VIRT_PCIE_ECAM].size);
|
|
|
|
qemu_fdt_setprop_sized_cells(fdt, name, "ranges",
|
2018-12-12 06:37:36 +08:00
|
|
|
1, FDT_PCI_RANGE_IOPORT, 2, 0,
|
|
|
|
2, memmap[VIRT_PCIE_PIO].base, 2, memmap[VIRT_PCIE_PIO].size,
|
|
|
|
1, FDT_PCI_RANGE_MMIO,
|
|
|
|
2, memmap[VIRT_PCIE_MMIO].base,
|
|
|
|
2, memmap[VIRT_PCIE_MMIO].base, 2, memmap[VIRT_PCIE_MMIO].size);
|
hw/riscv: virt: Allow creating multiple NUMA sockets
We extend RISC-V virt machine to allow creating a multi-socket
machine. Each RISC-V virt machine socket is a NUMA node having
a set of HARTs, a memory instance, a CLINT instance, and a PLIC
instance. Other devices are shared between all sockets. We also
update the generated device tree accordingly.
By default, NUMA multi-socket support is disabled for RISC-V virt
machine. To enable it, users can use "-numa" command-line options
of QEMU.
Example1: For two NUMA nodes with 2 CPUs each, append following
to command-line options: "-smp 4 -numa node -numa node"
Example2: For two NUMA nodes with 1 and 3 CPUs, append following
to command-line options:
"-smp 4 -numa node -numa node -numa cpu,node-id=0,core-id=0 \
-numa cpu,node-id=1,core-id=1 -numa cpu,node-id=1,core-id=2 \
-numa cpu,node-id=1,core-id=3"
The maximum number of sockets in a RISC-V virt machine is 8
but this limit can be changed in future.
Signed-off-by: Anup Patel <anup.patel@wdc.com>
Reviewed-by: Atish Patra <atish.patra@wdc.com>
Message-Id: <20200616032229.766089-6-anup.patel@wdc.com>
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
2020-05-15 17:28:50 +08:00
|
|
|
create_pcie_irq_map(fdt, name, plic_pcie_phandle);
|
|
|
|
g_free(name);
|
2018-12-12 06:37:36 +08:00
|
|
|
|
2020-01-22 21:17:23 +08:00
|
|
|
    test_phandle = phandle++;
    name = g_strdup_printf("/soc/test@%lx",
        (long)memmap[VIRT_TEST].base);
    qemu_fdt_add_subnode(fdt, name);
    {
        const char compat[] = "sifive,test1\0sifive,test0\0syscon";
        qemu_fdt_setprop(fdt, name, "compatible", compat, sizeof(compat));
    }
    qemu_fdt_setprop_cells(fdt, name, "reg",
        0x0, memmap[VIRT_TEST].base,
        0x0, memmap[VIRT_TEST].size);
    qemu_fdt_setprop_cell(fdt, name, "phandle", test_phandle);
    test_phandle = qemu_fdt_get_phandle(fdt, name);
    g_free(name);

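    /*
     * The reboot and poweroff nodes are syscon helpers that write
     * FINISHER_RESET or FINISHER_PASS into the sifive_test device above.
     */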
name = g_strdup_printf("/soc/reboot");
|
|
|
|
qemu_fdt_add_subnode(fdt, name);
|
|
|
|
qemu_fdt_setprop_string(fdt, name, "compatible", "syscon-reboot");
|
|
|
|
qemu_fdt_setprop_cell(fdt, name, "regmap", test_phandle);
|
|
|
|
qemu_fdt_setprop_cell(fdt, name, "offset", 0x0);
|
|
|
|
qemu_fdt_setprop_cell(fdt, name, "value", FINISHER_RESET);
|
|
|
|
g_free(name);
|
|
|
|
|
|
|
|
name = g_strdup_printf("/soc/poweroff");
|
|
|
|
qemu_fdt_add_subnode(fdt, name);
|
|
|
|
qemu_fdt_setprop_string(fdt, name, "compatible", "syscon-poweroff");
|
|
|
|
qemu_fdt_setprop_cell(fdt, name, "regmap", test_phandle);
|
|
|
|
qemu_fdt_setprop_cell(fdt, name, "offset", 0x0);
|
|
|
|
qemu_fdt_setprop_cell(fdt, name, "value", FINISHER_PASS);
|
|
|
|
g_free(name);
|
|
|
|
|
|
|
|
name = g_strdup_printf("/soc/uart@%lx", (long)memmap[VIRT_UART0].base);
|
|
|
|
qemu_fdt_add_subnode(fdt, name);
|
|
|
|
qemu_fdt_setprop_string(fdt, name, "compatible", "ns16550a");
|
|
|
|
qemu_fdt_setprop_cells(fdt, name, "reg",
|
2018-03-02 20:31:13 +08:00
|
|
|
0x0, memmap[VIRT_UART0].base,
|
|
|
|
0x0, memmap[VIRT_UART0].size);
|
hw/riscv: virt: Allow creating multiple NUMA sockets
We extend RISC-V virt machine to allow creating a multi-socket
machine. Each RISC-V virt machine socket is a NUMA node having
a set of HARTs, a memory instance, a CLINT instance, and a PLIC
instance. Other devices are shared between all sockets. We also
update the generated device tree accordingly.
By default, NUMA multi-socket support is disabled for RISC-V virt
machine. To enable it, users can use "-numa" command-line options
of QEMU.
Example1: For two NUMA nodes with 2 CPUs each, append following
to command-line options: "-smp 4 -numa node -numa node"
Example2: For two NUMA nodes with 1 and 3 CPUs, append following
to command-line options:
"-smp 4 -numa node -numa node -numa cpu,node-id=0,core-id=0 \
-numa cpu,node-id=1,core-id=1 -numa cpu,node-id=1,core-id=2 \
-numa cpu,node-id=1,core-id=3"
The maximum number of sockets in a RISC-V virt machine is 8
but this limit can be changed in future.
Signed-off-by: Anup Patel <anup.patel@wdc.com>
Reviewed-by: Atish Patra <atish.patra@wdc.com>
Message-Id: <20200616032229.766089-6-anup.patel@wdc.com>
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
2020-05-15 17:28:50 +08:00
|
|
|
qemu_fdt_setprop_cell(fdt, name, "clock-frequency", 3686400);
|
|
|
|
qemu_fdt_setprop_cell(fdt, name, "interrupt-parent", plic_mmio_phandle);
|
|
|
|
qemu_fdt_setprop_cell(fdt, name, "interrupts", UART0_IRQ);
|
2018-03-02 20:31:13 +08:00
|
|
|
|
|
|
|
qemu_fdt_add_subnode(fdt, "/chosen");
|
hw/riscv: virt: Allow creating multiple NUMA sockets
We extend RISC-V virt machine to allow creating a multi-socket
machine. Each RISC-V virt machine socket is a NUMA node having
a set of HARTs, a memory instance, a CLINT instance, and a PLIC
instance. Other devices are shared between all sockets. We also
update the generated device tree accordingly.
By default, NUMA multi-socket support is disabled for RISC-V virt
machine. To enable it, users can use "-numa" command-line options
of QEMU.
Example1: For two NUMA nodes with 2 CPUs each, append following
to command-line options: "-smp 4 -numa node -numa node"
Example2: For two NUMA nodes with 1 and 3 CPUs, append following
to command-line options:
"-smp 4 -numa node -numa node -numa cpu,node-id=0,core-id=0 \
-numa cpu,node-id=1,core-id=1 -numa cpu,node-id=1,core-id=2 \
-numa cpu,node-id=1,core-id=3"
The maximum number of sockets in a RISC-V virt machine is 8
but this limit can be changed in future.
Signed-off-by: Anup Patel <anup.patel@wdc.com>
Reviewed-by: Atish Patra <atish.patra@wdc.com>
Message-Id: <20200616032229.766089-6-anup.patel@wdc.com>
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
2020-05-15 17:28:50 +08:00
|
|
|
qemu_fdt_setprop_string(fdt, "/chosen", "stdout-path", name);
|
|
|
|
g_free(name);
|
|
|
|
|
|
|
|
name = g_strdup_printf("/soc/rtc@%lx", (long)memmap[VIRT_RTC].base);
|
|
|
|
qemu_fdt_add_subnode(fdt, name);
|
|
|
|
qemu_fdt_setprop_string(fdt, name, "compatible", "google,goldfish-rtc");
|
|
|
|
qemu_fdt_setprop_cells(fdt, name, "reg",
|
2019-11-06 19:56:43 +08:00
|
|
|
0x0, memmap[VIRT_RTC].base,
|
|
|
|
0x0, memmap[VIRT_RTC].size);
|
hw/riscv: virt: Allow creating multiple NUMA sockets
We extend RISC-V virt machine to allow creating a multi-socket
machine. Each RISC-V virt machine socket is a NUMA node having
a set of HARTs, a memory instance, a CLINT instance, and a PLIC
instance. Other devices are shared between all sockets. We also
update the generated device tree accordingly.
By default, NUMA multi-socket support is disabled for RISC-V virt
machine. To enable it, users can use "-numa" command-line options
of QEMU.
Example1: For two NUMA nodes with 2 CPUs each, append following
to command-line options: "-smp 4 -numa node -numa node"
Example2: For two NUMA nodes with 1 and 3 CPUs, append following
to command-line options:
"-smp 4 -numa node -numa node -numa cpu,node-id=0,core-id=0 \
-numa cpu,node-id=1,core-id=1 -numa cpu,node-id=1,core-id=2 \
-numa cpu,node-id=1,core-id=3"
The maximum number of sockets in a RISC-V virt machine is 8
but this limit can be changed in future.
Signed-off-by: Anup Patel <anup.patel@wdc.com>
Reviewed-by: Atish Patra <atish.patra@wdc.com>
Message-Id: <20200616032229.766089-6-anup.patel@wdc.com>
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
2020-05-15 17:28:50 +08:00
|
|
|
qemu_fdt_setprop_cell(fdt, name, "interrupt-parent", plic_mmio_phandle);
|
|
|
|
qemu_fdt_setprop_cell(fdt, name, "interrupts", RTC_IRQ);
|
|
|
|
g_free(name);
|
|
|
|
|
|
|
|
name = g_strdup_printf("/soc/flash@%" PRIx64, flashbase);
|
|
|
|
qemu_fdt_add_subnode(s->fdt, name);
|
|
|
|
qemu_fdt_setprop_string(s->fdt, name, "compatible", "cfi-flash");
|
|
|
|
qemu_fdt_setprop_sized_cells(s->fdt, name, "reg",
|
2019-10-09 07:32:25 +08:00
|
|
|
2, flashbase, 2, flashsize,
|
|
|
|
2, flashbase + flashsize, 2, flashsize);
|
hw/riscv: virt: Allow creating multiple NUMA sockets
We extend RISC-V virt machine to allow creating a multi-socket
machine. Each RISC-V virt machine socket is a NUMA node having
a set of HARTs, a memory instance, a CLINT instance, and a PLIC
instance. Other devices are shared between all sockets. We also
update the generated device tree accordingly.
By default, NUMA multi-socket support is disabled for RISC-V virt
machine. To enable it, users can use "-numa" command-line options
of QEMU.
Example1: For two NUMA nodes with 2 CPUs each, append following
to command-line options: "-smp 4 -numa node -numa node"
Example2: For two NUMA nodes with 1 and 3 CPUs, append following
to command-line options:
"-smp 4 -numa node -numa node -numa cpu,node-id=0,core-id=0 \
-numa cpu,node-id=1,core-id=1 -numa cpu,node-id=1,core-id=2 \
-numa cpu,node-id=1,core-id=3"
The maximum number of sockets in a RISC-V virt machine is 8
but this limit can be changed in future.
Signed-off-by: Anup Patel <anup.patel@wdc.com>
Reviewed-by: Atish Patra <atish.patra@wdc.com>
Message-Id: <20200616032229.766089-6-anup.patel@wdc.com>
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
2020-05-15 17:28:50 +08:00
|
|
|
qemu_fdt_setprop_cell(s->fdt, name, "bank-width", 4);
|
|
|
|
g_free(name);
|
2020-10-22 13:32:25 +08:00
|
|
|
|
|
|
|
update_bootargs:
|
|
|
|
if (cmdline) {
|
|
|
|
qemu_fdt_setprop_string(fdt, "/chosen", "bootargs", cmdline);
|
|
|
|
}
|
2018-03-02 20:31:13 +08:00
|
|
|
}
|
|
|
|
|
2018-12-12 06:37:36 +08:00
|
|
|
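/*
 * Create the generic PCIe host bridge, alias its ECAM and MMIO windows into
 * system memory, map the PIO window, and route its legacy interrupts to the
 * PLIC.
 */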
static inline DeviceState *gpex_pcie_init(MemoryRegion *sys_mem,
                                          hwaddr ecam_base, hwaddr ecam_size,
                                          hwaddr mmio_base, hwaddr mmio_size,
                                          hwaddr pio_base,
                                          DeviceState *plic, bool link_up)
{
    DeviceState *dev;
    MemoryRegion *ecam_alias, *ecam_reg;
    MemoryRegion *mmio_alias, *mmio_reg;
    qemu_irq irq;
    int i;

dev = qdev_new(TYPE_GPEX_HOST);
|
2018-12-12 06:37:36 +08:00
|
|
|
|
sysbus: Convert to sysbus_realize() etc. with Coccinelle
Convert from qdev_realize(), qdev_realize_and_unref() with null @bus
argument to sysbus_realize(), sysbus_realize_and_unref().
Coccinelle script:
@@
expression dev, errp;
@@
- qdev_realize(DEVICE(dev), NULL, errp);
+ sysbus_realize(SYS_BUS_DEVICE(dev), errp);
@@
expression sysbus_dev, dev, errp;
@@
+ sysbus_dev = SYS_BUS_DEVICE(dev);
- qdev_realize_and_unref(dev, NULL, errp);
+ sysbus_realize_and_unref(sysbus_dev, errp);
- sysbus_dev = SYS_BUS_DEVICE(dev);
@@
expression sysbus_dev, dev, errp;
expression expr;
@@
sysbus_dev = SYS_BUS_DEVICE(dev);
... when != dev = expr;
- qdev_realize_and_unref(dev, NULL, errp);
+ sysbus_realize_and_unref(sysbus_dev, errp);
@@
expression dev, errp;
@@
- qdev_realize_and_unref(DEVICE(dev), NULL, errp);
+ sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), errp);
@@
expression dev, errp;
@@
- qdev_realize_and_unref(dev, NULL, errp);
+ sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), errp);
Whitespace changes minimized manually.
Signed-off-by: Markus Armbruster <armbru@redhat.com>
Acked-by: Alistair Francis <alistair.francis@wdc.com>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Message-Id: <20200610053247.1583243-46-armbru@redhat.com>
[Conflicts in hw/misc/empty_slot.c and hw/sparc/leon3.c resolved]
2020-06-10 13:32:34 +08:00
|
|
|
sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
|
2018-12-12 06:37:36 +08:00
|
|
|
|
|
|
|
ecam_alias = g_new0(MemoryRegion, 1);
|
|
|
|
ecam_reg = sysbus_mmio_get_region(SYS_BUS_DEVICE(dev), 0);
|
|
|
|
memory_region_init_alias(ecam_alias, OBJECT(dev), "pcie-ecam",
|
|
|
|
ecam_reg, 0, ecam_size);
|
|
|
|
memory_region_add_subregion(get_system_memory(), ecam_base, ecam_alias);
|
|
|
|
|
|
|
|
mmio_alias = g_new0(MemoryRegion, 1);
|
|
|
|
mmio_reg = sysbus_mmio_get_region(SYS_BUS_DEVICE(dev), 1);
|
|
|
|
memory_region_init_alias(mmio_alias, OBJECT(dev), "pcie-mmio",
|
|
|
|
mmio_reg, mmio_base, mmio_size);
|
|
|
|
memory_region_add_subregion(get_system_memory(), mmio_base, mmio_alias);
|
|
|
|
|
|
|
|
sysbus_mmio_map(SYS_BUS_DEVICE(dev), 2, pio_base);
|
|
|
|
|
|
|
|
for (i = 0; i < GPEX_NUM_IRQS; i++) {
|
|
|
|
irq = qdev_get_gpio_in(plic, PCIE_IRQ + i);
|
|
|
|
|
|
|
|
sysbus_connect_irq(SYS_BUS_DEVICE(dev), i, irq);
|
|
|
|
gpex_set_irq_num(GPEX_HOST(dev), i, PCIE_IRQ + i);
|
|
|
|
}
|
|
|
|
|
|
|
|
return dev;
|
|
|
|
}
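
/*
 * Board initialization: bring up one hart array, CLINT and PLIC per
 * socket, then the shared resources: RAM, device tree, boot ROM,
 * firmware/kernel images, reset vector and the MMIO peripherals.
 */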
static void virt_machine_init(MachineState *machine)
{
    const struct MemmapEntry *memmap = virt_memmap;
    RISCVVirtState *s = RISCV_VIRT_MACHINE(machine);
    MemoryRegion *system_memory = get_system_memory();
    MemoryRegion *main_mem = g_new(MemoryRegion, 1);
    MemoryRegion *mask_rom = g_new(MemoryRegion, 1);
    char *plic_hart_config, *soc_name;
    size_t plic_hart_config_len;
    target_ulong start_addr = memmap[VIRT_DRAM].base;
    target_ulong firmware_end_addr, kernel_start_addr;
    uint32_t fdt_load_addr;
    uint64_t kernel_entry;
    DeviceState *mmio_plic, *virtio_plic, *pcie_plic;
    int i, j, base_hartid, hart_count;

    /* Check socket count limit */
    if (VIRT_SOCKETS_MAX < riscv_socket_count(machine)) {
        error_report("number of sockets/nodes should be less than %d",
                     VIRT_SOCKETS_MAX);
        exit(1);
    }

    /* Initialize sockets */
    mmio_plic = virtio_plic = pcie_plic = NULL;
    for (i = 0; i < riscv_socket_count(machine); i++) {
        if (!riscv_socket_check_hartids(machine, i)) {
            error_report("discontinuous hartids in socket%d", i);
            exit(1);
        }

        base_hartid = riscv_socket_first_hartid(machine, i);
        if (base_hartid < 0) {
            error_report("can't find hartid base for socket%d", i);
            exit(1);
        }

        hart_count = riscv_socket_hart_count(machine, i);
        if (hart_count < 0) {
            error_report("can't find hart count for socket%d", i);
            exit(1);
        }

        soc_name = g_strdup_printf("soc%d", i);
        object_initialize_child(OBJECT(machine), soc_name, &s->soc[i],
                                TYPE_RISCV_HART_ARRAY);
        g_free(soc_name);
        object_property_set_str(OBJECT(&s->soc[i]), "cpu-type",
                                machine->cpu_type, &error_abort);
        object_property_set_int(OBJECT(&s->soc[i]), "hartid-base",
                                base_hartid, &error_abort);
        object_property_set_int(OBJECT(&s->soc[i]), "num-harts",
                                hart_count, &error_abort);
        sysbus_realize(SYS_BUS_DEVICE(&s->soc[i]), &error_abort);

        /* Per-socket CLINT */
        sifive_clint_create(
            memmap[VIRT_CLINT].base + i * memmap[VIRT_CLINT].size,
            memmap[VIRT_CLINT].size, base_hartid, hart_count,
            SIFIVE_SIP_BASE, SIFIVE_TIMECMP_BASE, SIFIVE_TIME_BASE,
            SIFIVE_CLINT_TIMEBASE_FREQ, true);

        /*
         * Per-socket PLIC hart topology configuration string: one
         * VIRT_PLIC_HART_CONFIG entry per hart, comma separated. It
         * describes which privilege-mode interrupt contexts each hart
         * provides to the PLIC.
         */
        plic_hart_config_len =
            (strlen(VIRT_PLIC_HART_CONFIG) + 1) * hart_count;
        plic_hart_config = g_malloc0(plic_hart_config_len);
        for (j = 0; j < hart_count; j++) {
            if (j != 0) {
                strncat(plic_hart_config, ",", plic_hart_config_len);
            }
            strncat(plic_hart_config, VIRT_PLIC_HART_CONFIG,
                    plic_hart_config_len);
            plic_hart_config_len -= (strlen(VIRT_PLIC_HART_CONFIG) + 1);
        }

        /* Per-socket PLIC */
        s->plic[i] = sifive_plic_create(
            memmap[VIRT_PLIC].base + i * memmap[VIRT_PLIC].size,
            plic_hart_config, base_hartid,
            VIRT_PLIC_NUM_SOURCES,
            VIRT_PLIC_NUM_PRIORITIES,
            VIRT_PLIC_PRIORITY_BASE,
            VIRT_PLIC_PENDING_BASE,
            VIRT_PLIC_ENABLE_BASE,
            VIRT_PLIC_ENABLE_STRIDE,
            VIRT_PLIC_CONTEXT_BASE,
            VIRT_PLIC_CONTEXT_STRIDE,
            memmap[VIRT_PLIC].size);
        g_free(plic_hart_config);

        /*
         * Try to use a different PLIC instance for each device type:
         * MMIO peripherals stay on the socket 0 PLIC, while VirtIO and
         * PCIe prefer the socket 1 and socket 2 PLICs when those
         * sockets exist.
         */
        if (i == 0) {
            mmio_plic = s->plic[i];
            virtio_plic = s->plic[i];
            pcie_plic = s->plic[i];
        }
        if (i == 1) {
            virtio_plic = s->plic[i];
            pcie_plic = s->plic[i];
        }
        if (i == 2) {
            pcie_plic = s->plic[i];
        }
    }

    /* register system main memory (actual RAM) */
    memory_region_init_ram(main_mem, NULL, "riscv_virt_board.ram",
                           machine->ram_size, &error_fatal);
    memory_region_add_subregion(system_memory, memmap[VIRT_DRAM].base,
                                main_mem);

    /* create device tree */
    create_fdt(s, memmap, machine->ram_size, machine->kernel_cmdline,
               riscv_is_32bit(&s->soc[0]));

    /* boot rom */
    memory_region_init_rom(mask_rom, NULL, "riscv_virt_board.mrom",
                           memmap[VIRT_MROM].size, &error_fatal);
    memory_region_add_subregion(system_memory, memmap[VIRT_MROM].base,
                                mask_rom);
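
    /*
     * Load the OpenSBI fw_dynamic firmware, picking the 32-bit or 64-bit
     * image to match socket 0 and placing it at the base of DRAM
     * (start_addr).
     */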
    if (riscv_is_32bit(&s->soc[0])) {
        firmware_end_addr = riscv_find_and_load_firmware(machine,
                                    "opensbi-riscv32-generic-fw_dynamic.bin",
                                    start_addr, NULL);
    } else {
        firmware_end_addr = riscv_find_and_load_firmware(machine,
                                    "opensbi-riscv64-generic-fw_dynamic.bin",
                                    start_addr, NULL);
    }
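
    /*
     * If a kernel was supplied with -kernel, load it above the firmware
     * and record its entry point; an optional initrd is loaded after it
     * and advertised to the guest via /chosen in the device tree.
     */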
    if (machine->kernel_filename) {
        kernel_start_addr = riscv_calc_kernel_start_addr(&s->soc[0],
                                                         firmware_end_addr);

        kernel_entry = riscv_load_kernel(machine->kernel_filename,
                                         kernel_start_addr, NULL);

        if (machine->initrd_filename) {
            hwaddr start;
            hwaddr end = riscv_load_initrd(machine->initrd_filename,
                                           machine->ram_size, kernel_entry,
                                           &start);
            qemu_fdt_setprop_cell(s->fdt, "/chosen",
                                  "linux,initrd-start", start);
            qemu_fdt_setprop_cell(s->fdt, "/chosen", "linux,initrd-end",
                                  end);
        }
    } else {
        /*
         * If dynamic firmware is used, it doesn't know where the
         * next-stage entry point is when no kernel argument is given,
         * so pass a kernel entry address of zero.
         */
        kernel_entry = 0;
    }

    if (drive_get(IF_PFLASH, 0, 0)) {
        /*
         * Pflash was supplied, let's overwrite the address we jump to after
         * reset to the base of the flash.
         */
        start_addr = virt_memmap[VIRT_FLASH].base;
    }

    /* Compute the fdt load address in dram */
    fdt_load_addr = riscv_load_fdt(memmap[VIRT_DRAM].base,
                                   machine->ram_size, s->fdt);
    /* load the reset vector */
    riscv_setup_rom_reset_vec(machine, &s->soc[0], start_addr,
                              virt_memmap[VIRT_MROM].base,
                              virt_memmap[VIRT_MROM].size, kernel_entry,
                              fdt_load_addr, s->fdt);

    /*
     * SiFive Test MMIO device: lets the guest request poweroff or reset
     * by writing to a magic register.
     */
    sifive_test_create(memmap[VIRT_TEST].base);

    /* VirtIO MMIO devices */
    for (i = 0; i < VIRTIO_COUNT; i++) {
        sysbus_create_simple("virtio-mmio",
            memmap[VIRT_VIRTIO].base + i * memmap[VIRT_VIRTIO].size,
            qdev_get_gpio_in(DEVICE(virtio_plic), VIRTIO_IRQ + i));
    }

    gpex_pcie_init(system_memory,
                   memmap[VIRT_PCIE_ECAM].base,
                   memmap[VIRT_PCIE_ECAM].size,
                   memmap[VIRT_PCIE_MMIO].base,
                   memmap[VIRT_PCIE_MMIO].size,
                   memmap[VIRT_PCIE_PIO].base,
                   DEVICE(pcie_plic), true);

    serial_mm_init(system_memory, memmap[VIRT_UART0].base,
        0, qdev_get_gpio_in(DEVICE(mmio_plic), UART0_IRQ), 399193,
        serial_hd(0), DEVICE_LITTLE_ENDIAN);

    sysbus_create_simple("goldfish_rtc", memmap[VIRT_RTC].base,
        qdev_get_gpio_in(DEVICE(mmio_plic), RTC_IRQ));
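
    /*
     * CFI pflash banks: attach any -drive if=pflash backends to the
     * machine's flash devices and map them into the VIRT_FLASH region.
     */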
    virt_flash_create(s);

    for (i = 0; i < ARRAY_SIZE(s->flash); i++) {
        /* Map legacy -drive if=pflash to machine properties */
        pflash_cfi01_legacy_drive(s->flash[i],
                                  drive_get(IF_PFLASH, 0, i));
    }
    virt_flash_map(s, system_memory);
}

static void virt_machine_instance_init(Object *obj)
{
}

static void virt_machine_class_init(ObjectClass *oc, void *data)
{
    MachineClass *mc = MACHINE_CLASS(oc);

    mc->desc = "RISC-V VirtIO board";
    mc->init = virt_machine_init;
    mc->max_cpus = VIRT_CPUS_MAX;
    mc->default_cpu_type = TYPE_RISCV_CPU_BASE;
    mc->pci_allow_0_address = true;
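    /*
     * NUMA/multi-socket hooks: each socket is exposed as a NUMA node with
     * its own harts, CLINT and PLIC, e.g. "-smp 4 -numa node -numa node"
     * gives two sockets with two CPUs each.
     */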
    mc->possible_cpu_arch_ids = riscv_numa_possible_cpu_arch_ids;
    mc->cpu_index_to_instance_props = riscv_numa_cpu_index_to_props;
    mc->get_default_cpu_node_id = riscv_numa_get_default_cpu_node_id;
    mc->numa_mem_supported = true;
}

static const TypeInfo virt_machine_typeinfo = {
    .name = MACHINE_TYPE_NAME("virt"),
    .parent = TYPE_MACHINE,
    .class_init = virt_machine_class_init,
    .instance_init = virt_machine_instance_init,
    .instance_size = sizeof(RISCVVirtState),
};

static void virt_machine_init_register_types(void)
{
    type_register_static(&virt_machine_typeinfo);
}

type_init(virt_machine_init_register_types)