commit 5acb6cd19d

Merge tag 'gvt-next-2022-04-29' into v5.19/vfio/next

Merge GVT-g dependencies for vfio.

Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
@@ -105,6 +105,7 @@ structure to represent a mediated device's driver::

	struct mdev_driver {
		int  (*probe)  (struct mdev_device *dev);
		void (*remove) (struct mdev_device *dev);
		struct attribute_group **supported_type_groups;
		struct device_driver driver;
	};

@@ -119,33 +120,15 @@ to register and unregister itself with the core driver:

	extern void mdev_unregister_driver(struct mdev_driver *drv);

The mediated bus driver is responsible for adding mediated devices to the VFIO
group when devices are bound to the driver and removing mediated devices from
the VFIO group when devices are unbound from the driver.

Physical Device Driver Interface
--------------------------------

The physical device driver interface provides the mdev_parent_ops[3] structure
to define the APIs to manage work in the mediated core driver that is related
to the physical device.

The structures in the mdev_parent_ops structure are as follows:

* dev_attr_groups: attributes of the parent device
* mdev_attr_groups: attributes of the mediated device
* supported_config: attributes to define supported configurations
* device_driver: device driver to bind for mediated device instances

The mdev_parent_ops also still has various function pointers. These exist
for historical reasons only and shall not be used for new drivers.
The mediated bus driver's probe function should create a vfio_device on top of
the mdev_device and connect it to an appropriate implementation of
vfio_device_ops.

When a driver wants to add the GUID creation sysfs to an existing device it has
probed, it should call::

	extern int mdev_register_device(struct device *dev,
					const struct mdev_parent_ops *ops);
					struct mdev_driver *mdev_driver);

This will provide the 'mdev_supported_types/XX/create' files which can then be
used to trigger the creation of a mdev_device. The created mdev_device will be
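As an illustration of the interface described above, here is a minimal sketch of an mdev driver registration. Everything named sample_* is hypothetical; a real driver would also fill in supported_type_groups and create a vfio_device in its probe callback:

	#include <linux/mdev.h>
	#include <linux/module.h>

	/* Hypothetical probe/remove callbacks, for illustration only. */
	static int sample_mdev_probe(struct mdev_device *mdev)
	{
		/* A real driver creates and registers a vfio_device here. */
		return 0;
	}

	static void sample_mdev_remove(struct mdev_device *mdev)
	{
		/* Tear down whatever probe created. */
	}

	static struct mdev_driver sample_mdev_driver = {
		.driver = {
			.name  = "sample_mdev",
			.owner = THIS_MODULE,
		},
		.probe  = sample_mdev_probe,
		.remove = sample_mdev_remove,
	};

	static int __init sample_init(void)
	{
		/* Register with the mdev core; pairs with mdev_unregister_driver(). */
		return mdev_register_driver(&sample_mdev_driver);
	}

	static void __exit sample_exit(void)
	{
		mdev_unregister_driver(&sample_mdev_driver);
	}

	module_init(sample_init);
	module_exit(sample_exit);
	MODULE_LICENSE("GPL");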
@@ -102,40 +102,30 @@ config DRM_I915_USERPTR
	  If in doubt, say "Y".

config DRM_I915_GVT
	bool "Enable Intel GVT-g graphics virtualization host support"
	bool

config DRM_I915_GVT_KVMGT
	tristate "Enable KVM host support Intel GVT-g graphics virtualization"
	depends on DRM_I915
	depends on X86
	depends on 64BIT
	default n
	depends on KVM
	depends on VFIO_MDEV
	select DRM_I915_GVT
	select KVM_EXTERNAL_WRITE_TRACKING

	help
	  Choose this option if you want to enable Intel GVT-g graphics
	  virtualization technology host support with integrated graphics.
	  With GVT-g, it's possible to have one integrated graphics
	  device shared by multiple VMs under different hypervisors.
	  device shared by multiple VMs under KVM.

	  Note that at least one hypervisor like Xen or KVM is required for
	  this driver to work, and it only supports newer device from
	  Broadwell+. For further information and setup guide, you can
	  visit: http://01.org/igvt-g.

	  Now it's just a stub to support the modifications of i915 for
	  GVT device model. It requires at least one MPT modules for Xen/KVM
	  and other components of GVT device model to work. Use it under
	  you own risk.
	  Note that this driver only supports newer device from Broadwell on.
	  For further information and setup guide, you can visit:
	  http://01.org/igvt-g.

	  If in doubt, say "N".

config DRM_I915_GVT_KVMGT
	tristate "Enable KVM/VFIO support for Intel GVT-g"
	depends on DRM_I915_GVT
	depends on KVM
	depends on VFIO_MDEV
	select KVM_EXTERNAL_WRITE_TRACKING
	default n
	help
	  Choose this option if you want to enable KVMGT support for
	  Intel GVT-g.

config DRM_I915_PXP
	bool "Enable Intel PXP support"
	depends on DRM_I915
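For reference, enabling the new KVMGT split might look like this in a kernel .config (a sketch; DRM_I915_GVT is now a hidden bool, so it is normally set through the select in DRM_I915_GVT_KVMGT rather than by hand):

	CONFIG_DRM_I915=m
	CONFIG_VFIO_MDEV=m
	CONFIG_DRM_I915_GVT=y
	CONFIG_DRM_I915_GVT_KVMGT=m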
@@ -320,13 +320,13 @@ i915-$(CONFIG_DRM_I915_SELFTEST) += \
# virtual gpu code
i915-y += i915_vgpu.o

ifeq ($(CONFIG_DRM_I915_GVT),y)
i915-y += intel_gvt.o
i915-$(CONFIG_DRM_I915_GVT) += \
	intel_gvt.o \
	intel_gvt_mmio_table.o
include $(src)/gvt/Makefile
endif

obj-$(CONFIG_DRM_I915) += i915.o
obj-$(CONFIG_DRM_I915_GVT_KVMGT) += gvt/kvmgt.o
obj-$(CONFIG_DRM_I915_GVT_KVMGT) += kvmgt.o

# header test
@@ -1,9 +1,25 @@
# SPDX-License-Identifier: GPL-2.0
GVT_DIR := gvt
GVT_SOURCE := gvt.o aperture_gm.o handlers.o vgpu.o trace_points.o firmware.o \
	interrupt.o gtt.o cfg_space.o opregion.o mmio.o display.o edid.o \
	execlist.o scheduler.o sched_policy.o mmio_context.o cmd_parser.o debugfs.o \
	fb_decoder.o dmabuf.o page_track.o

ccflags-y += -I $(srctree)/$(src) -I $(srctree)/$(src)/$(GVT_DIR)/
i915-y += $(addprefix $(GVT_DIR)/, $(GVT_SOURCE))
kvmgt-$(CONFIG_DRM_I915_GVT) += \
	gvt/aperture_gm.o \
	gvt/cfg_space.o \
	gvt/cmd_parser.o \
	gvt/debugfs.o \
	gvt/display.o \
	gvt/dmabuf.o \
	gvt/edid.o \
	gvt/execlist.o \
	gvt/fb_decoder.o \
	gvt/firmware.o \
	gvt/gtt.o \
	gvt/handlers.o \
	gvt/interrupt.o \
	gvt/kvmgt.o \
	gvt/mmio.o \
	gvt/mmio_context.o \
	gvt/opregion.o \
	gvt/page_track.o \
	gvt/sched_policy.o \
	gvt/scheduler.o \
	gvt/trace_points.o \
	gvt/vgpu.o
@@ -129,60 +129,16 @@ int intel_vgpu_emulate_cfg_read(struct intel_vgpu *vgpu, unsigned int offset,
	return 0;
}

static int map_aperture(struct intel_vgpu *vgpu, bool map)
static void map_aperture(struct intel_vgpu *vgpu, bool map)
{
	phys_addr_t aperture_pa = vgpu_aperture_pa_base(vgpu);
	unsigned long aperture_sz = vgpu_aperture_sz(vgpu);
	u64 first_gfn;
	u64 val;
	int ret;

	if (map == vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].tracked)
		return 0;

	val = vgpu_cfg_space(vgpu)[PCI_BASE_ADDRESS_2];
	if (val & PCI_BASE_ADDRESS_MEM_TYPE_64)
		val = *(u64 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_2);
	else
		val = *(u32 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_2);

	first_gfn = (val + vgpu_aperture_offset(vgpu)) >> PAGE_SHIFT;

	ret = intel_gvt_hypervisor_map_gfn_to_mfn(vgpu, first_gfn,
						  aperture_pa >> PAGE_SHIFT,
						  aperture_sz >> PAGE_SHIFT,
						  map);
	if (ret)
		return ret;

	vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].tracked = map;
	return 0;
	if (map != vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].tracked)
		vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].tracked = map;
}

static int trap_gttmmio(struct intel_vgpu *vgpu, bool trap)
static void trap_gttmmio(struct intel_vgpu *vgpu, bool trap)
{
	u64 start, end;
	u64 val;
	int ret;

	if (trap == vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].tracked)
		return 0;

	val = vgpu_cfg_space(vgpu)[PCI_BASE_ADDRESS_0];
	if (val & PCI_BASE_ADDRESS_MEM_TYPE_64)
		start = *(u64 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_0);
	else
		start = *(u32 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_0);

	start &= ~GENMASK(3, 0);
	end = start + vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].size - 1;

	ret = intel_gvt_hypervisor_set_trap_area(vgpu, start, end, trap);
	if (ret)
		return ret;

	vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].tracked = trap;
	return 0;
	if (trap != vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].tracked)
		vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].tracked = trap;
}

static int emulate_pci_command_write(struct intel_vgpu *vgpu,
@@ -191,26 +147,17 @@ static int emulate_pci_command_write(struct intel_vgpu *vgpu,
	u8 old = vgpu_cfg_space(vgpu)[offset];
	u8 new = *(u8 *)p_data;
	u8 changed = old ^ new;
	int ret;

	vgpu_pci_cfg_mem_write(vgpu, offset, p_data, bytes);
	if (!(changed & PCI_COMMAND_MEMORY))
		return 0;

	if (old & PCI_COMMAND_MEMORY) {
		ret = trap_gttmmio(vgpu, false);
		if (ret)
			return ret;
		ret = map_aperture(vgpu, false);
		if (ret)
			return ret;
		trap_gttmmio(vgpu, false);
		map_aperture(vgpu, false);
	} else {
		ret = trap_gttmmio(vgpu, true);
		if (ret)
			return ret;
		ret = map_aperture(vgpu, true);
		if (ret)
			return ret;
		trap_gttmmio(vgpu, true);
		map_aperture(vgpu, true);
	}

	return 0;
@@ -230,13 +177,12 @@ static int emulate_pci_rom_bar_write(struct intel_vgpu *vgpu,
	return 0;
}

static int emulate_pci_bar_write(struct intel_vgpu *vgpu, unsigned int offset,
static void emulate_pci_bar_write(struct intel_vgpu *vgpu, unsigned int offset,
	void *p_data, unsigned int bytes)
{
	u32 new = *(u32 *)(p_data);
	bool lo = IS_ALIGNED(offset, 8);
	u64 size;
	int ret = 0;
	bool mmio_enabled =
		vgpu_cfg_space(vgpu)[PCI_COMMAND] & PCI_COMMAND_MEMORY;
	struct intel_vgpu_pci_bar *bars = vgpu->cfg_space.bar;
@@ -259,14 +205,14 @@ static int emulate_pci_bar_write(struct intel_vgpu *vgpu, unsigned int offset,
			 * Untrap the BAR, since guest hasn't configured a
			 * valid GPA
			 */
			ret = trap_gttmmio(vgpu, false);
			trap_gttmmio(vgpu, false);
			break;
		case PCI_BASE_ADDRESS_2:
		case PCI_BASE_ADDRESS_3:
			size = ~(bars[INTEL_GVT_PCI_BAR_APERTURE].size -1);
			intel_vgpu_write_pci_bar(vgpu, offset,
						 size >> (lo ? 0 : 32), lo);
			ret = map_aperture(vgpu, false);
			map_aperture(vgpu, false);
			break;
		default:
			/* Unimplemented BARs */
@@ -282,19 +228,18 @@ static int emulate_pci_bar_write(struct intel_vgpu *vgpu, unsigned int offset,
			 */
			trap_gttmmio(vgpu, false);
			intel_vgpu_write_pci_bar(vgpu, offset, new, lo);
			ret = trap_gttmmio(vgpu, mmio_enabled);
			trap_gttmmio(vgpu, mmio_enabled);
			break;
		case PCI_BASE_ADDRESS_2:
		case PCI_BASE_ADDRESS_3:
			map_aperture(vgpu, false);
			intel_vgpu_write_pci_bar(vgpu, offset, new, lo);
			ret = map_aperture(vgpu, mmio_enabled);
			map_aperture(vgpu, mmio_enabled);
			break;
		default:
			intel_vgpu_write_pci_bar(vgpu, offset, new, lo);
		}
	}
	return ret;
}

/**
@@ -336,8 +281,8 @@ int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
	case PCI_BASE_ADDRESS_0 ... PCI_BASE_ADDRESS_5:
		if (drm_WARN_ON(&i915->drm, !IS_ALIGNED(offset, 4)))
			return -EINVAL;
		return emulate_pci_bar_write(vgpu, offset, p_data, bytes);

		emulate_pci_bar_write(vgpu, offset, p_data, bytes);
		break;
	case INTEL_GVT_PCI_SWSCI:
		if (drm_WARN_ON(&i915->drm, !IS_ALIGNED(offset, 4)))
			return -EINVAL;
@@ -1011,7 +1011,7 @@ static int cmd_reg_handler(struct parser_exec_state *s,
	if (GRAPHICS_VER(s->engine->i915) == 9 &&
	    intel_gvt_mmio_is_sr_in_ctx(gvt, offset) &&
	    !strncmp(cmd, "lri", 3)) {
		intel_gvt_hypervisor_read_gpa(s->vgpu,
		intel_gvt_read_gpa(s->vgpu,
			s->workload->ring_context_gpa + 12, &ctx_sr_ctl, 4);
		/* check inhibit context */
		if (ctx_sr_ctl & 1) {
@@ -1775,7 +1775,7 @@ static int copy_gma_to_hva(struct intel_vgpu *vgpu, struct intel_vgpu_mm *mm,
		copy_len = (end_gma - gma) >= (I915_GTT_PAGE_SIZE - offset) ?
			I915_GTT_PAGE_SIZE - offset : end_gma - gma;

		intel_gvt_hypervisor_read_gpa(vgpu, gpa, va + len, copy_len);
		intel_gvt_read_gpa(vgpu, gpa, va + len, copy_len);

		len += copy_len;
		gma += copy_len;
@@ -29,7 +29,7 @@
 */

#include <linux/dma-buf.h>
#include <linux/vfio.h>
#include <linux/mdev.h>

#include <drm/drm_fourcc.h>
#include <drm/drm_plane.h>
@@ -42,24 +42,6 @@

#define GEN8_DECODE_PTE(pte) (pte & GENMASK_ULL(63, 12))

static int vgpu_pin_dma_address(struct intel_vgpu *vgpu,
				unsigned long size,
				dma_addr_t dma_addr)
{
	int ret = 0;

	if (intel_gvt_hypervisor_dma_pin_guest_page(vgpu, dma_addr))
		ret = -EINVAL;

	return ret;
}

static void vgpu_unpin_dma_address(struct intel_vgpu *vgpu,
				   dma_addr_t dma_addr)
{
	intel_gvt_hypervisor_dma_unmap_guest_page(vgpu, dma_addr);
}

static int vgpu_gem_get_pages(
		struct drm_i915_gem_object *obj)
{
@@ -95,7 +77,7 @@ static int vgpu_gem_get_pages(
	for_each_sg(st->sgl, sg, page_num, i) {
		dma_addr_t dma_addr =
			GEN8_DECODE_PTE(readq(&gtt_entries[i]));
		if (vgpu_pin_dma_address(vgpu, PAGE_SIZE, dma_addr)) {
		if (intel_gvt_dma_pin_guest_page(vgpu, dma_addr)) {
			ret = -EINVAL;
			goto out;
		}
@@ -114,7 +96,7 @@ static int vgpu_gem_get_pages(
		for_each_sg(st->sgl, sg, i, j) {
			dma_addr = sg_dma_address(sg);
			if (dma_addr)
				vgpu_unpin_dma_address(vgpu, dma_addr);
				intel_gvt_dma_unmap_guest_page(vgpu, dma_addr);
		}
		sg_free_table(st);
		kfree(st);
@@ -136,7 +118,7 @@ static void vgpu_gem_put_pages(struct drm_i915_gem_object *obj,
	int i;

	for_each_sg(pages->sgl, sg, fb_info->size, i)
		vgpu_unpin_dma_address(vgpu,
		intel_gvt_dma_unmap_guest_page(vgpu,
				       sg_dma_address(sg));
}

@@ -157,7 +139,6 @@ static void dmabuf_gem_object_free(struct kref *kref)
			dmabuf_obj = list_entry(pos, struct intel_vgpu_dmabuf_obj, list);
			if (dmabuf_obj == obj) {
				list_del(pos);
				intel_gvt_hypervisor_put_vfio_device(vgpu);
				idr_remove(&vgpu->object_idr,
					   dmabuf_obj->dmabuf_id);
				kfree(dmabuf_obj->info);
@@ -491,14 +472,6 @@ int intel_vgpu_query_plane(struct intel_vgpu *vgpu, void *args)

	kref_init(&dmabuf_obj->kref);

	mutex_lock(&vgpu->dmabuf_lock);
	if (intel_gvt_hypervisor_get_vfio_device(vgpu)) {
		gvt_vgpu_err("get vfio device failed\n");
		mutex_unlock(&vgpu->dmabuf_lock);
		goto out_free_info;
	}
	mutex_unlock(&vgpu->dmabuf_lock);

	update_fb_info(gfx_plane_info, &fb_info);

	INIT_LIST_HEAD(&dmabuf_obj->list);
@@ -603,7 +576,6 @@ void intel_vgpu_dmabuf_cleanup(struct intel_vgpu *vgpu)
		dmabuf_obj->vgpu = NULL;

		idr_remove(&vgpu->object_idr, dmabuf_obj->dmabuf_id);
		intel_gvt_hypervisor_put_vfio_device(vgpu);
		list_del(pos);

		/* dmabuf_obj might be freed in dmabuf_obj_put */
@@ -159,12 +159,12 @@ static void emulate_csb_update(struct intel_vgpu_execlist *execlist,
	hwsp_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
					 vgpu->hws_pga[execlist->engine->id]);
	if (hwsp_gpa != INTEL_GVT_INVALID_ADDR) {
		intel_gvt_hypervisor_write_gpa(vgpu,
			hwsp_gpa + I915_HWS_CSB_BUF0_INDEX * 4 + write_pointer * 8,
			status, 8);
		intel_gvt_hypervisor_write_gpa(vgpu,
			hwsp_gpa + INTEL_HWS_CSB_WRITE_INDEX(execlist->engine->i915) * 4,
			&write_pointer, 4);
		intel_gvt_write_gpa(vgpu,
			hwsp_gpa + I915_HWS_CSB_BUF0_INDEX * 4 + write_pointer * 8,
			status, 8);
		intel_gvt_write_gpa(vgpu,
			hwsp_gpa + INTEL_HWS_CSB_WRITE_INDEX(execlist->engine->i915) * 4,
			&write_pointer, 4);
	}

	gvt_dbg_el("vgpu%d: w pointer %u reg %x csb l %x csb h %x\n",
@@ -66,22 +66,16 @@ static struct bin_attribute firmware_attr = {
	.mmap = NULL,
};

static int mmio_snapshot_handler(struct intel_gvt *gvt, u32 offset, void *data)
{
	*(u32 *)(data + offset) = intel_uncore_read_notrace(gvt->gt->uncore,
							    _MMIO(offset));
	return 0;
}

static int expose_firmware_sysfs(struct intel_gvt *gvt)
{
	struct intel_gvt_device_info *info = &gvt->device_info;
	struct pci_dev *pdev = to_pci_dev(gvt->gt->i915->drm.dev);
	struct drm_i915_private *i915 = gvt->gt->i915;
	struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
	struct gvt_firmware_header *h;
	void *firmware;
	void *p;
	unsigned long size, crc32_start;
	int i, ret;
	int ret;

	size = sizeof(*h) + info->mmio_size + info->cfg_space_size;
	firmware = vzalloc(size);
@@ -99,17 +93,16 @@ static int expose_firmware_sysfs(struct intel_gvt *gvt)

	p = firmware + h->cfg_space_offset;

	for (i = 0; i < h->cfg_space_size; i += 4)
		pci_read_config_dword(pdev, i, p + i);

	memcpy(gvt->firmware.cfg_space, p, info->cfg_space_size);
	memcpy(gvt->firmware.cfg_space, i915->vgpu.initial_cfg_space,
	       info->cfg_space_size);
	memcpy(p, gvt->firmware.cfg_space, info->cfg_space_size);

	p = firmware + h->mmio_offset;

	/* Take a snapshot of hw mmio registers. */
	intel_gvt_for_each_tracked_mmio(gvt, mmio_snapshot_handler, p);
	memcpy(gvt->firmware.mmio, i915->vgpu.initial_mmio,
	       info->mmio_size);

	memcpy(gvt->firmware.mmio, p, info->mmio_size);
	memcpy(p, gvt->firmware.mmio, info->mmio_size);

	crc32_start = offsetof(struct gvt_firmware_header, crc32) + 4;
	h->crc32 = crc32_le(0, firmware + crc32_start, size - crc32_start);
@@ -49,6 +49,22 @@
static bool enable_out_of_sync = false;
static int preallocated_oos_pages = 8192;

static bool intel_gvt_is_valid_gfn(struct intel_vgpu *vgpu, unsigned long gfn)
{
	struct kvm *kvm = vgpu->kvm;
	int idx;
	bool ret;

	if (!vgpu->attached)
		return false;

	idx = srcu_read_lock(&kvm->srcu);
	ret = kvm_is_visible_gfn(kvm, gfn);
	srcu_read_unlock(&kvm->srcu, idx);

	return ret;
}

/*
 * validate a gm address and related range size,
 * translate it to host gm address
@@ -314,7 +330,7 @@ static inline int gtt_get_entry64(void *pt,
		return -EINVAL;

	if (hypervisor_access) {
		ret = intel_gvt_hypervisor_read_gpa(vgpu, gpa +
		ret = intel_gvt_read_gpa(vgpu, gpa +
				(index << info->gtt_entry_size_shift),
				&e->val64, 8);
		if (WARN_ON(ret))
@@ -339,7 +355,7 @@ static inline int gtt_set_entry64(void *pt,
		return -EINVAL;

	if (hypervisor_access) {
		ret = intel_gvt_hypervisor_write_gpa(vgpu, gpa +
		ret = intel_gvt_write_gpa(vgpu, gpa +
				(index << info->gtt_entry_size_shift),
				&e->val64, 8);
		if (WARN_ON(ret))
@@ -997,7 +1013,7 @@ static inline void ppgtt_invalidate_pte(struct intel_vgpu_ppgtt_spt *spt,
	if (!pfn || pfn == vgpu->gtt.scratch_pt[type].page_mfn)
		return;

	intel_gvt_hypervisor_dma_unmap_guest_page(vgpu, pfn << PAGE_SHIFT);
	intel_gvt_dma_unmap_guest_page(vgpu, pfn << PAGE_SHIFT);
}

static int ppgtt_invalidate_spt(struct intel_vgpu_ppgtt_spt *spt)
@@ -1162,15 +1178,16 @@ static int is_2MB_gtt_possible(struct intel_vgpu *vgpu,
	struct intel_gvt_gtt_entry *entry)
{
	const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	unsigned long pfn;
	kvm_pfn_t pfn;

	if (!HAS_PAGE_SIZES(vgpu->gvt->gt->i915, I915_GTT_PAGE_SIZE_2M))
		return 0;

	pfn = intel_gvt_hypervisor_gfn_to_mfn(vgpu, ops->get_pfn(entry));
	if (pfn == INTEL_GVT_INVALID_ADDR)
	if (!vgpu->attached)
		return -EINVAL;
	pfn = gfn_to_pfn(vgpu->kvm, ops->get_pfn(entry));
	if (is_error_noslot_pfn(pfn))
		return -EINVAL;

	return PageTransHuge(pfn_to_page(pfn));
}

@@ -1195,8 +1212,8 @@ static int split_2MB_gtt_entry(struct intel_vgpu *vgpu,
		return PTR_ERR(sub_spt);

	for_each_shadow_entry(sub_spt, &sub_se, sub_index) {
		ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu,
				start_gfn + sub_index, PAGE_SIZE, &dma_addr);
		ret = intel_gvt_dma_map_guest_page(vgpu, start_gfn + sub_index,
						   PAGE_SIZE, &dma_addr);
		if (ret) {
			ppgtt_invalidate_spt(spt);
			return ret;
@@ -1241,8 +1258,8 @@ static int split_64KB_gtt_entry(struct intel_vgpu *vgpu,
	ops->set_64k_splited(&entry);

	for (i = 0; i < GTT_64K_PTE_STRIDE; i++) {
		ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu,
				start_gfn + i, PAGE_SIZE, &dma_addr);
		ret = intel_gvt_dma_map_guest_page(vgpu, start_gfn + i,
						   PAGE_SIZE, &dma_addr);
		if (ret)
			return ret;

@@ -1296,8 +1313,7 @@ static int ppgtt_populate_shadow_entry(struct intel_vgpu *vgpu,
	}

	/* direct shadow */
	ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu, gfn, page_size,
						      &dma_addr);
	ret = intel_gvt_dma_map_guest_page(vgpu, gfn, page_size, &dma_addr);
	if (ret)
		return -ENXIO;

@@ -1331,7 +1347,7 @@ static int ppgtt_populate_spt(struct intel_vgpu_ppgtt_spt *spt)
			ppgtt_set_shadow_entry(spt, &se, i);
		} else {
			gfn = ops->get_pfn(&ge);
			if (!intel_gvt_hypervisor_is_valid_gfn(vgpu, gfn)) {
			if (!intel_gvt_is_valid_gfn(vgpu, gfn)) {
				ops->set_pfn(&se, gvt->gtt.scratch_mfn);
				ppgtt_set_shadow_entry(spt, &se, i);
				continue;
@@ -1497,7 +1513,7 @@ static int attach_oos_page(struct intel_vgpu_oos_page *oos_page,
	struct intel_gvt *gvt = spt->vgpu->gvt;
	int ret;

	ret = intel_gvt_hypervisor_read_gpa(spt->vgpu,
	ret = intel_gvt_read_gpa(spt->vgpu,
			spt->guest_page.gfn << I915_GTT_PAGE_SHIFT,
			oos_page->mem, I915_GTT_PAGE_SIZE);
	if (ret)
@@ -2228,8 +2244,7 @@ static void ggtt_invalidate_pte(struct intel_vgpu *vgpu,

	pfn = pte_ops->get_pfn(entry);
	if (pfn != vgpu->gvt->gtt.scratch_mfn)
		intel_gvt_hypervisor_dma_unmap_guest_page(vgpu,
							  pfn << PAGE_SHIFT);
		intel_gvt_dma_unmap_guest_page(vgpu, pfn << PAGE_SHIFT);
}

static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
@@ -2315,13 +2330,13 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
		/* one PTE update may be issued in multiple writes and the
		 * first write may not construct a valid gfn
		 */
		if (!intel_gvt_hypervisor_is_valid_gfn(vgpu, gfn)) {
		if (!intel_gvt_is_valid_gfn(vgpu, gfn)) {
			ops->set_pfn(&m, gvt->gtt.scratch_mfn);
			goto out;
		}

		ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu, gfn,
							      PAGE_SIZE, &dma_addr);
		ret = intel_gvt_dma_map_guest_page(vgpu, gfn, PAGE_SIZE,
						   &dma_addr);
		if (ret) {
			gvt_vgpu_err("fail to populate guest ggtt entry\n");
			/* guest driver may read/write the entry when partial
@@ -1,340 +0,0 @@
/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Kevin Tian <kevin.tian@intel.com>
 *    Eddie Dong <eddie.dong@intel.com>
 *
 * Contributors:
 *    Niu Bing <bing.niu@intel.com>
 *    Zhi Wang <zhi.a.wang@intel.com>
 *
 */

#include <linux/types.h>
#include <linux/kthread.h>

#include "i915_drv.h"
#include "intel_gvt.h"
#include "gvt.h"
#include <linux/vfio.h>
#include <linux/mdev.h>

struct intel_gvt_host intel_gvt_host;

static const char * const supported_hypervisors[] = {
	[INTEL_GVT_HYPERVISOR_XEN] = "XEN",
	[INTEL_GVT_HYPERVISOR_KVM] = "KVM",
};

static const struct intel_gvt_ops intel_gvt_ops = {
	.emulate_cfg_read = intel_vgpu_emulate_cfg_read,
	.emulate_cfg_write = intel_vgpu_emulate_cfg_write,
	.emulate_mmio_read = intel_vgpu_emulate_mmio_read,
	.emulate_mmio_write = intel_vgpu_emulate_mmio_write,
	.vgpu_create = intel_gvt_create_vgpu,
	.vgpu_destroy = intel_gvt_destroy_vgpu,
	.vgpu_release = intel_gvt_release_vgpu,
	.vgpu_reset = intel_gvt_reset_vgpu,
	.vgpu_activate = intel_gvt_activate_vgpu,
	.vgpu_deactivate = intel_gvt_deactivate_vgpu,
	.vgpu_query_plane = intel_vgpu_query_plane,
	.vgpu_get_dmabuf = intel_vgpu_get_dmabuf,
	.write_protect_handler = intel_vgpu_page_track_handler,
	.emulate_hotplug = intel_vgpu_emulate_hotplug,
};

static void init_device_info(struct intel_gvt *gvt)
{
	struct intel_gvt_device_info *info = &gvt->device_info;
	struct pci_dev *pdev = to_pci_dev(gvt->gt->i915->drm.dev);

	info->max_support_vgpus = 8;
	info->cfg_space_size = PCI_CFG_SPACE_EXP_SIZE;
	info->mmio_size = 2 * 1024 * 1024;
	info->mmio_bar = 0;
	info->gtt_start_offset = 8 * 1024 * 1024;
	info->gtt_entry_size = 8;
	info->gtt_entry_size_shift = 3;
	info->gmadr_bytes_in_cmd = 8;
	info->max_surface_size = 36 * 1024 * 1024;
	info->msi_cap_offset = pdev->msi_cap;
}

static void intel_gvt_test_and_emulate_vblank(struct intel_gvt *gvt)
{
	struct intel_vgpu *vgpu;
	int id;

	mutex_lock(&gvt->lock);
	idr_for_each_entry((&(gvt)->vgpu_idr), (vgpu), (id)) {
		if (test_and_clear_bit(INTEL_GVT_REQUEST_EMULATE_VBLANK + id,
				       (void *)&gvt->service_request)) {
			if (vgpu->active)
				intel_vgpu_emulate_vblank(vgpu);
		}
	}
	mutex_unlock(&gvt->lock);
}

static int gvt_service_thread(void *data)
{
	struct intel_gvt *gvt = (struct intel_gvt *)data;
	int ret;

	gvt_dbg_core("service thread start\n");

	while (!kthread_should_stop()) {
		ret = wait_event_interruptible(gvt->service_thread_wq,
				kthread_should_stop() || gvt->service_request);

		if (kthread_should_stop())
			break;

		if (WARN_ONCE(ret, "service thread is waken up by signal.\n"))
			continue;

		intel_gvt_test_and_emulate_vblank(gvt);

		if (test_bit(INTEL_GVT_REQUEST_SCHED,
				(void *)&gvt->service_request) ||
			test_bit(INTEL_GVT_REQUEST_EVENT_SCHED,
					(void *)&gvt->service_request)) {
			intel_gvt_schedule(gvt);
		}
	}

	return 0;
}

static void clean_service_thread(struct intel_gvt *gvt)
{
	kthread_stop(gvt->service_thread);
}

static int init_service_thread(struct intel_gvt *gvt)
{
	init_waitqueue_head(&gvt->service_thread_wq);

	gvt->service_thread = kthread_run(gvt_service_thread,
			gvt, "gvt_service_thread");
	if (IS_ERR(gvt->service_thread)) {
		gvt_err("fail to start service thread.\n");
		return PTR_ERR(gvt->service_thread);
	}
	return 0;
}

/**
 * intel_gvt_clean_device - clean a GVT device
 * @i915: i915 private
 *
 * This function is called at the driver unloading stage, to free the
 * resources owned by a GVT device.
 *
 */
void intel_gvt_clean_device(struct drm_i915_private *i915)
{
	struct intel_gvt *gvt = fetch_and_zero(&i915->gvt);

	if (drm_WARN_ON(&i915->drm, !gvt))
		return;

	intel_gvt_destroy_idle_vgpu(gvt->idle_vgpu);
	intel_gvt_clean_vgpu_types(gvt);

	intel_gvt_debugfs_clean(gvt);
	clean_service_thread(gvt);
	intel_gvt_clean_cmd_parser(gvt);
	intel_gvt_clean_sched_policy(gvt);
	intel_gvt_clean_workload_scheduler(gvt);
	intel_gvt_clean_gtt(gvt);
	intel_gvt_free_firmware(gvt);
	intel_gvt_clean_mmio_info(gvt);
	idr_destroy(&gvt->vgpu_idr);

	kfree(i915->gvt);
}

/**
 * intel_gvt_init_device - initialize a GVT device
 * @i915: drm i915 private data
 *
 * This function is called at the initialization stage, to initialize
 * necessary GVT components.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 *
 */
int intel_gvt_init_device(struct drm_i915_private *i915)
{
	struct intel_gvt *gvt;
	struct intel_vgpu *vgpu;
	int ret;

	if (drm_WARN_ON(&i915->drm, i915->gvt))
		return -EEXIST;

	gvt = kzalloc(sizeof(struct intel_gvt), GFP_KERNEL);
	if (!gvt)
		return -ENOMEM;

	gvt_dbg_core("init gvt device\n");

	idr_init_base(&gvt->vgpu_idr, 1);
	spin_lock_init(&gvt->scheduler.mmio_context_lock);
	mutex_init(&gvt->lock);
	mutex_init(&gvt->sched_lock);
	gvt->gt = to_gt(i915);
	i915->gvt = gvt;

	init_device_info(gvt);

	ret = intel_gvt_setup_mmio_info(gvt);
	if (ret)
		goto out_clean_idr;

	intel_gvt_init_engine_mmio_context(gvt);

	ret = intel_gvt_load_firmware(gvt);
	if (ret)
		goto out_clean_mmio_info;

	ret = intel_gvt_init_irq(gvt);
	if (ret)
		goto out_free_firmware;

	ret = intel_gvt_init_gtt(gvt);
	if (ret)
		goto out_free_firmware;

	ret = intel_gvt_init_workload_scheduler(gvt);
	if (ret)
		goto out_clean_gtt;

	ret = intel_gvt_init_sched_policy(gvt);
	if (ret)
		goto out_clean_workload_scheduler;

	ret = intel_gvt_init_cmd_parser(gvt);
	if (ret)
		goto out_clean_sched_policy;

	ret = init_service_thread(gvt);
	if (ret)
		goto out_clean_cmd_parser;

	ret = intel_gvt_init_vgpu_types(gvt);
	if (ret)
		goto out_clean_thread;

	vgpu = intel_gvt_create_idle_vgpu(gvt);
	if (IS_ERR(vgpu)) {
		ret = PTR_ERR(vgpu);
		gvt_err("failed to create idle vgpu\n");
		goto out_clean_types;
	}
	gvt->idle_vgpu = vgpu;

	intel_gvt_debugfs_init(gvt);

	gvt_dbg_core("gvt device initialization is done\n");
	intel_gvt_host.dev = i915->drm.dev;
	intel_gvt_host.initialized = true;
	return 0;

out_clean_types:
	intel_gvt_clean_vgpu_types(gvt);
out_clean_thread:
	clean_service_thread(gvt);
out_clean_cmd_parser:
	intel_gvt_clean_cmd_parser(gvt);
out_clean_sched_policy:
	intel_gvt_clean_sched_policy(gvt);
out_clean_workload_scheduler:
	intel_gvt_clean_workload_scheduler(gvt);
out_clean_gtt:
	intel_gvt_clean_gtt(gvt);
out_free_firmware:
	intel_gvt_free_firmware(gvt);
out_clean_mmio_info:
	intel_gvt_clean_mmio_info(gvt);
out_clean_idr:
	idr_destroy(&gvt->vgpu_idr);
	kfree(gvt);
	i915->gvt = NULL;
	return ret;
}

int
intel_gvt_pm_resume(struct intel_gvt *gvt)
{
	intel_gvt_restore_fence(gvt);
	intel_gvt_restore_mmio(gvt);
	intel_gvt_restore_ggtt(gvt);
	return 0;
}

int
intel_gvt_register_hypervisor(const struct intel_gvt_mpt *m)
{
	int ret;
	void *gvt;

	if (!intel_gvt_host.initialized)
		return -ENODEV;

	if (m->type != INTEL_GVT_HYPERVISOR_KVM &&
	    m->type != INTEL_GVT_HYPERVISOR_XEN)
		return -EINVAL;

	/* Get a reference for device model module */
	if (!try_module_get(THIS_MODULE))
		return -ENODEV;

	intel_gvt_host.mpt = m;
	intel_gvt_host.hypervisor_type = m->type;
	gvt = (void *)kdev_to_i915(intel_gvt_host.dev)->gvt;

	ret = intel_gvt_hypervisor_host_init(intel_gvt_host.dev, gvt,
					     &intel_gvt_ops);
	if (ret < 0) {
		gvt_err("Failed to init %s hypervisor module\n",
			supported_hypervisors[intel_gvt_host.hypervisor_type]);
		module_put(THIS_MODULE);
		return -ENODEV;
	}
	gvt_dbg_core("Running with hypervisor %s in host mode\n",
		     supported_hypervisors[intel_gvt_host.hypervisor_type]);
	return 0;
}
EXPORT_SYMBOL_GPL(intel_gvt_register_hypervisor);

void
intel_gvt_unregister_hypervisor(void)
{
	void *gvt = (void *)kdev_to_i915(intel_gvt_host.dev)->gvt;
	intel_gvt_hypervisor_host_exit(intel_gvt_host.dev, gvt);
	module_put(THIS_MODULE);
}
EXPORT_SYMBOL_GPL(intel_gvt_unregister_hypervisor);
@@ -34,11 +34,13 @@
#define _GVT_H_

#include <uapi/linux/pci_regs.h>
#include <linux/kvm_host.h>
#include <linux/vfio.h>

#include "i915_drv.h"
#include "intel_gvt.h"

#include "debug.h"
#include "hypercall.h"
#include "mmio.h"
#include "reg.h"
#include "interrupt.h"
@@ -56,15 +58,6 @@

#define GVT_MAX_VGPU 8

struct intel_gvt_host {
	struct device *dev;
	bool initialized;
	int hypervisor_type;
	const struct intel_gvt_mpt *mpt;
};

extern struct intel_gvt_host intel_gvt_host;

/* Describe per-platform limitations. */
struct intel_gvt_device_info {
	u32 max_support_vgpus;
@@ -176,12 +169,14 @@ struct intel_vgpu_submission {
	} last_ctx[I915_NUM_ENGINES];
};

#define KVMGT_DEBUGFS_FILENAME "kvmgt_nr_cache_entries"

struct intel_vgpu {
	struct intel_gvt *gvt;
	struct mutex vgpu_lock;
	int id;
	unsigned long handle; /* vGPU handle used by hypervisor MPT modules */
	bool active;
	bool attached;
	bool pv_notified;
	bool failsafe;
	unsigned int resetting_eng;
@@ -209,21 +204,40 @@ struct intel_vgpu {

	struct dentry *debugfs;

	/* Hypervisor-specific device state. */
	void *vdev;

	struct list_head dmabuf_obj_list_head;
	struct mutex dmabuf_lock;
	struct idr object_idr;
	struct intel_vgpu_vblank_timer vblank_timer;

	u32 scan_nonprivbb;
};

static inline void *intel_vgpu_vdev(struct intel_vgpu *vgpu)
{
	return vgpu->vdev;
}
	struct vfio_device vfio_device;
	struct vfio_region *region;
	int num_regions;
	struct eventfd_ctx *intx_trigger;
	struct eventfd_ctx *msi_trigger;

	/*
	 * Two caches are used to avoid mapping duplicated pages (eg.
	 * scratch pages). This help to reduce dma setup overhead.
	 */
	struct rb_root gfn_cache;
	struct rb_root dma_addr_cache;
	unsigned long nr_cache_entries;
	struct mutex cache_lock;

	struct notifier_block iommu_notifier;
	struct notifier_block group_notifier;
	struct kvm *kvm;
	struct work_struct release_work;
	atomic_t released;
	struct vfio_group *vfio_group;

	struct kvm_page_track_notifier_node track_node;
#define NR_BKT (1 << 18)
	struct hlist_head ptable[NR_BKT];
#undef NR_BKT
};

/* validating GM healthy status*/
#define vgpu_is_vm_unhealthy(ret_val) \
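The two rb-tree caches referenced in the comment above index mappings by gfn and by dma_addr, so an already-mapped page is reused instead of being pinned and mapped again. A sketch of the gfn-side lookup follows; the struct layout and function name mirror kvmgt's approach but are written here as an illustrative assumption, not a quote of the implementation:

	/*
	 * One cached mapping, linked into both vgpu->gfn_cache and
	 * vgpu->dma_addr_cache (sketch).
	 */
	struct gvt_dma {
		struct intel_vgpu *vgpu;
		struct rb_node gfn_node;
		struct rb_node dma_addr_node;
		gfn_t gfn;
		dma_addr_t dma_addr;
		unsigned long size;
		struct kref ref;
	};

	static struct gvt_dma *__gvt_cache_find_gfn(struct intel_vgpu *vgpu, gfn_t gfn)
	{
		struct rb_node *node = vgpu->gfn_cache.rb_node;

		while (node) {
			struct gvt_dma *itr = rb_entry(node, struct gvt_dma, gfn_node);

			if (gfn < itr->gfn)
				node = node->rb_left;
			else if (gfn > itr->gfn)
				node = node->rb_right;
			else
				return itr; /* hit: reuse, skip the dma setup */
		}
		return NULL; /* miss: caller pins and maps the page */
	}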
@@ -272,7 +286,7 @@ struct intel_gvt_mmio {
/* Value of command write of this reg needs to be patched */
#define F_CMD_WRITE_PATCH	(1 << 8)

	const struct gvt_mmio_block *mmio_block;
	struct gvt_mmio_block *mmio_block;
	unsigned int num_mmio_block;

	DECLARE_HASHTABLE(mmio_info_table, INTEL_GVT_MMIO_HASH_BITS);
@@ -428,7 +442,6 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt);
#define RING_CTX_SIZE 320

struct intel_vgpu_creation_params {
	__u64 handle;
	__u64 low_gm_sz;  /* in MB */
	__u64 high_gm_sz; /* in MB */
	__u64 fence_sz;
@@ -496,6 +509,9 @@ void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu);

int intel_gvt_set_opregion(struct intel_vgpu *vgpu);
int intel_gvt_set_edid(struct intel_vgpu *vgpu, int port_num);

/* validating GM functions */
#define vgpu_gmadr_is_aperture(vgpu, gmadr) \
	((gmadr >= vgpu_aperture_gmadr_base(vgpu)) && \
@@ -557,30 +573,7 @@ void populate_pvinfo_page(struct intel_vgpu *vgpu);

int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload);
void enter_failsafe_mode(struct intel_vgpu *vgpu, int reason);

struct intel_gvt_ops {
	int (*emulate_cfg_read)(struct intel_vgpu *, unsigned int, void *,
				unsigned int);
	int (*emulate_cfg_write)(struct intel_vgpu *, unsigned int, void *,
				unsigned int);
	int (*emulate_mmio_read)(struct intel_vgpu *, u64, void *,
				unsigned int);
	int (*emulate_mmio_write)(struct intel_vgpu *, u64, void *,
				unsigned int);
	struct intel_vgpu *(*vgpu_create)(struct intel_gvt *,
				struct intel_vgpu_type *);
	void (*vgpu_destroy)(struct intel_vgpu *vgpu);
	void (*vgpu_release)(struct intel_vgpu *vgpu);
	void (*vgpu_reset)(struct intel_vgpu *);
	void (*vgpu_activate)(struct intel_vgpu *);
	void (*vgpu_deactivate)(struct intel_vgpu *);
	int (*vgpu_query_plane)(struct intel_vgpu *vgpu, void *);
	int (*vgpu_get_dmabuf)(struct intel_vgpu *vgpu, unsigned int);
	int (*write_protect_handler)(struct intel_vgpu *, u64, void *,
				unsigned int);
	void (*emulate_hotplug)(struct intel_vgpu *vgpu, bool connected);
};

void intel_vgpu_detach_regions(struct intel_vgpu *vgpu);

enum {
	GVT_FAILSAFE_UNSUPPORTED_GUEST,
@@ -724,13 +717,54 @@ static inline bool intel_gvt_mmio_is_cmd_write_patch(
	return gvt->mmio.mmio_attribute[offset >> 2] & F_CMD_WRITE_PATCH;
}

/**
 * intel_gvt_read_gpa - copy data from GPA to host data buffer
 * @vgpu: a vGPU
 * @gpa: guest physical address
 * @buf: host data buffer
 * @len: data length
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
static inline int intel_gvt_read_gpa(struct intel_vgpu *vgpu, unsigned long gpa,
		void *buf, unsigned long len)
{
	if (!vgpu->attached)
		return -ESRCH;
	return vfio_dma_rw(vgpu->vfio_group, gpa, buf, len, false);
}

/**
 * intel_gvt_write_gpa - copy data from host data buffer to GPA
 * @vgpu: a vGPU
 * @gpa: guest physical address
 * @buf: host data buffer
 * @len: data length
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
static inline int intel_gvt_write_gpa(struct intel_vgpu *vgpu,
		unsigned long gpa, void *buf, unsigned long len)
{
	if (!vgpu->attached)
		return -ESRCH;
	return vfio_dma_rw(vgpu->vfio_group, gpa, buf, len, true);
}

void intel_gvt_debugfs_remove_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_debugfs_init(struct intel_gvt *gvt);
void intel_gvt_debugfs_clean(struct intel_gvt *gvt);

int intel_gvt_pm_resume(struct intel_gvt *gvt);
int intel_gvt_page_track_add(struct intel_vgpu *info, u64 gfn);
int intel_gvt_page_track_remove(struct intel_vgpu *info, u64 gfn);
int intel_gvt_dma_pin_guest_page(struct intel_vgpu *vgpu, dma_addr_t dma_addr);
int intel_gvt_dma_map_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
		unsigned long size, dma_addr_t *dma_addr);
void intel_gvt_dma_unmap_guest_page(struct intel_vgpu *vgpu,
		dma_addr_t dma_addr);

#include "trace.h"
#include "mpt.h"

#endif
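A usage sketch for the helpers above, mirroring the cmd_parser call site earlier in this diff (the wrapper function and its caller are assumed, not part of the commit):

	/* Read the CTX_SR_CTL dword out of a guest ring context (sketch). */
	static int read_ctx_sr_ctl(struct intel_vgpu *vgpu,
				   unsigned long ring_context_gpa, u32 *ctx_sr_ctl)
	{
		/* Returns -ESRCH when the vGPU is not attached. */
		return intel_gvt_read_gpa(vgpu, ring_context_gpa + 12,
					  ctx_sr_ctl, 4);
	}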
(diff suppressed: file too large)
@@ -1,82 +0,0 @@
/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Eddie Dong <eddie.dong@intel.com>
 *    Dexuan Cui
 *    Jike Song <jike.song@intel.com>
 *
 * Contributors:
 *    Zhi Wang <zhi.a.wang@intel.com>
 *
 */

#ifndef _GVT_HYPERCALL_H_
#define _GVT_HYPERCALL_H_

#include <linux/types.h>

struct device;

enum hypervisor_type {
	INTEL_GVT_HYPERVISOR_XEN = 0,
	INTEL_GVT_HYPERVISOR_KVM,
};

/*
 * Specific GVT-g MPT modules function collections. Currently GVT-g supports
 * both Xen and KVM by providing dedicated hypervisor-related MPT modules.
 */
struct intel_gvt_mpt {
	enum hypervisor_type type;
	int (*host_init)(struct device *dev, void *gvt, const void *ops);
	void (*host_exit)(struct device *dev, void *gvt);
	int (*attach_vgpu)(void *vgpu, unsigned long *handle);
	void (*detach_vgpu)(void *vgpu);
	int (*inject_msi)(unsigned long handle, u32 addr, u16 data);
	unsigned long (*from_virt_to_mfn)(void *p);
	int (*enable_page_track)(unsigned long handle, u64 gfn);
	int (*disable_page_track)(unsigned long handle, u64 gfn);
	int (*read_gpa)(unsigned long handle, unsigned long gpa, void *buf,
			unsigned long len);
	int (*write_gpa)(unsigned long handle, unsigned long gpa, void *buf,
			 unsigned long len);
	unsigned long (*gfn_to_mfn)(unsigned long handle, unsigned long gfn);

	int (*dma_map_guest_page)(unsigned long handle, unsigned long gfn,
				  unsigned long size, dma_addr_t *dma_addr);
	void (*dma_unmap_guest_page)(unsigned long handle, dma_addr_t dma_addr);

	int (*dma_pin_guest_page)(unsigned long handle, dma_addr_t dma_addr);

	int (*map_gfn_to_mfn)(unsigned long handle, unsigned long gfn,
			      unsigned long mfn, unsigned int nr, bool map);
	int (*set_trap_area)(unsigned long handle, u64 start, u64 end,
			     bool map);
	int (*set_opregion)(void *vgpu);
	int (*set_edid)(void *vgpu, int port_num);
	int (*get_vfio_device)(void *vgpu);
	void (*put_vfio_device)(void *vgpu);
	bool (*is_valid_gfn)(unsigned long handle, unsigned long gfn);
};

#endif /* _GVT_HYPERCALL_H_ */
@@ -29,6 +29,8 @@
 *
 */

#include <linux/eventfd.h>

#include "i915_drv.h"
#include "i915_reg.h"
#include "gvt.h"
@@ -397,9 +399,45 @@ static void init_irq_map(struct intel_gvt_irq *irq)
}

/* =======================vEvent injection===================== */

#define MSI_CAP_CONTROL(offset) (offset + 2)
#define MSI_CAP_ADDRESS(offset) (offset + 4)
#define MSI_CAP_DATA(offset)    (offset + 8)
#define MSI_CAP_EN 0x1

static int inject_virtual_interrupt(struct intel_vgpu *vgpu)
{
	return intel_gvt_hypervisor_inject_msi(vgpu);
	unsigned long offset = vgpu->gvt->device_info.msi_cap_offset;
	u16 control, data;
	u32 addr;

	control = *(u16 *)(vgpu_cfg_space(vgpu) + MSI_CAP_CONTROL(offset));
	addr = *(u32 *)(vgpu_cfg_space(vgpu) + MSI_CAP_ADDRESS(offset));
	data = *(u16 *)(vgpu_cfg_space(vgpu) + MSI_CAP_DATA(offset));

	/* Do not generate MSI if MSIEN is disabled */
	if (!(control & MSI_CAP_EN))
		return 0;

	if (WARN(control & GENMASK(15, 1), "only support one MSI format\n"))
		return -EINVAL;

	trace_inject_msi(vgpu->id, addr, data);

	/*
	 * When guest is powered off, msi_trigger is set to NULL, but vgpu's
	 * config and mmio register isn't restored to default during guest
	 * poweroff. If this vgpu is still used in next vm, this vgpu's pipe
	 * may be enabled, then once this vgpu is active, it will get inject
	 * vblank interrupt request. But msi_trigger is null until msi is
	 * enabled by guest. so if msi_trigger is null, success is still
	 * returned and don't inject interrupt into guest.
	 */
	if (!vgpu->attached)
		return -ESRCH;
	if (vgpu->msi_trigger && eventfd_signal(vgpu->msi_trigger, 1) != 1)
		return -EFAULT;
	return 0;
}

static void propagate_event(struct intel_gvt_irq *irq,
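The msi_trigger eventfd consumed above is handed to the kernel by userspace through the standard VFIO interrupt ioctl. A minimal userspace sketch (error handling omitted; QEMU performs the equivalent internally):

	#include <linux/vfio.h>
	#include <string.h>
	#include <sys/eventfd.h>
	#include <sys/ioctl.h>

	/* Wire an eventfd as the single MSI trigger of an open VFIO device fd. */
	static int set_msi_trigger(int device_fd)
	{
		int efd = eventfd(0, 0);
		char buf[sizeof(struct vfio_irq_set) + sizeof(int)];
		struct vfio_irq_set *irq = (struct vfio_irq_set *)buf;

		irq->argsz = sizeof(buf);
		irq->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
		irq->index = VFIO_PCI_MSI_IRQ_INDEX;
		irq->start = 0;
		irq->count = 1;
		memcpy(irq->data, &efd, sizeof(int));
		ioctl(device_fd, VFIO_DEVICE_SET_IRQS, irq);

		return efd; /* reads on efd observe injected MSIs */
	}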
(diff suppressed: file too large)
@@ -139,7 +139,7 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, u64 pa,
	}

	if (drm_WARN_ON_ONCE(&i915->drm, !reg_is_mmio(gvt, offset))) {
		ret = intel_gvt_hypervisor_read_gpa(vgpu, pa, p_data, bytes);
		ret = intel_gvt_read_gpa(vgpu, pa, p_data, bytes);
		goto out;
	}

@@ -215,7 +215,7 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, u64 pa,
	}

	if (drm_WARN_ON_ONCE(&i915->drm, !reg_is_mmio(gvt, offset))) {
		ret = intel_gvt_hypervisor_write_gpa(vgpu, pa, p_data, bytes);
		ret = intel_gvt_write_gpa(vgpu, pa, p_data, bytes);
		goto out;
	}

@@ -72,7 +72,6 @@ struct intel_gvt_mmio_info {
const struct intel_engine_cs *
intel_gvt_render_mmio_to_engine(struct intel_gvt *gvt, unsigned int reg);
unsigned long intel_gvt_get_device_type(struct intel_gvt *gvt);
bool intel_gvt_match_device(struct intel_gvt *gvt, unsigned long device);

int intel_gvt_setup_mmio_info(struct intel_gvt *gvt);
void intel_gvt_clean_mmio_info(struct intel_gvt *gvt);
@ -1,400 +0,0 @@
|
|||
/*
|
||||
* Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the next
|
||||
* paragraph) shall be included in all copies or substantial portions of the
|
||||
* Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*
|
||||
* Authors:
|
||||
* Eddie Dong <eddie.dong@intel.com>
|
||||
* Dexuan Cui
|
||||
* Jike Song <jike.song@intel.com>
|
||||
*
|
||||
* Contributors:
|
||||
* Zhi Wang <zhi.a.wang@intel.com>
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef _GVT_MPT_H_
|
||||
#define _GVT_MPT_H_
|
||||
|
||||
#include "gvt.h"
|
||||
|
||||
/**
|
||||
* DOC: Hypervisor Service APIs for GVT-g Core Logic
|
||||
*
|
||||
* This is the glue layer between specific hypervisor MPT modules and GVT-g core
|
||||
* logic. Each kind of hypervisor MPT module provides a collection of function
|
||||
* callbacks and will be attached to GVT host when the driver is loading.
|
||||
* GVT-g core logic will call these APIs to request specific services from
|
||||
* hypervisor.
|
||||
*/
|
||||
|
||||
/**
|
||||
* intel_gvt_hypervisor_host_init - init GVT-g host side
|
||||
*
|
||||
* Returns:
|
||||
* Zero on success, negative error code if failed
|
||||
*/
|
||||
static inline int intel_gvt_hypervisor_host_init(struct device *dev,
|
||||
void *gvt, const void *ops)
|
||||
{
|
||||
if (!intel_gvt_host.mpt->host_init)
|
||||
return -ENODEV;
|
||||
|
||||
return intel_gvt_host.mpt->host_init(dev, gvt, ops);
|
||||
}
|
||||
|
||||
/**
|
||||
* intel_gvt_hypervisor_host_exit - exit GVT-g host side
|
||||
*/
|
||||
static inline void intel_gvt_hypervisor_host_exit(struct device *dev, void *gvt)
|
||||
{
|
||||
/* optional to provide */
|
||||
if (!intel_gvt_host.mpt->host_exit)
|
||||
return;
|
||||
|
||||
intel_gvt_host.mpt->host_exit(dev, gvt);
|
||||
}
|
||||
|
||||
/**
|
||||
* intel_gvt_hypervisor_attach_vgpu - call hypervisor to initialize vGPU
|
||||
* related stuffs inside hypervisor.
|
||||
*
|
||||
* Returns:
|
||||
* Zero on success, negative error code if failed.
|
||||
*/
|
||||
static inline int intel_gvt_hypervisor_attach_vgpu(struct intel_vgpu *vgpu)
|
||||
{
|
||||
/* optional to provide */
|
||||
if (!intel_gvt_host.mpt->attach_vgpu)
|
||||
return 0;
|
||||
|
||||
return intel_gvt_host.mpt->attach_vgpu(vgpu, &vgpu->handle);
|
||||
}
|
||||
|
||||
/**
|
||||
* intel_gvt_hypervisor_detach_vgpu - call hypervisor to release vGPU
|
||||
* related stuffs inside hypervisor.
|
||||
*
|
||||
* Returns:
|
||||
* Zero on success, negative error code if failed.
|
||||
*/
|
||||
static inline void intel_gvt_hypervisor_detach_vgpu(struct intel_vgpu *vgpu)
|
||||
{
|
||||
/* optional to provide */
|
||||
if (!intel_gvt_host.mpt->detach_vgpu)
|
||||
return;
|
||||
|
||||
intel_gvt_host.mpt->detach_vgpu(vgpu);
|
||||
}
|
||||
|
||||
#define MSI_CAP_CONTROL(offset) (offset + 2)
|
||||
#define MSI_CAP_ADDRESS(offset) (offset + 4)
|
||||
#define MSI_CAP_DATA(offset) (offset + 8)
|
||||
#define MSI_CAP_EN 0x1
|
||||
|
||||
/**
|
||||
* intel_gvt_hypervisor_inject_msi - inject a MSI interrupt into vGPU
|
||||
*
|
||||
* Returns:
|
||||
* Zero on success, negative error code if failed.
|
||||
*/
|
||||
static inline int intel_gvt_hypervisor_inject_msi(struct intel_vgpu *vgpu)
|
||||
{
|
||||
unsigned long offset = vgpu->gvt->device_info.msi_cap_offset;
|
||||
u16 control, data;
|
||||
u32 addr;
|
||||
int ret;
|
||||
|
||||
control = *(u16 *)(vgpu_cfg_space(vgpu) + MSI_CAP_CONTROL(offset));
|
||||
addr = *(u32 *)(vgpu_cfg_space(vgpu) + MSI_CAP_ADDRESS(offset));
|
||||
data = *(u16 *)(vgpu_cfg_space(vgpu) + MSI_CAP_DATA(offset));
|
||||
|
||||
/* Do not generate MSI if MSIEN is disable */
|
||||
if (!(control & MSI_CAP_EN))
|
||||
return 0;
|
||||
|
||||
if (WARN(control & GENMASK(15, 1), "only support one MSI format\n"))
|
||||
return -EINVAL;
|
||||
|
||||
trace_inject_msi(vgpu->id, addr, data);
|
||||
|
||||
ret = intel_gvt_host.mpt->inject_msi(vgpu->handle, addr, data);
|
||||
if (ret)
|
||||
return ret;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* intel_gvt_hypervisor_set_wp_page - translate a host VA into MFN
|
||||
* @p: host kernel virtual address
|
||||
*
|
||||
* Returns:
|
||||
* MFN on success, INTEL_GVT_INVALID_ADDR if failed.
|
||||
*/
|
||||
static inline unsigned long intel_gvt_hypervisor_virt_to_mfn(void *p)
|
||||
{
|
||||
return intel_gvt_host.mpt->from_virt_to_mfn(p);
|
||||
}
|
||||
|
||||
/**
 * intel_gvt_hypervisor_enable_page_track - track a guest page
 * @vgpu: a vGPU
 * @gfn: the gfn of the guest
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
static inline int intel_gvt_hypervisor_enable_page_track(
		struct intel_vgpu *vgpu, unsigned long gfn)
{
	return intel_gvt_host.mpt->enable_page_track(vgpu->handle, gfn);
}

/**
 * intel_gvt_hypervisor_disable_page_track - untrack a guest page
 * @vgpu: a vGPU
 * @gfn: the gfn of the guest
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
static inline int intel_gvt_hypervisor_disable_page_track(
		struct intel_vgpu *vgpu, unsigned long gfn)
{
	return intel_gvt_host.mpt->disable_page_track(vgpu->handle, gfn);
}

/**
 * intel_gvt_hypervisor_read_gpa - copy data from GPA to host data buffer
 * @vgpu: a vGPU
 * @gpa: guest physical address
 * @buf: host data buffer
 * @len: data length
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
static inline int intel_gvt_hypervisor_read_gpa(struct intel_vgpu *vgpu,
		unsigned long gpa, void *buf, unsigned long len)
{
	return intel_gvt_host.mpt->read_gpa(vgpu->handle, gpa, buf, len);
}

/**
 * intel_gvt_hypervisor_write_gpa - copy data from host data buffer to GPA
 * @vgpu: a vGPU
 * @gpa: guest physical address
 * @buf: host data buffer
 * @len: data length
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
static inline int intel_gvt_hypervisor_write_gpa(struct intel_vgpu *vgpu,
		unsigned long gpa, void *buf, unsigned long len)
{
	return intel_gvt_host.mpt->write_gpa(vgpu->handle, gpa, buf, len);
}

/**
 * intel_gvt_hypervisor_gfn_to_mfn - translate a GFN to MFN
 * @vgpu: a vGPU
 * @gfn: guest pfn
 *
 * Returns:
 * MFN on success, INTEL_GVT_INVALID_ADDR if failed.
 */
static inline unsigned long intel_gvt_hypervisor_gfn_to_mfn(
		struct intel_vgpu *vgpu, unsigned long gfn)
{
	return intel_gvt_host.mpt->gfn_to_mfn(vgpu->handle, gfn);
}

/**
 * intel_gvt_hypervisor_dma_map_guest_page - setup dma map for guest page
 * @vgpu: a vGPU
 * @gfn: guest pfn
 * @size: page size
 * @dma_addr: retrieve allocated dma addr
 *
 * Returns:
 * 0 on success, negative error code if failed.
 */
static inline int intel_gvt_hypervisor_dma_map_guest_page(
		struct intel_vgpu *vgpu, unsigned long gfn, unsigned long size,
		dma_addr_t *dma_addr)
{
	return intel_gvt_host.mpt->dma_map_guest_page(vgpu->handle, gfn, size,
						      dma_addr);
}

/**
 * intel_gvt_hypervisor_dma_unmap_guest_page - cancel dma map for guest page
 * @vgpu: a vGPU
 * @dma_addr: the mapped dma addr
 */
static inline void intel_gvt_hypervisor_dma_unmap_guest_page(
		struct intel_vgpu *vgpu, dma_addr_t dma_addr)
{
	intel_gvt_host.mpt->dma_unmap_guest_page(vgpu->handle, dma_addr);
}

/**
 * intel_gvt_hypervisor_dma_pin_guest_page - pin guest dma buf
 * @vgpu: a vGPU
 * @dma_addr: guest dma addr
 *
 * Returns:
 * 0 on success, negative error code if failed.
 */
static inline int
intel_gvt_hypervisor_dma_pin_guest_page(struct intel_vgpu *vgpu,
					dma_addr_t dma_addr)
{
	return intel_gvt_host.mpt->dma_pin_guest_page(vgpu->handle, dma_addr);
}

/**
 * intel_gvt_hypervisor_map_gfn_to_mfn - map a GFN region to MFN
 * @vgpu: a vGPU
 * @gfn: guest PFN
 * @mfn: host PFN
 * @nr: amount of PFNs
 * @map: map or unmap
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
static inline int intel_gvt_hypervisor_map_gfn_to_mfn(
		struct intel_vgpu *vgpu, unsigned long gfn,
		unsigned long mfn, unsigned int nr,
		bool map)
{
	/* a MPT implementation could have MMIO mapped elsewhere */
	if (!intel_gvt_host.mpt->map_gfn_to_mfn)
		return 0;

	return intel_gvt_host.mpt->map_gfn_to_mfn(vgpu->handle, gfn, mfn, nr,
						  map);
}

/**
 * intel_gvt_hypervisor_set_trap_area - trap a guest PA region
 * @vgpu: a vGPU
 * @start: the beginning of the guest physical address region
 * @end: the end of the guest physical address region
 * @map: map or unmap
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
static inline int intel_gvt_hypervisor_set_trap_area(
		struct intel_vgpu *vgpu, u64 start, u64 end, bool map)
{
	/* a MPT implementation could have MMIO trapped elsewhere */
	if (!intel_gvt_host.mpt->set_trap_area)
		return 0;

	return intel_gvt_host.mpt->set_trap_area(vgpu->handle, start, end, map);
}

/**
 * intel_gvt_hypervisor_set_opregion - Set opregion for guest
 * @vgpu: a vGPU
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
static inline int intel_gvt_hypervisor_set_opregion(struct intel_vgpu *vgpu)
{
	if (!intel_gvt_host.mpt->set_opregion)
		return 0;

	return intel_gvt_host.mpt->set_opregion(vgpu);
}

/**
 * intel_gvt_hypervisor_set_edid - Set EDID region for guest
 * @vgpu: a vGPU
 * @port_num: display port number
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
static inline int intel_gvt_hypervisor_set_edid(struct intel_vgpu *vgpu,
						int port_num)
{
	if (!intel_gvt_host.mpt->set_edid)
		return 0;

	return intel_gvt_host.mpt->set_edid(vgpu, port_num);
}

/**
 * intel_gvt_hypervisor_get_vfio_device - increase vfio device ref count
 * @vgpu: a vGPU
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
static inline int intel_gvt_hypervisor_get_vfio_device(struct intel_vgpu *vgpu)
{
	if (!intel_gvt_host.mpt->get_vfio_device)
		return 0;

	return intel_gvt_host.mpt->get_vfio_device(vgpu);
}

/**
 * intel_gvt_hypervisor_put_vfio_device - decrease vfio device ref count
 * @vgpu: a vGPU
 */
static inline void intel_gvt_hypervisor_put_vfio_device(struct intel_vgpu *vgpu)
{
	if (!intel_gvt_host.mpt->put_vfio_device)
		return;

	intel_gvt_host.mpt->put_vfio_device(vgpu);
}

/**
 * intel_gvt_hypervisor_is_valid_gfn - check if a gfn is valid
 * @vgpu: a vGPU
 * @gfn: guest PFN
 *
 * Returns:
 * true if the gfn is valid, false otherwise.
 */
static inline bool intel_gvt_hypervisor_is_valid_gfn(
		struct intel_vgpu *vgpu, unsigned long gfn)
{
	if (!intel_gvt_host.mpt->is_valid_gfn)
		return true;

	return intel_gvt_host.mpt->is_valid_gfn(vgpu->handle, gfn);
}

int intel_gvt_register_hypervisor(const struct intel_gvt_mpt *);
void intel_gvt_unregister_hypervisor(void);

#endif /* _GVT_MPT_H_ */

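As context for the interface above: a hypervisor backend fills a struct intel_gvt_mpt with its callbacks and hands it to intel_gvt_register_hypervisor(). A minimal, hedged sketch — the myhv_* names are hypothetical and only two of the many callbacks are shown:

/* Hedged sketch of an MPT backend registration; myhv_* names are
 * hypothetical and most callbacks are elided. */
static int myhv_read_gpa(unsigned long handle, unsigned long gpa,
			 void *buf, unsigned long len)
{
	/* copy guest memory at gpa into buf via the hypervisor's API */
	return 0;
}

static int myhv_write_gpa(unsigned long handle, unsigned long gpa,
			  void *buf, unsigned long len)
{
	/* copy buf into guest memory at gpa */
	return 0;
}

static const struct intel_gvt_mpt myhv_mpt = {
	.read_gpa	= myhv_read_gpa,
	.write_gpa	= myhv_write_gpa,
	/* ...the remaining callbacks go here... */
};

static int __init myhv_gvt_init(void)
{
	return intel_gvt_register_hypervisor(&myhv_mpt);
}
module_init(myhv_gvt_init);
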
@@ -255,33 +255,6 @@ int intel_vgpu_init_opregion(struct intel_vgpu *vgpu)
	return 0;
}

static int map_vgpu_opregion(struct intel_vgpu *vgpu, bool map)
{
	u64 mfn;
	int i, ret;

	for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++) {
		mfn = intel_gvt_hypervisor_virt_to_mfn(vgpu_opregion(vgpu)->va
			+ i * PAGE_SIZE);
		if (mfn == INTEL_GVT_INVALID_ADDR) {
			gvt_vgpu_err("fail to get MFN from VA\n");
			return -EINVAL;
		}
		ret = intel_gvt_hypervisor_map_gfn_to_mfn(vgpu,
				vgpu_opregion(vgpu)->gfn[i],
				mfn, 1, map);
		if (ret) {
			gvt_vgpu_err("fail to map GFN to MFN, errno: %d\n",
				ret);
			return ret;
		}
	}

	vgpu_opregion(vgpu)->mapped = map;

	return 0;
}

/**
 * intel_vgpu_opregion_base_write_handler - Opregion base register write handler
 *
@@ -294,34 +267,13 @@ static int map_vgpu_opregion(struct intel_vgpu *vgpu, bool map)
int intel_vgpu_opregion_base_write_handler(struct intel_vgpu *vgpu, u32 gpa)
{

	int i, ret = 0;
	int i;

	gvt_dbg_core("emulate opregion from kernel\n");

	switch (intel_gvt_host.hypervisor_type) {
	case INTEL_GVT_HYPERVISOR_KVM:
		for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++)
			vgpu_opregion(vgpu)->gfn[i] = (gpa >> PAGE_SHIFT) + i;
		break;
	case INTEL_GVT_HYPERVISOR_XEN:
		/*
		 * A Windows guest on XenGT will write this register twice:
		 * once from the Xen hvmloader and once from the Windows
		 * graphics driver.
		 */
		if (vgpu_opregion(vgpu)->mapped)
			map_vgpu_opregion(vgpu, false);

		for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++)
			vgpu_opregion(vgpu)->gfn[i] = (gpa >> PAGE_SHIFT) + i;

		ret = map_vgpu_opregion(vgpu, true);
		break;
	default:
		ret = -EINVAL;
		gvt_vgpu_err("not supported hypervisor\n");
	}

	return ret;
	for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++)
		vgpu_opregion(vgpu)->gfn[i] = (gpa >> PAGE_SHIFT) + i;
	return 0;
}

/**
@@ -336,12 +288,7 @@ void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu)
	if (!vgpu_opregion(vgpu)->va)
		return;

	if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_XEN) {
		if (vgpu_opregion(vgpu)->mapped)
			map_vgpu_opregion(vgpu, false);
	} else if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_KVM) {
		/* Guest opregion is released by VFIO */
	}
	/* Guest opregion is released by VFIO */
	free_pages((unsigned long)vgpu_opregion(vgpu)->va,
		   get_order(INTEL_GVT_OPREGION_SIZE));

@@ -470,39 +417,22 @@ int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci)
	u64 scic_pa = 0, parm_pa = 0;
	int ret;

	switch (intel_gvt_host.hypervisor_type) {
	case INTEL_GVT_HYPERVISOR_XEN:
		scic = *((u32 *)vgpu_opregion(vgpu)->va +
					INTEL_GVT_OPREGION_SCIC);
		parm = *((u32 *)vgpu_opregion(vgpu)->va +
					INTEL_GVT_OPREGION_PARM);
		break;
	case INTEL_GVT_HYPERVISOR_KVM:
		scic_pa = (vgpu_opregion(vgpu)->gfn[0] << PAGE_SHIFT) +
					INTEL_GVT_OPREGION_SCIC;
		parm_pa = (vgpu_opregion(vgpu)->gfn[0] << PAGE_SHIFT) +
					INTEL_GVT_OPREGION_PARM;
	scic_pa = (vgpu_opregion(vgpu)->gfn[0] << PAGE_SHIFT) +
		INTEL_GVT_OPREGION_SCIC;
	parm_pa = (vgpu_opregion(vgpu)->gfn[0] << PAGE_SHIFT) +
		INTEL_GVT_OPREGION_PARM;
	ret = intel_gvt_read_gpa(vgpu, scic_pa, &scic, sizeof(scic));
	if (ret) {
		gvt_vgpu_err("guest opregion read error %d, gpa 0x%llx, len %lu\n",
			ret, scic_pa, sizeof(scic));
		return ret;
	}

		ret = intel_gvt_hypervisor_read_gpa(vgpu, scic_pa,
						    &scic, sizeof(scic));
		if (ret) {
			gvt_vgpu_err("guest opregion read error %d, gpa 0x%llx, len %lu\n",
				ret, scic_pa, sizeof(scic));
			return ret;
		}

		ret = intel_gvt_hypervisor_read_gpa(vgpu, parm_pa,
						    &parm, sizeof(parm));
		if (ret) {
			gvt_vgpu_err("guest opregion read error %d, gpa 0x%llx, len %lu\n",
				ret, scic_pa, sizeof(scic));
			return ret;
		}

		break;
	default:
		gvt_vgpu_err("not supported hypervisor\n");
		return -EINVAL;
	ret = intel_gvt_read_gpa(vgpu, parm_pa, &parm, sizeof(parm));
	if (ret) {
		gvt_vgpu_err("guest opregion read error %d, gpa 0x%llx, len %lu\n",
			ret, scic_pa, sizeof(scic));
		return ret;
	}

	if (!(swsci & SWSCI_SCI_SELECT)) {
@@ -535,34 +465,18 @@ int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci)
	parm = 0;

out:
	switch (intel_gvt_host.hypervisor_type) {
	case INTEL_GVT_HYPERVISOR_XEN:
		*((u32 *)vgpu_opregion(vgpu)->va +
					INTEL_GVT_OPREGION_SCIC) = scic;
		*((u32 *)vgpu_opregion(vgpu)->va +
					INTEL_GVT_OPREGION_PARM) = parm;
		break;
	case INTEL_GVT_HYPERVISOR_KVM:
		ret = intel_gvt_hypervisor_write_gpa(vgpu, scic_pa,
						     &scic, sizeof(scic));
		if (ret) {
			gvt_vgpu_err("guest opregion write error %d, gpa 0x%llx, len %lu\n",
				ret, scic_pa, sizeof(scic));
			return ret;
		}
	ret = intel_gvt_write_gpa(vgpu, scic_pa, &scic, sizeof(scic));
	if (ret) {
		gvt_vgpu_err("guest opregion write error %d, gpa 0x%llx, len %lu\n",
			ret, scic_pa, sizeof(scic));
		return ret;
	}

		ret = intel_gvt_hypervisor_write_gpa(vgpu, parm_pa,
						     &parm, sizeof(parm));
		if (ret) {
			gvt_vgpu_err("guest opregion write error %d, gpa 0x%llx, len %lu\n",
				ret, scic_pa, sizeof(scic));
			return ret;
		}

		break;
	default:
		gvt_vgpu_err("not supported hypervisor\n");
		return -EINVAL;
	ret = intel_gvt_write_gpa(vgpu, parm_pa, &parm, sizeof(parm));
	if (ret) {
		gvt_vgpu_err("guest opregion write error %d, gpa 0x%llx, len %lu\n",
			ret, scic_pa, sizeof(scic));
		return ret;
	}

	return 0;
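The surviving KVM-only path works purely in guest physical addresses: SCIC and PARM sit at fixed offsets inside the OpRegion pages whose GFNs the base-register write handler recorded. A condensed sketch of that round-trip, reusing only the helpers visible in the diff above (this is an illustration, not the driver's actual code):

/* Condensed, hedged sketch of the SCIC read-modify-write above;
 * error logging trimmed, sketch_scic_roundtrip is illustrative. */
static int sketch_scic_roundtrip(struct intel_vgpu *vgpu)
{
	u64 scic_pa = (vgpu_opregion(vgpu)->gfn[0] << PAGE_SHIFT) +
		      INTEL_GVT_OPREGION_SCIC;
	u32 scic;
	int ret;

	ret = intel_gvt_read_gpa(vgpu, scic_pa, &scic, sizeof(scic));
	if (ret)
		return ret;

	/* ...interpret the request here, e.g. test swsci & SWSCI_SCI_SELECT... */

	return intel_gvt_write_gpa(vgpu, scic_pa, &scic, sizeof(scic));
}
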
@@ -87,7 +87,7 @@ void intel_vgpu_unregister_page_track(struct intel_vgpu *vgpu,
	track = radix_tree_delete(&vgpu->page_track_tree, gfn);
	if (track) {
		if (track->tracked)
			intel_gvt_hypervisor_disable_page_track(vgpu, gfn);
			intel_gvt_page_track_remove(vgpu, gfn);
		kfree(track);
	}
}
@@ -112,7 +112,7 @@ int intel_vgpu_enable_page_track(struct intel_vgpu *vgpu, unsigned long gfn)
	if (track->tracked)
		return 0;

	ret = intel_gvt_hypervisor_enable_page_track(vgpu, gfn);
	ret = intel_gvt_page_track_add(vgpu, gfn);
	if (ret)
		return ret;
	track->tracked = true;
@@ -139,7 +139,7 @@ int intel_vgpu_disable_page_track(struct intel_vgpu *vgpu, unsigned long gfn)
	if (!track->tracked)
		return 0;

	ret = intel_gvt_hypervisor_disable_page_track(vgpu, gfn);
	ret = intel_gvt_page_track_remove(vgpu, gfn);
	if (ret)
		return ret;
	track->tracked = false;
@@ -172,7 +172,7 @@ int intel_vgpu_page_track_handler(struct intel_vgpu *vgpu, u64 gpa,

	if (unlikely(vgpu->failsafe)) {
		/* Remove write protection to prevent future traps. */
		intel_vgpu_disable_page_track(vgpu, gpa >> PAGE_SHIFT);
		intel_gvt_page_track_remove(vgpu, gpa >> PAGE_SHIFT);
	} else {
		ret = page_track->handler(page_track, gpa, data, bytes);
		if (ret)
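With the hypervisor indirection gone, enabling a track now calls straight into intel_gvt_page_track_add(). A hedged sketch of a consumer, assuming the existing intel_vgpu_register_page_track() helper keeps the handler signature seen above; my_write_handler and track_one_gfn are illustrative:

/* Hedged sketch of a page-track consumer; my_* names are illustrative. */
static int my_write_handler(struct intel_vgpu_page_track *page_track,
			    u64 gpa, void *data, int bytes)
{
	/* react to the trapped guest write, e.g. update a shadow copy */
	return 0;
}

static int track_one_gfn(struct intel_vgpu *vgpu, unsigned long gfn)
{
	int ret;

	ret = intel_vgpu_register_page_track(vgpu, gfn,
					     my_write_handler, NULL);
	if (ret)
		return ret;

	/* arm write protection; now backed by intel_gvt_page_track_add() */
	return intel_vgpu_enable_page_track(vgpu, gfn);
}
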
@@ -132,6 +132,13 @@
#define RING_GFX_MODE(base)	_MMIO((base) + 0x29c)
#define VF_GUARDBAND		_MMIO(0x83a4)


#define BCS_TILE_REGISTER_VAL_OFFSET (0x43*4)

/* XXX FIXME i915 has changed PP_XXX definition */
#define PCH_PP_STATUS		_MMIO(0xc7200)
#define PCH_PP_CONTROL		_MMIO(0xc7204)
#define PCH_PP_ON_DELAYS	_MMIO(0xc7208)
#define PCH_PP_OFF_DELAYS	_MMIO(0xc720c)
#define PCH_PP_DIVISOR		_MMIO(0xc7210)

#endif
@@ -150,10 +150,10 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)

	sr_oa_regs(workload, (u32 *)shadow_ring_context, true);
#define COPY_REG(name) \
	intel_gvt_hypervisor_read_gpa(vgpu, workload->ring_context_gpa \
	intel_gvt_read_gpa(vgpu, workload->ring_context_gpa \
		+ RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4)
#define COPY_REG_MASKED(name) {\
	intel_gvt_hypervisor_read_gpa(vgpu, workload->ring_context_gpa \
	intel_gvt_read_gpa(vgpu, workload->ring_context_gpa \
		+ RING_CTX_OFF(name.val),\
		&shadow_ring_context->name.val, 4);\
	shadow_ring_context->name.val |= 0xffff << 16;\
@@ -167,7 +167,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
		COPY_REG(rcs_indirect_ctx);
		COPY_REG(rcs_indirect_ctx_offset);
	} else if (workload->engine->id == BCS0)
		intel_gvt_hypervisor_read_gpa(vgpu,
		intel_gvt_read_gpa(vgpu,
				workload->ring_context_gpa +
				BCS_TILE_REGISTER_VAL_OFFSET,
				(void *)shadow_ring_context +
@@ -178,7 +178,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
	/* don't copy Ring Context (the first 0x50 dwords),
	 * only copy the Engine Context part from guest
	 */
	intel_gvt_hypervisor_read_gpa(vgpu,
	intel_gvt_read_gpa(vgpu,
			workload->ring_context_gpa +
			RING_CTX_SIZE,
			(void *)shadow_ring_context +
@@ -245,7 +245,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
			continue;

read:
		intel_gvt_hypervisor_read_gpa(vgpu, gpa_base, dst, gpa_size);
		intel_gvt_read_gpa(vgpu, gpa_base, dst, gpa_size);
		gpa_base = context_gpa;
		gpa_size = I915_GTT_PAGE_SIZE;
		dst = context_base + (i << I915_GTT_PAGE_SHIFT);
@@ -911,8 +911,7 @@ static void update_guest_pdps(struct intel_vgpu *vgpu,
	gpa = ring_context_gpa + RING_CTX_OFF(pdps[0].val);

	for (i = 0; i < 8; i++)
		intel_gvt_hypervisor_write_gpa(vgpu,
				gpa + i * 8, &pdp[7 - i], 4);
		intel_gvt_write_gpa(vgpu, gpa + i * 8, &pdp[7 - i], 4);
}

static __maybe_unused bool
@@ -1007,13 +1006,13 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
			continue;

write:
		intel_gvt_hypervisor_write_gpa(vgpu, gpa_base, src, gpa_size);
		intel_gvt_write_gpa(vgpu, gpa_base, src, gpa_size);
		gpa_base = context_gpa;
		gpa_size = I915_GTT_PAGE_SIZE;
		src = context_base + (i << I915_GTT_PAGE_SHIFT);
	}

	intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa +
	intel_gvt_write_gpa(vgpu, workload->ring_context_gpa +
		RING_CTX_OFF(ring_header.val), &workload->rb_tail, 4);

	shadow_ring_context = (void *) ctx->lrc_reg_state;
@@ -1028,7 +1027,7 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
	}

#define COPY_REG(name) \
	intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa + \
	intel_gvt_write_gpa(vgpu, workload->ring_context_gpa + \
		RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4)

	COPY_REG(ctx_ctrl);
@@ -1036,7 +1035,7 @@ static void update_guest_context(struct intel_vgpu_workload *workload)

#undef COPY_REG

	intel_gvt_hypervisor_write_gpa(vgpu,
	intel_gvt_write_gpa(vgpu,
			workload->ring_context_gpa +
			sizeof(*shadow_ring_context),
			(void *)shadow_ring_context +
@@ -1573,7 +1572,7 @@ static void read_guest_pdps(struct intel_vgpu *vgpu,
	gpa = ring_context_gpa + RING_CTX_OFF(pdps[0].val);

	for (i = 0; i < 8; i++)
		intel_gvt_hypervisor_read_gpa(vgpu,
		intel_gvt_read_gpa(vgpu,
				gpa + i * 8, &pdp[7 - i], 4);
}

@@ -1644,10 +1643,10 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu,
		return ERR_PTR(-EINVAL);
	}

	intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
	intel_gvt_read_gpa(vgpu, ring_context_gpa +
		RING_CTX_OFF(ring_header.val), &head, 4);

	intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
	intel_gvt_read_gpa(vgpu, ring_context_gpa +
		RING_CTX_OFF(ring_tail.val), &tail, 4);

	guest_head = head;
@@ -1674,11 +1673,11 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu,
	gvt_dbg_el("ring %s begin a new workload\n", engine->name);

	/* record some ring buffer register values for scan and shadow */
	intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
	intel_gvt_read_gpa(vgpu, ring_context_gpa +
		RING_CTX_OFF(rb_start.val), &start, 4);
	intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
	intel_gvt_read_gpa(vgpu, ring_context_gpa +
		RING_CTX_OFF(rb_ctrl.val), &ctl, 4);
	intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
	intel_gvt_read_gpa(vgpu, ring_context_gpa +
		RING_CTX_OFF(ctx_ctrl.val), &ctx_ctl, 4);

	if (!intel_gvt_ggtt_validate_range(vgpu, start,
@@ -1701,9 +1700,9 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu,
	workload->rb_ctl = ctl;

	if (engine->id == RCS0) {
		intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
		intel_gvt_read_gpa(vgpu, ring_context_gpa +
			RING_CTX_OFF(bb_per_ctx_ptr.val), &per_ctx, 4);
		intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
		intel_gvt_read_gpa(vgpu, ring_context_gpa +
			RING_CTX_OFF(rcs_indirect_ctx.val), &indirect_ctx, 4);

		workload->wa_ctx.indirect_ctx.guest_gma =
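For readers tracing the COPY_REG macros above: after the rename each expansion is a single 4-byte intel_gvt_read_gpa() out of the guest ring context. A hand-expanded illustration (not code from the patch); copy_ctx_ctrl is a hypothetical wrapper:

/* What COPY_REG(ctx_ctrl) expands to after the rename; shown only
 * for illustration. */
static void copy_ctx_ctrl(struct intel_vgpu *vgpu,
			  struct intel_vgpu_workload *workload,
			  struct execlist_ring_context *shadow_ring_context)
{
	intel_gvt_read_gpa(vgpu,
			   workload->ring_context_gpa +
			   RING_CTX_OFF(ctx_ctrl.val),
			   &shadow_ring_context->ctx_ctrl.val, 4);
}
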
@@ -377,7 +377,7 @@ TRACE_EVENT(render_mmio,

/* This part must be out of protection */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/i915/gvt
#define TRACE_INCLUDE_FILE trace
#include <trace/define_trace.h>
@@ -293,7 +293,7 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
	intel_vgpu_clean_opregion(vgpu);
	intel_vgpu_reset_ggtt(vgpu, true);
	intel_vgpu_clean_gtt(vgpu);
	intel_gvt_hypervisor_detach_vgpu(vgpu);
	intel_vgpu_detach_regions(vgpu);
	intel_vgpu_free_resource(vgpu);
	intel_vgpu_clean_mmio(vgpu);
	intel_vgpu_dmabuf_cleanup(vgpu);
@@ -370,8 +370,8 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
	struct intel_vgpu *vgpu;
	int ret;

	gvt_dbg_core("handle %llu low %llu MB high %llu MB fence %llu\n",
			param->handle, param->low_gm_sz, param->high_gm_sz,
	gvt_dbg_core("low %llu MB high %llu MB fence %llu\n",
			param->low_gm_sz, param->high_gm_sz,
			param->fence_sz);

	vgpu = vzalloc(sizeof(*vgpu));
@@ -384,7 +384,6 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
		goto out_free_vgpu;

	vgpu->id = ret;
	vgpu->handle = param->handle;
	vgpu->gvt = gvt;
	vgpu->sched_ctl.weight = param->weight;
	mutex_init(&vgpu->vgpu_lock);
@@ -405,13 +404,9 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,

	populate_pvinfo_page(vgpu);

	ret = intel_gvt_hypervisor_attach_vgpu(vgpu);
	if (ret)
		goto out_clean_vgpu_resource;

	ret = intel_vgpu_init_gtt(vgpu);
	if (ret)
		goto out_detach_hypervisor_vgpu;
		goto out_clean_vgpu_resource;

	ret = intel_vgpu_init_opregion(vgpu);
	if (ret)
@@ -431,14 +426,14 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,

	intel_gvt_debugfs_add_vgpu(vgpu);

	ret = intel_gvt_hypervisor_set_opregion(vgpu);
	ret = intel_gvt_set_opregion(vgpu);
	if (ret)
		goto out_clean_sched_policy;

	if (IS_BROADWELL(dev_priv) || IS_BROXTON(dev_priv))
		ret = intel_gvt_hypervisor_set_edid(vgpu, PORT_B);
		ret = intel_gvt_set_edid(vgpu, PORT_B);
	else
		ret = intel_gvt_hypervisor_set_edid(vgpu, PORT_D);
		ret = intel_gvt_set_edid(vgpu, PORT_D);
	if (ret)
		goto out_clean_sched_policy;

@@ -454,8 +449,6 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
	intel_vgpu_clean_opregion(vgpu);
out_clean_gtt:
	intel_vgpu_clean_gtt(vgpu);
out_detach_hypervisor_vgpu:
	intel_gvt_hypervisor_detach_vgpu(vgpu);
out_clean_vgpu_resource:
	intel_vgpu_free_resource(vgpu);
out_clean_vgpu_mmio:
@@ -483,7 +476,6 @@ struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
	struct intel_vgpu_creation_params param;
	struct intel_vgpu *vgpu;

	param.handle = 0;
	param.primary = 1;
	param.low_gm_sz = type->low_gm_size;
	param.high_gm_sz = type->high_gm_size;
@@ -468,11 +468,6 @@ static void i915_driver_mmio_release(struct drm_i915_private *dev_priv)
	pci_dev_put(dev_priv->bridge_dev);
}

static void intel_sanitize_options(struct drm_i915_private *dev_priv)
{
	intel_gvt_sanitize_options(dev_priv);
}

/**
 * i915_set_dma_info - set all relevant PCI dma info as configured for the
 * platform
@@ -566,8 +561,6 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
		}
	}

	intel_sanitize_options(dev_priv);

	/* needs to be done before ggtt probe */
	intel_dram_edram_detect(dev_priv);

@@ -432,6 +432,9 @@ struct i915_virtual_gpu {
	struct mutex lock; /* serialises sending of g2v_notify command pkts */
	bool active;
	u32 caps;
	u32 *initial_mmio;
	u8 *initial_cfg_space;
	struct list_head entry;
};

struct i915_selftest_stash {
@@ -24,7 +24,10 @@
#include "i915_drv.h"
#include "i915_vgpu.h"
#include "intel_gvt.h"
#include "gvt/gvt.h"
#include "gem/i915_gem_dmabuf.h"
#include "gt/intel_context.h"
#include "gt/intel_ring.h"
#include "gt/shmem_utils.h"

/**
 * DOC: Intel GVT-g host support
@@ -41,6 +44,10 @@
 * doc is available on https://01.org/group/2230/documentation-list.
 */

static LIST_HEAD(intel_gvt_devices);
static const struct intel_vgpu_ops *intel_gvt_ops;
static DEFINE_MUTEX(intel_gvt_mutex);

static bool is_supported_device(struct drm_i915_private *dev_priv)
{
	if (IS_BROADWELL(dev_priv))
@@ -59,33 +66,163 @@ static bool is_supported_device(struct drm_i915_private *dev_priv)
	return false;
}

/**
 * intel_gvt_sanitize_options - sanitize GVT related options
 * @dev_priv: drm i915 private data
 *
 * This function is called at the i915 options sanitize stage.
 */
void intel_gvt_sanitize_options(struct drm_i915_private *dev_priv)
static void free_initial_hw_state(struct drm_i915_private *dev_priv)
{
	if (!dev_priv->params.enable_gvt)
	struct i915_virtual_gpu *vgpu = &dev_priv->vgpu;

	vfree(vgpu->initial_mmio);
	vgpu->initial_mmio = NULL;

	kfree(vgpu->initial_cfg_space);
	vgpu->initial_cfg_space = NULL;
}

static void save_mmio(struct intel_gvt_mmio_table_iter *iter, u32 offset,
		      u32 size)
{
	struct drm_i915_private *dev_priv = iter->i915;
	u32 *mmio, i;

	for (i = offset; i < offset + size; i += 4) {
		mmio = iter->data + i;
		*mmio = intel_uncore_read_notrace(to_gt(dev_priv)->uncore,
						  _MMIO(i));
	}
}

static int handle_mmio(struct intel_gvt_mmio_table_iter *iter,
		       u32 offset, u32 size)
{
	if (WARN_ON(!IS_ALIGNED(offset, 4)))
		return -EINVAL;

	save_mmio(iter, offset, size);
	return 0;
}

static int save_initial_hw_state(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
	struct i915_virtual_gpu *vgpu = &dev_priv->vgpu;
	struct intel_gvt_mmio_table_iter iter;
	void *mem;
	int i, ret;

	mem = kzalloc(PCI_CFG_SPACE_EXP_SIZE, GFP_KERNEL);
	if (!mem)
		return -ENOMEM;

	vgpu->initial_cfg_space = mem;

	for (i = 0; i < PCI_CFG_SPACE_EXP_SIZE; i += 4)
		pci_read_config_dword(pdev, i, mem + i);

	mem = vzalloc(2 * SZ_1M);
	if (!mem) {
		ret = -ENOMEM;
		goto err_mmio;
	}

	vgpu->initial_mmio = mem;

	iter.i915 = dev_priv;
	iter.data = vgpu->initial_mmio;
	iter.handle_mmio_cb = handle_mmio;

	ret = intel_gvt_iterate_mmio_table(&iter);
	if (ret)
		goto err_iterate;

	return 0;

err_iterate:
	vfree(vgpu->initial_mmio);
	vgpu->initial_mmio = NULL;
err_mmio:
	kfree(vgpu->initial_cfg_space);
	vgpu->initial_cfg_space = NULL;

	return ret;
}

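The iterator above is generic: any callback matching handle_mmio_cb can walk the MMIO table. A hedged sketch of a second consumer that merely sums the table's coverage; count_mmio and total_mmio_bytes are illustrative, not part of the patch:

/* Hedged sketch of another intel_gvt_iterate_mmio_table() consumer. */
static int count_mmio(struct intel_gvt_mmio_table_iter *iter,
		      u32 offset, u32 size)
{
	u32 *reg_bytes = iter->data;

	*reg_bytes += size;
	return 0;
}

static u32 total_mmio_bytes(struct drm_i915_private *i915)
{
	u32 reg_bytes = 0;
	struct intel_gvt_mmio_table_iter iter = {
		.i915		= i915,
		.data		= &reg_bytes,
		.handle_mmio_cb	= count_mmio,
	};

	if (intel_gvt_iterate_mmio_table(&iter))
		return 0;
	return reg_bytes;
}
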
static void intel_gvt_init_device(struct drm_i915_private *dev_priv)
{
	if (!dev_priv->params.enable_gvt) {
		drm_dbg(&dev_priv->drm,
			"GVT-g is disabled by kernel params\n");
		return;
	}

	if (intel_vgpu_active(dev_priv)) {
		drm_info(&dev_priv->drm, "GVT-g is disabled for guest\n");
		goto bail;
		return;
	}

	if (!is_supported_device(dev_priv)) {
		drm_info(&dev_priv->drm,
			 "Unsupported device. GVT-g is disabled\n");
		goto bail;
		return;
	}

	return;
bail:
	dev_priv->params.enable_gvt = 0;
	if (intel_uc_wants_guc_submission(&to_gt(dev_priv)->uc)) {
		drm_err(&dev_priv->drm,
			"Graphics virtualization is not yet supported with GuC submission\n");
		return;
	}

	if (save_initial_hw_state(dev_priv)) {
		drm_dbg(&dev_priv->drm, "Failed to save initial HW state\n");
		return;
	}

	if (intel_gvt_ops->init_device(dev_priv))
		drm_dbg(&dev_priv->drm, "Fail to init GVT device\n");
}

static void intel_gvt_clean_device(struct drm_i915_private *dev_priv)
{
	if (dev_priv->gvt)
		intel_gvt_ops->clean_device(dev_priv);
	free_initial_hw_state(dev_priv);
}

int intel_gvt_set_ops(const struct intel_vgpu_ops *ops)
{
	struct drm_i915_private *dev_priv;

	mutex_lock(&intel_gvt_mutex);
	if (intel_gvt_ops) {
		mutex_unlock(&intel_gvt_mutex);
		return -EINVAL;
	}
	intel_gvt_ops = ops;

	list_for_each_entry(dev_priv, &intel_gvt_devices, vgpu.entry)
		intel_gvt_init_device(dev_priv);
	mutex_unlock(&intel_gvt_mutex);

	return 0;
}
EXPORT_SYMBOL_NS_GPL(intel_gvt_set_ops, I915_GVT);

void intel_gvt_clear_ops(const struct intel_vgpu_ops *ops)
{
	struct drm_i915_private *dev_priv;

	mutex_lock(&intel_gvt_mutex);
	if (intel_gvt_ops != ops) {
		mutex_unlock(&intel_gvt_mutex);
		return;
	}

	list_for_each_entry(dev_priv, &intel_gvt_devices, vgpu.entry)
		intel_gvt_clean_device(dev_priv);

	intel_gvt_ops = NULL;
	mutex_unlock(&intel_gvt_mutex);
}
EXPORT_SYMBOL_NS_GPL(intel_gvt_clear_ops, I915_GVT);

/**
 * intel_gvt_init - initialize GVT components
 * @dev_priv: drm i915 private data
@@ -98,39 +235,16 @@ void intel_gvt_sanitize_options(struct drm_i915_private *dev_priv)
 */
int intel_gvt_init(struct drm_i915_private *dev_priv)
{
	int ret;

	if (i915_inject_probe_failure(dev_priv))
		return -ENODEV;

	if (!dev_priv->params.enable_gvt) {
		drm_dbg(&dev_priv->drm,
			"GVT-g is disabled by kernel params\n");
		return 0;
	}

	if (intel_uc_wants_guc_submission(&to_gt(dev_priv)->uc)) {
		drm_err(&dev_priv->drm,
			"i915 GVT-g loading failed due to Graphics virtualization is not yet supported with GuC submission\n");
		return -EIO;
	}

	ret = intel_gvt_init_device(dev_priv);
	if (ret) {
		drm_dbg(&dev_priv->drm, "Fail to init GVT device\n");
		goto bail;
	}
	mutex_lock(&intel_gvt_mutex);
	list_add_tail(&dev_priv->vgpu.entry, &intel_gvt_devices);
	if (intel_gvt_ops)
		intel_gvt_init_device(dev_priv);
	mutex_unlock(&intel_gvt_mutex);

	return 0;

bail:
	dev_priv->params.enable_gvt = 0;
	return 0;
}

static inline bool intel_gvt_active(struct drm_i915_private *dev_priv)
{
	return dev_priv->gvt;
}

/**
@@ -143,10 +257,10 @@ static inline bool intel_gvt_active(struct drm_i915_private *dev_priv)
 */
void intel_gvt_driver_remove(struct drm_i915_private *dev_priv)
{
	if (!intel_gvt_active(dev_priv))
		return;

	mutex_lock(&intel_gvt_mutex);
	intel_gvt_clean_device(dev_priv);
	list_del(&dev_priv->vgpu.entry);
	mutex_unlock(&intel_gvt_mutex);
}

/**
@@ -159,6 +273,50 @@ void intel_gvt_driver_remove(struct drm_i915_private *dev_priv)
 */
void intel_gvt_resume(struct drm_i915_private *dev_priv)
{
	if (intel_gvt_active(dev_priv))
		intel_gvt_pm_resume(dev_priv->gvt);
	mutex_lock(&intel_gvt_mutex);
	if (dev_priv->gvt)
		intel_gvt_ops->pm_resume(dev_priv);
	mutex_unlock(&intel_gvt_mutex);
}

/*
 * Exported here so that the exports only get created when GVT support is
 * actually enabled.
 */
EXPORT_SYMBOL_NS_GPL(i915_gem_object_alloc, I915_GVT);
EXPORT_SYMBOL_NS_GPL(i915_gem_object_create_shmem, I915_GVT);
EXPORT_SYMBOL_NS_GPL(i915_gem_object_init, I915_GVT);
EXPORT_SYMBOL_NS_GPL(i915_gem_object_ggtt_pin_ww, I915_GVT);
EXPORT_SYMBOL_NS_GPL(i915_gem_object_pin_map, I915_GVT);
EXPORT_SYMBOL_NS_GPL(i915_gem_object_set_to_cpu_domain, I915_GVT);
EXPORT_SYMBOL_NS_GPL(__i915_gem_object_flush_map, I915_GVT);
EXPORT_SYMBOL_NS_GPL(__i915_gem_object_set_pages, I915_GVT);
EXPORT_SYMBOL_NS_GPL(i915_gem_gtt_insert, I915_GVT);
EXPORT_SYMBOL_NS_GPL(i915_gem_prime_export, I915_GVT);
EXPORT_SYMBOL_NS_GPL(i915_gem_ww_ctx_init, I915_GVT);
EXPORT_SYMBOL_NS_GPL(i915_gem_ww_ctx_backoff, I915_GVT);
EXPORT_SYMBOL_NS_GPL(i915_gem_ww_ctx_fini, I915_GVT);
EXPORT_SYMBOL_NS_GPL(i915_ppgtt_create, I915_GVT);
EXPORT_SYMBOL_NS_GPL(i915_request_add, I915_GVT);
EXPORT_SYMBOL_NS_GPL(i915_request_create, I915_GVT);
EXPORT_SYMBOL_NS_GPL(i915_request_wait, I915_GVT);
EXPORT_SYMBOL_NS_GPL(i915_reserve_fence, I915_GVT);
EXPORT_SYMBOL_NS_GPL(i915_unreserve_fence, I915_GVT);
EXPORT_SYMBOL_NS_GPL(i915_vm_release, I915_GVT);
EXPORT_SYMBOL_NS_GPL(_i915_vma_move_to_active, I915_GVT);
EXPORT_SYMBOL_NS_GPL(intel_context_create, I915_GVT);
EXPORT_SYMBOL_NS_GPL(__intel_context_do_pin, I915_GVT);
EXPORT_SYMBOL_NS_GPL(__intel_context_do_unpin, I915_GVT);
EXPORT_SYMBOL_NS_GPL(intel_ring_begin, I915_GVT);
EXPORT_SYMBOL_NS_GPL(intel_runtime_pm_get, I915_GVT);
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
EXPORT_SYMBOL_NS_GPL(intel_runtime_pm_put, I915_GVT);
#endif
EXPORT_SYMBOL_NS_GPL(intel_runtime_pm_put_unchecked, I915_GVT);
EXPORT_SYMBOL_NS_GPL(intel_uncore_forcewake_for_reg, I915_GVT);
EXPORT_SYMBOL_NS_GPL(intel_uncore_forcewake_get, I915_GVT);
EXPORT_SYMBOL_NS_GPL(intel_uncore_forcewake_put, I915_GVT);
EXPORT_SYMBOL_NS_GPL(shmem_pin_map, I915_GVT);
EXPORT_SYMBOL_NS_GPL(shmem_unpin_map, I915_GVT);
EXPORT_SYMBOL_NS_GPL(__px_dma, I915_GVT);
EXPORT_SYMBOL_NS_GPL(i915_fence_ops, I915_GVT);
@@ -24,16 +24,34 @@
#ifndef _INTEL_GVT_H_
#define _INTEL_GVT_H_

#include <linux/types.h>

struct drm_i915_private;

#ifdef CONFIG_DRM_I915_GVT

struct intel_gvt_mmio_table_iter {
	struct drm_i915_private *i915;
	void *data;
	int (*handle_mmio_cb)(struct intel_gvt_mmio_table_iter *iter,
			      u32 offset, u32 size);
};

int intel_gvt_init(struct drm_i915_private *dev_priv);
void intel_gvt_driver_remove(struct drm_i915_private *dev_priv);
int intel_gvt_init_device(struct drm_i915_private *dev_priv);
void intel_gvt_clean_device(struct drm_i915_private *dev_priv);
int intel_gvt_init_host(void);
void intel_gvt_sanitize_options(struct drm_i915_private *dev_priv);
void intel_gvt_resume(struct drm_i915_private *dev_priv);
int intel_gvt_iterate_mmio_table(struct intel_gvt_mmio_table_iter *iter);

struct intel_vgpu_ops {
	int (*init_device)(struct drm_i915_private *dev_priv);
	void (*clean_device)(struct drm_i915_private *dev_priv);
	void (*pm_resume)(struct drm_i915_private *i915);
};

int intel_gvt_set_ops(const struct intel_vgpu_ops *ops);
void intel_gvt_clear_ops(const struct intel_vgpu_ops *ops);

#else
static inline int intel_gvt_init(struct drm_i915_private *dev_priv)
{
@@ -44,12 +62,16 @@ static inline void intel_gvt_driver_remove(struct drm_i915_private *dev_priv)
{
}

static inline void intel_gvt_sanitize_options(struct drm_i915_private *dev_priv)
static inline void intel_gvt_resume(struct drm_i915_private *dev_priv)
{
}

static inline void intel_gvt_resume(struct drm_i915_private *dev_priv)
struct intel_gvt_mmio_table_iter {
};

static inline int intel_gvt_iterate_mmio_table(struct intel_gvt_mmio_table_iter *iter)
{
	return 0;
}
#endif

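Putting the two halves together, a GVT module would plug in roughly as below. A hedged sketch — the kvmgt_* names are illustrative stand-ins, not the actual KVMGT code:

/* Hedged sketch of a module hooking in via intel_gvt_set_ops();
 * kvmgt_* names are illustrative. */
static int kvmgt_init_device(struct drm_i915_private *i915)
{
	/* allocate and publish i915->gvt here */
	return 0;
}

static void kvmgt_clean_device(struct drm_i915_private *i915)
{
}

static void kvmgt_pm_resume(struct drm_i915_private *i915)
{
}

static const struct intel_vgpu_ops kvmgt_vgpu_ops = {
	.init_device	= kvmgt_init_device,
	.clean_device	= kvmgt_clean_device,
	.pm_resume	= kvmgt_pm_resume,
};

static int __init kvmgt_module_init(void)
{
	/* binds every i915 instance already on the list, under the mutex */
	return intel_gvt_set_ops(&kvmgt_vgpu_ops);
}

static void __exit kvmgt_module_exit(void)
{
	intel_gvt_clear_ops(&kvmgt_vgpu_ops);
}

module_init(kvmgt_module_init);
module_exit(kvmgt_module_exit);
MODULE_IMPORT_NS(I915_GVT);
MODULE_LICENSE("GPL");
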
File diff suppressed because it is too large
@@ -656,17 +656,12 @@ struct mdev_driver vfio_ccw_mdev_driver = {
	},
	.probe = vfio_ccw_mdev_probe,
	.remove = vfio_ccw_mdev_remove,
};

static const struct mdev_parent_ops vfio_ccw_mdev_ops = {
	.owner = THIS_MODULE,
	.device_driver = &vfio_ccw_mdev_driver,
	.supported_type_groups  = mdev_type_groups,
};

int vfio_ccw_mdev_reg(struct subchannel *sch)
{
	return mdev_register_device(&sch->dev, &vfio_ccw_mdev_ops);
	return mdev_register_device(&sch->dev, &vfio_ccw_mdev_driver);
}

void vfio_ccw_mdev_unreg(struct subchannel *sch)
@@ -1496,12 +1496,7 @@ static struct mdev_driver vfio_ap_matrix_driver = {
	},
	.probe = vfio_ap_mdev_probe,
	.remove = vfio_ap_mdev_remove,
};

static const struct mdev_parent_ops vfio_ap_matrix_ops = {
	.owner = THIS_MODULE,
	.device_driver = &vfio_ap_matrix_driver,
	.supported_type_groups = vfio_ap_mdev_type_groups,
	.supported_type_groups = vfio_ap_mdev_type_groups,
};

int vfio_ap_mdev_register(void)
@@ -1514,7 +1509,7 @@ int vfio_ap_mdev_register(void)
	if (ret)
		return ret;

	ret = mdev_register_device(&matrix_dev->device, &vfio_ap_matrix_ops);
	ret = mdev_register_device(&matrix_dev->device, &vfio_ap_matrix_driver);
	if (ret)
		goto err_driver;
	return 0;
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only

mdev-y := mdev_core.o mdev_sysfs.o mdev_driver.o vfio_mdev.o
mdev-y := mdev_core.o mdev_sysfs.o mdev_driver.o

obj-$(CONFIG_VFIO_MDEV) += mdev.o
@@ -89,17 +89,10 @@ void mdev_release_parent(struct kref *kref)
static void mdev_device_remove_common(struct mdev_device *mdev)
{
	struct mdev_parent *parent = mdev->type->parent;
	int ret;

	mdev_remove_sysfs_files(mdev);
	device_del(&mdev->dev);
	lockdep_assert_held(&parent->unreg_sem);
	if (parent->ops->remove) {
		ret = parent->ops->remove(mdev);
		if (ret)
			dev_err(&mdev->dev, "Remove failed: err=%d\n", ret);
	}

	/* Balances with device_initialize() */
	put_device(&mdev->dev);
}
@@ -116,12 +109,12 @@ static int mdev_device_remove_cb(struct device *dev, void *data)
/*
 * mdev_register_device : Register a device
 * @dev: device structure representing parent device.
 * @ops: Parent device operation structure to be registered.
 * @mdev_driver: Device driver to bind to the newly created mdev
 *
 * Add device to list of registered parent devices.
 * Returns a negative value on error, otherwise 0.
 */
int mdev_register_device(struct device *dev, const struct mdev_parent_ops *ops)
int mdev_register_device(struct device *dev, struct mdev_driver *mdev_driver)
{
	int ret;
	struct mdev_parent *parent;
@@ -129,9 +122,7 @@ int mdev_register_device(struct device *dev, const struct mdev_parent_ops *ops)
	char *envp[] = { env_string, NULL };

	/* check for mandatory ops */
	if (!ops || !ops->supported_type_groups)
		return -EINVAL;
	if (!ops->device_driver && (!ops->create || !ops->remove))
	if (!mdev_driver->supported_type_groups)
		return -EINVAL;

	dev = get_device(dev);
@@ -158,7 +149,7 @@ int mdev_register_device(struct device *dev, const struct mdev_parent_ops *ops)
	init_rwsem(&parent->unreg_sem);

	parent->dev = dev;
	parent->ops = ops;
	parent->mdev_driver = mdev_driver;

	if (!mdev_bus_compat_class) {
		mdev_bus_compat_class = class_compat_register("mdev_bus");
@@ -256,7 +247,7 @@ int mdev_device_create(struct mdev_type *type, const guid_t *uuid)
	int ret;
	struct mdev_device *mdev, *tmp;
	struct mdev_parent *parent = type->parent;
	struct mdev_driver *drv = parent->ops->device_driver;
	struct mdev_driver *drv = parent->mdev_driver;

	mutex_lock(&mdev_list_lock);

@@ -278,7 +269,7 @@ int mdev_device_create(struct mdev_type *type, const guid_t *uuid)
	mdev->dev.parent  = parent->dev;
	mdev->dev.bus = &mdev_bus_type;
	mdev->dev.release = mdev_device_release;
	mdev->dev.groups = parent->ops->mdev_attr_groups;
	mdev->dev.groups = mdev_device_groups;
	mdev->type = type;
	/* Pairs with the put in mdev_device_release() */
	kobject_get(&type->kobj);
@@ -297,18 +288,10 @@ int mdev_device_create(struct mdev_type *type, const guid_t *uuid)
		goto out_put_device;
	}

	if (parent->ops->create) {
		ret = parent->ops->create(mdev);
		if (ret)
			goto out_unlock;
	}

	ret = device_add(&mdev->dev);
	if (ret)
		goto out_remove;
		goto out_unlock;

	if (!drv)
		drv = &vfio_mdev_driver;
	ret = device_driver_attach(&drv->driver, &mdev->dev);
	if (ret)
		goto out_del;
@@ -325,9 +308,6 @@ int mdev_device_create(struct mdev_type *type, const guid_t *uuid)

out_del:
	device_del(&mdev->dev);
out_remove:
	if (parent->ops->remove)
		parent->ops->remove(mdev);
out_unlock:
	up_read(&parent->unreg_sem);
out_put_device:
@@ -370,28 +350,14 @@ int mdev_device_remove(struct mdev_device *mdev)

static int __init mdev_init(void)
{
	int rc;

	rc = mdev_bus_register();
	if (rc)
		return rc;
	rc = mdev_register_driver(&vfio_mdev_driver);
	if (rc)
		goto err_bus;
	return 0;
err_bus:
	mdev_bus_unregister();
	return rc;
	return bus_register(&mdev_bus_type);
}

static void __exit mdev_exit(void)
{
	mdev_unregister_driver(&vfio_mdev_driver);

	if (mdev_bus_compat_class)
		class_compat_unregister(mdev_bus_compat_class);

	mdev_bus_unregister();
	bus_unregister(&mdev_bus_type);
}

subsys_initcall(mdev_init)
@@ -74,13 +74,3 @@ void mdev_unregister_driver(struct mdev_driver *drv)
	driver_unregister(&drv->driver);
}
EXPORT_SYMBOL(mdev_unregister_driver);

int mdev_bus_register(void)
{
	return bus_register(&mdev_bus_type);
}

void mdev_bus_unregister(void)
{
	bus_unregister(&mdev_bus_type);
}
@@ -15,7 +15,7 @@ void mdev_bus_unregister(void);

struct mdev_parent {
	struct device *dev;
	const struct mdev_parent_ops *ops;
	struct mdev_driver *mdev_driver;
	struct kref ref;
	struct list_head next;
	struct kset *mdev_types_kset;
@@ -32,13 +32,13 @@ struct mdev_type {
	unsigned int type_group_id;
};

extern const struct attribute_group *mdev_device_groups[];

#define to_mdev_type_attr(_attr)	\
	container_of(_attr, struct mdev_type_attribute, attr)
#define to_mdev_type(_kobj)		\
	container_of(_kobj, struct mdev_type, kobj)

extern struct mdev_driver vfio_mdev_driver;

int  parent_create_sysfs_files(struct mdev_parent *parent);
void parent_remove_sysfs_files(struct mdev_parent *parent);

@@ -97,7 +97,7 @@ static struct mdev_type *add_mdev_supported_type(struct mdev_parent *parent,
{
	struct mdev_type *type;
	struct attribute_group *group =
		parent->ops->supported_type_groups[type_group_id];
		parent->mdev_driver->supported_type_groups[type_group_id];
	int ret;

	if (!group->name) {
@@ -154,7 +154,7 @@ static struct mdev_type *add_mdev_supported_type(struct mdev_parent *parent,
static void remove_mdev_supported_type(struct mdev_type *type)
{
	struct attribute_group *group =
		type->parent->ops->supported_type_groups[type->type_group_id];
		type->parent->mdev_driver->supported_type_groups[type->type_group_id];

	sysfs_remove_files(&type->kobj,
			   (const struct attribute **)group->attrs);
@@ -168,7 +168,7 @@ static int add_mdev_supported_type_groups(struct mdev_parent *parent)
{
	int i;

	for (i = 0; parent->ops->supported_type_groups[i]; i++) {
	for (i = 0; parent->mdev_driver->supported_type_groups[i]; i++) {
		struct mdev_type *type;

		type = add_mdev_supported_type(parent, i);
@@ -197,7 +197,6 @@ void parent_remove_sysfs_files(struct mdev_parent *parent)
		remove_mdev_supported_type(type);
	}

	sysfs_remove_groups(&parent->dev->kobj, parent->ops->dev_attr_groups);
	kset_unregister(parent->mdev_types_kset);
}

@@ -213,17 +212,10 @@ int parent_create_sysfs_files(struct mdev_parent *parent)

	INIT_LIST_HEAD(&parent->type_list);

	ret = sysfs_create_groups(&parent->dev->kobj,
				  parent->ops->dev_attr_groups);
	if (ret)
		goto create_err;

	ret = add_mdev_supported_type_groups(parent);
	if (ret)
		sysfs_remove_groups(&parent->dev->kobj,
				    parent->ops->dev_attr_groups);
	else
		return ret;
		goto create_err;
	return 0;

create_err:
	kset_unregister(parent->mdev_types_kset);
@@ -252,11 +244,20 @@ static ssize_t remove_store(struct device *dev, struct device_attribute *attr,

static DEVICE_ATTR_WO(remove);

static const struct attribute *mdev_device_attrs[] = {
static struct attribute *mdev_device_attrs[] = {
	&dev_attr_remove.attr,
	NULL,
};

static const struct attribute_group mdev_device_group = {
	.attrs = mdev_device_attrs,
};

const struct attribute_group *mdev_device_groups[] = {
	&mdev_device_group,
	NULL
};

int mdev_create_sysfs_files(struct mdev_device *mdev)
{
	struct mdev_type *type = mdev->type;
@@ -270,15 +271,8 @@ int mdev_create_sysfs_files(struct mdev_device *mdev)
	ret = sysfs_create_link(kobj, &type->kobj, "mdev_type");
	if (ret)
		goto type_link_failed;

	ret = sysfs_create_files(kobj, mdev_device_attrs);
	if (ret)
		goto create_files_failed;

	return ret;

create_files_failed:
	sysfs_remove_link(kobj, "mdev_type");
type_link_failed:
	sysfs_remove_link(mdev->type->devices_kobj, dev_name(&mdev->dev));
	return ret;
@@ -288,7 +282,6 @@ void mdev_remove_sysfs_files(struct mdev_device *mdev)
{
	struct kobject *kobj = &mdev->dev.kobj;

	sysfs_remove_files(kobj, mdev_device_attrs);
	sysfs_remove_link(kobj, "mdev_type");
	sysfs_remove_link(mdev->type->devices_kobj, dev_name(&mdev->dev));
}
@@ -1,152 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
 * VFIO based driver for Mediated device
 *
 * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
 *     Author: Neo Jia <cjia@nvidia.com>
 *             Kirti Wankhede <kwankhede@nvidia.com>
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vfio.h>
#include <linux/mdev.h>

#include "mdev_private.h"

static int vfio_mdev_open_device(struct vfio_device *core_vdev)
{
	struct mdev_device *mdev = to_mdev_device(core_vdev->dev);
	struct mdev_parent *parent = mdev->type->parent;

	if (unlikely(!parent->ops->open_device))
		return 0;

	return parent->ops->open_device(mdev);
}

static void vfio_mdev_close_device(struct vfio_device *core_vdev)
{
	struct mdev_device *mdev = to_mdev_device(core_vdev->dev);
	struct mdev_parent *parent = mdev->type->parent;

	if (likely(parent->ops->close_device))
		parent->ops->close_device(mdev);
}

static long vfio_mdev_unlocked_ioctl(struct vfio_device *core_vdev,
				     unsigned int cmd, unsigned long arg)
{
	struct mdev_device *mdev = to_mdev_device(core_vdev->dev);
	struct mdev_parent *parent = mdev->type->parent;

	if (unlikely(!parent->ops->ioctl))
		return 0;

	return parent->ops->ioctl(mdev, cmd, arg);
}

static ssize_t vfio_mdev_read(struct vfio_device *core_vdev, char __user *buf,
			      size_t count, loff_t *ppos)
{
	struct mdev_device *mdev = to_mdev_device(core_vdev->dev);
	struct mdev_parent *parent = mdev->type->parent;

	if (unlikely(!parent->ops->read))
		return -EINVAL;

	return parent->ops->read(mdev, buf, count, ppos);
}

static ssize_t vfio_mdev_write(struct vfio_device *core_vdev,
			       const char __user *buf, size_t count,
			       loff_t *ppos)
{
	struct mdev_device *mdev = to_mdev_device(core_vdev->dev);
	struct mdev_parent *parent = mdev->type->parent;

	if (unlikely(!parent->ops->write))
		return -EINVAL;

	return parent->ops->write(mdev, buf, count, ppos);
}

static int vfio_mdev_mmap(struct vfio_device *core_vdev,
			  struct vm_area_struct *vma)
{
	struct mdev_device *mdev = to_mdev_device(core_vdev->dev);
	struct mdev_parent *parent = mdev->type->parent;

	if (unlikely(!parent->ops->mmap))
		return -EINVAL;

	return parent->ops->mmap(mdev, vma);
}

static void vfio_mdev_request(struct vfio_device *core_vdev, unsigned int count)
{
	struct mdev_device *mdev = to_mdev_device(core_vdev->dev);
	struct mdev_parent *parent = mdev->type->parent;

	if (parent->ops->request)
		parent->ops->request(mdev, count);
	else if (count == 0)
		dev_notice(mdev_dev(mdev),
			   "No mdev vendor driver request callback support, blocked until released by user\n");
}

static const struct vfio_device_ops vfio_mdev_dev_ops = {
	.name		= "vfio-mdev",
	.open_device	= vfio_mdev_open_device,
	.close_device	= vfio_mdev_close_device,
	.ioctl		= vfio_mdev_unlocked_ioctl,
	.read		= vfio_mdev_read,
	.write		= vfio_mdev_write,
	.mmap		= vfio_mdev_mmap,
	.request	= vfio_mdev_request,
};

static int vfio_mdev_probe(struct mdev_device *mdev)
{
	struct vfio_device *vdev;
	int ret;

	vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
	if (!vdev)
		return -ENOMEM;

	vfio_init_group_dev(vdev, &mdev->dev, &vfio_mdev_dev_ops);
	ret = vfio_register_emulated_iommu_dev(vdev);
	if (ret)
		goto out_uninit;

	dev_set_drvdata(&mdev->dev, vdev);
	return 0;

out_uninit:
	vfio_uninit_group_dev(vdev);
	kfree(vdev);
	return ret;
}

static void vfio_mdev_remove(struct mdev_device *mdev)
{
	struct vfio_device *vdev = dev_get_drvdata(&mdev->dev);

	vfio_unregister_group_dev(vdev);
	vfio_uninit_group_dev(vdev);
	kfree(vdev);
}

struct mdev_driver vfio_mdev_driver = {
	.driver = {
		.name = "vfio_mdev",
		.owner = THIS_MODULE,
		.mod_name = KBUILD_MODNAME,
	},
	.probe	= vfio_mdev_probe,
	.remove	= vfio_mdev_remove,
};
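With this file gone, each mdev driver's probe now does what vfio_mdev_probe() did above: build and register its own vfio_device against its own vfio_device_ops. A hedged sketch of that pattern; the my_* identifiers are illustrative:

/* Hedged sketch of a driver-owned probe replacing the removed
 * vfio_mdev shim; my_* names are illustrative. */
static const struct vfio_device_ops my_vfio_dev_ops = {
	.name = "vfio-my-mdev",
	/* open_device/close_device/read/write/mmap/ioctl go here */
};

static int my_mdev_probe(struct mdev_device *mdev)
{
	struct vfio_device *vdev;
	int ret;

	vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
	if (!vdev)
		return -ENOMEM;

	vfio_init_group_dev(vdev, &mdev->dev, &my_vfio_dev_ops);
	ret = vfio_register_emulated_iommu_dev(vdev);
	if (ret) {
		vfio_uninit_group_dev(vdev);
		kfree(vdev);
		return ret;
	}
	dev_set_drvdata(&mdev->dev, vdev);
	return 0;
}
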
@ -15,7 +15,6 @@ struct mdev_type;
|
|||
struct mdev_device {
|
||||
struct device dev;
|
||||
guid_t uuid;
|
||||
void *driver_data;
|
||||
struct list_head next;
|
||||
struct mdev_type *type;
|
||||
bool active;
|
||||
|
@ -30,74 +29,6 @@ unsigned int mdev_get_type_group_id(struct mdev_device *mdev);
|
|||
unsigned int mtype_get_type_group_id(struct mdev_type *mtype);
|
||||
struct device *mtype_get_parent_dev(struct mdev_type *mtype);
|
||||
|
||||
/**
|
||||
* struct mdev_parent_ops - Structure to be registered for each parent device to
|
||||
* register the device to mdev module.
|
||||
*
|
||||
* @owner: The module owner.
|
||||
* @device_driver: Which device driver to probe() on newly created devices
|
||||
* @dev_attr_groups: Attributes of the parent device.
|
||||
* @mdev_attr_groups: Attributes of the mediated device.
|
||||
* @supported_type_groups: Attributes to define supported types. It is mandatory
|
||||
* to provide supported types.
|
||||
* @create: Called to allocate basic resources in parent device's
|
||||
* driver for a particular mediated device. It is
|
||||
* mandatory to provide create ops.
|
||||
* @mdev: mdev_device structure on of mediated device
|
||||
* that is being created
|
||||
* Returns integer: success (0) or error (< 0)
|
||||
* @remove: Called to free resources in parent device's driver for
|
||||
* a mediated device. It is mandatory to provide 'remove'
|
||||
* ops.
|
||||
* @mdev: mdev_device device structure which is being
|
||||
* destroyed
|
||||
* Returns integer: success (0) or error (< 0)
|
||||
* @read: Read emulation callback
|
||||
* @mdev: mediated device structure
|
||||
* @buf: read buffer
|
||||
* @count: number of bytes to read
|
||||
* @ppos: address.
|
||||
* Retuns number on bytes read on success or error.
|
||||
* @write: Write emulation callback
|
||||
* @mdev: mediated device structure
|
||||
* @buf: write buffer
|
||||
* @count: number of bytes to be written
|
||||
* @ppos: address.
|
||||
* Retuns number on bytes written on success or error.
|
||||
* @ioctl: IOCTL callback
|
||||
* @mdev: mediated device structure
|
||||
* @cmd: ioctl command
|
||||
* @arg: arguments to ioctl
|
||||
* @mmap: mmap callback
|
||||
* @mdev: mediated device structure
|
||||
* @vma: vma structure
|
||||
* @request: request callback to release device
|
||||
* @mdev: mediated device structure
|
||||
* @count: request sequence number
|
||||
* Parent device that support mediated device should be registered with mdev
|
||||
* module with mdev_parent_ops structure.
|
||||
**/
|
||||
struct mdev_parent_ops {
|
||||
struct module *owner;
|
||||
struct mdev_driver *device_driver;
|
||||
const struct attribute_group **dev_attr_groups;
|
||||
const struct attribute_group **mdev_attr_groups;
|
||||
struct attribute_group **supported_type_groups;
|
||||
|
||||
int (*create)(struct mdev_device *mdev);
|
||||
int (*remove)(struct mdev_device *mdev);
|
||||
int (*open_device)(struct mdev_device *mdev);
|
||||
void (*close_device)(struct mdev_device *mdev);
|
||||
ssize_t (*read)(struct mdev_device *mdev, char __user *buf,
|
||||
size_t count, loff_t *ppos);
|
||||
ssize_t (*write)(struct mdev_device *mdev, const char __user *buf,
|
||||
size_t count, loff_t *ppos);
|
||||
long (*ioctl)(struct mdev_device *mdev, unsigned int cmd,
|
||||
unsigned long arg);
|
||||
int (*mmap)(struct mdev_device *mdev, struct vm_area_struct *vma);
|
||||
void (*request)(struct mdev_device *mdev, unsigned int count);
|
||||
};
|
||||
|
||||
/* interface for exporting mdev supported type attributes */
|
||||
struct mdev_type_attribute {
|
||||
struct attribute attr;
|
||||
|
@ -122,23 +53,18 @@ struct mdev_type_attribute mdev_type_attr_##_name = \
|
|||
* struct mdev_driver - Mediated device driver
|
||||
* @probe: called when new device created
|
||||
* @remove: called when device removed
|
||||
* @supported_type_groups: Attributes to define supported types. It is mandatory
|
||||
* to provide supported types.
|
||||
* @driver: device driver structure
|
||||
*
|
||||
**/
|
||||
struct mdev_driver {
|
||||
int (*probe)(struct mdev_device *dev);
|
||||
void (*remove)(struct mdev_device *dev);
|
||||
struct attribute_group **supported_type_groups;
|
||||
struct device_driver driver;
|
||||
};
|
||||
|
||||
static inline void *mdev_get_drvdata(struct mdev_device *mdev)
|
||||
{
|
||||
return mdev->driver_data;
|
||||
}
|
||||
static inline void mdev_set_drvdata(struct mdev_device *mdev, void *data)
|
||||
{
|
||||
mdev->driver_data = data;
|
||||
}
|
||||
static inline const guid_t *mdev_uuid(struct mdev_device *mdev)
|
||||
{
|
||||
return &mdev->uuid;
|
||||
|
@@ -146,7 +72,7 @@ static inline const guid_t *mdev_uuid(struct mdev_device *mdev)

extern struct bus_type mdev_bus_type;

int mdev_register_device(struct device *dev, const struct mdev_parent_ops *ops);
int mdev_register_device(struct device *dev, struct mdev_driver *mdev_driver);
void mdev_unregister_device(struct device *dev);

int mdev_register_driver(struct mdev_driver *drv);
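With mdev_parent_ops gone, mdev_register_device() now takes the mdev_driver
itself. A minimal sketch of the resulting registration flow, assuming a
hypothetical parent driver (my_driver, my_probe, my_remove, my_type_groups
and my_parent_dev are illustrative names, not part of this change)::

	/* probe/remove create and tear down the vfio_device for each mdev */
	static struct mdev_driver my_driver = {
		.driver = {
			.name = "my_mdev",
			.owner = THIS_MODULE,
			.mod_name = KBUILD_MODNAME,
		},
		.probe = my_probe,
		.remove = my_remove,
		.supported_type_groups = my_type_groups,
	};

	static int __init my_init(void)
	{
		int ret;

		/* register the driver with the mdev core first ... */
		ret = mdev_register_driver(&my_driver);
		if (ret)
			return ret;

		/* ... then expose mdev_supported_types/ on the parent device */
		ret = mdev_register_device(&my_parent_dev, &my_driver);
		if (ret)
			mdev_unregister_driver(&my_driver);
		return ret;
	}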
@@ -1412,12 +1412,7 @@ static struct mdev_driver mbochs_driver = {
	},
	.probe = mbochs_probe,
	.remove = mbochs_remove,
	.supported_type_groups = mdev_type_groups,
};

static const struct mdev_parent_ops mdev_fops = {
	.owner = THIS_MODULE,
	.device_driver = &mbochs_driver,
	.supported_type_groups = mdev_type_groups,
};

static const struct file_operations vd_fops = {

@@ -1462,7 +1457,7 @@ static int __init mbochs_dev_init(void)
	if (ret)
		goto err_class;

	ret = mdev_register_device(&mbochs_dev, &mdev_fops);
	ret = mdev_register_device(&mbochs_dev, &mbochs_driver);
	if (ret)
		goto err_device;
@@ -723,12 +723,7 @@ static struct mdev_driver mdpy_driver = {
	},
	.probe = mdpy_probe,
	.remove = mdpy_remove,
	.supported_type_groups = mdev_type_groups,
};

static const struct mdev_parent_ops mdev_fops = {
	.owner = THIS_MODULE,
	.device_driver = &mdpy_driver,
	.supported_type_groups = mdev_type_groups,
};

static const struct file_operations vd_fops = {

@@ -771,7 +766,7 @@ static int __init mdpy_dev_init(void)
	if (ret)
		goto err_class;

	ret = mdev_register_device(&mdpy_dev, &mdev_fops);
	ret = mdev_register_device(&mdpy_dev, &mdpy_driver);
	if (ret)
		goto err_device;
@@ -1207,38 +1207,11 @@ static long mtty_ioctl(struct vfio_device *vdev, unsigned int cmd,
	return -ENOTTY;
}

static ssize_t
sample_mtty_dev_show(struct device *dev, struct device_attribute *attr,
		     char *buf)
{
	return sprintf(buf, "This is phy device\n");
}

static DEVICE_ATTR_RO(sample_mtty_dev);

static struct attribute *mtty_dev_attrs[] = {
	&dev_attr_sample_mtty_dev.attr,
	NULL,
};

static const struct attribute_group mtty_dev_group = {
	.name  = "mtty_dev",
	.attrs = mtty_dev_attrs,
};

static const struct attribute_group *mtty_dev_groups[] = {
	&mtty_dev_group,
	NULL,
};

static ssize_t
sample_mdev_dev_show(struct device *dev, struct device_attribute *attr,
		     char *buf)
{
	if (mdev_from_dev(dev))
		return sprintf(buf, "This is MDEV %s\n", dev_name(dev));

	return sprintf(buf, "\n");
	return sprintf(buf, "This is MDEV %s\n", dev_name(dev));
}

static DEVICE_ATTR_RO(sample_mdev_dev);
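The hunk above removes mtty's physical-device attribute group together with
the dev_attr_groups mechanism that published it. A parent driver that still
wanted such attributes could attach the group to its device directly, for
example with sysfs_create_group() (a sketch under that assumption, not
something this commit does; the helper names are hypothetical)::

	static int mtty_create_phy_attrs(void)
	{
		/* publish the group on the parent device's kobject */
		return sysfs_create_group(&mtty_dev.dev.kobj, &mtty_dev_group);
	}

	static void mtty_remove_phy_attrs(void)
	{
		sysfs_remove_group(&mtty_dev.dev.kobj, &mtty_dev_group);
	}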
@@ -1328,13 +1301,7 @@ static struct mdev_driver mtty_driver = {
	},
	.probe = mtty_probe,
	.remove = mtty_remove,
	.supported_type_groups = mdev_type_groups,
};

static const struct mdev_parent_ops mdev_fops = {
	.owner = THIS_MODULE,
	.device_driver = &mtty_driver,
	.dev_attr_groups = mtty_dev_groups,
	.supported_type_groups = mdev_type_groups,
};

static void mtty_device_release(struct device *dev)

@@ -1385,7 +1352,7 @@ static int __init mtty_dev_init(void)
	if (ret)
		goto err_class;

	ret = mdev_register_device(&mtty_dev.dev, &mdev_fops);
	ret = mdev_register_device(&mtty_dev.dev, &mtty_driver);
	if (ret)
		goto err_device;
	return 0;
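For symmetry with the init path above, teardown unwinds in the reverse order
(a sketch only; mtty's full exit path lies outside this hunk)::

	static void __exit mtty_dev_exit(void)
	{
		/* remove the parent's mdev_supported_types/ sysfs first ... */
		mdev_unregister_device(&mtty_dev.dev);
		/* ... then drop the driver registration */
		mdev_unregister_driver(&mtty_driver);
	}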