drm/i915/gvt: map pfn for PTE entry in kvm

When the host i915 IOMMU is enabled, gvt needs to use a DMA-mapped pfn in
the PTE entry. So before returning the pfn obtained from kvm, map it and
return the mapped address instead, which is the so-called iova.

Signed-off-by: Chuanxiao Dong <chuanxiao.dong@intel.com>
Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
Chuanxiao Dong 2017-02-09 11:38:01 +08:00 committed by Zhenyu Wang
parent 5de6bd4c23
commit b86dc6ed20
1 changed file with 49 additions and 13 deletions

drivers/gpu/drm/i915/gvt/kvmgt.c

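The gist of the change is which frame number GVT writes into the shadow PTE.
A minimal illustration with hypothetical values and a stand-in encoder (this
is not the hardware PTE layout; 4 KiB pages assumed):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

/* Stand-in PTE encoder: page frame in the high bits, flags in the low. */
static uint64_t make_pte(uint64_t frame, uint64_t flags)
{
        return (frame << PAGE_SHIFT) | flags;
}

int main(void)
{
        uint64_t pfn  = 0x1b2c3;   /* raw host page frame from kvm/vfio */
        uint64_t iova = 0x00042;   /* frame of the IOMMU-mapped bus address */

        /* Without an IOMMU the device dereferences physical addresses, so
         * the raw pfn works; with the IOMMU enabled the GPU only sees bus
         * addresses, so the shadow PTE must carry the iova frame instead. */
        printf("no-iommu PTE: %#llx\n", (unsigned long long)make_pte(pfn, 0x3));
        printf("iommu PTE:    %#llx\n", (unsigned long long)make_pte(iova, 0x3));
        return 0;
}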
@@ -77,7 +77,7 @@ struct kvmgt_guest_info {
 struct gvt_dma {
 	struct rb_node node;
 	gfn_t gfn;
-	kvm_pfn_t pfn;
+	unsigned long iova;
 };
 
 static inline bool handle_valid(unsigned long handle)
@@ -89,6 +89,35 @@ static int kvmgt_guest_init(struct mdev_device *mdev);
 static void intel_vgpu_release_work(struct work_struct *work);
 static bool kvmgt_guest_exit(struct kvmgt_guest_info *info);
 
+static int gvt_dma_map_iova(struct intel_vgpu *vgpu, kvm_pfn_t pfn,
+		unsigned long *iova)
+{
+	struct page *page;
+	struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
+	dma_addr_t daddr;
+
+	page = pfn_to_page(pfn);
+	if (is_error_page(page))
+		return -EFAULT;
+
+	daddr = dma_map_page(dev, page, 0, PAGE_SIZE,
+			PCI_DMA_BIDIRECTIONAL);
+	if (dma_mapping_error(dev, daddr))
+		return -ENOMEM;
+
+	*iova = (unsigned long)(daddr >> PAGE_SHIFT);
+	return 0;
+}
+
+static void gvt_dma_unmap_iova(struct intel_vgpu *vgpu, unsigned long iova)
+{
+	struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
+	dma_addr_t daddr;
+
+	daddr = (dma_addr_t)(iova << PAGE_SHIFT);
+	dma_unmap_page(dev, daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+}
+
 static struct gvt_dma *__gvt_cache_find(struct intel_vgpu *vgpu, gfn_t gfn)
 {
 	struct rb_node *node = vgpu->vdev.cache.rb_node;
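A detail worth noting in gvt_dma_map_iova(): the cached value is the page
frame of the bus address, not the bus address itself. dma_map_page() maps a
whole page here, so the returned dma_addr_t is page-aligned and
daddr >> PAGE_SHIFT is lossless; gvt_dma_unmap_iova() recovers the address
with the opposite shift. A toy userspace round-trip of that trick (not
kernel code; assumes 4 KiB pages, i.e. PAGE_SHIFT == 12):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12   /* assumption: 4 KiB pages */

int main(void)
{
        uint64_t daddr = 0x1b2c3000;                   /* page-aligned bus address */
        unsigned long iova = daddr >> PAGE_SHIFT;      /* what gvt_dma_map_iova() stores */
        uint64_t back = (uint64_t)iova << PAGE_SHIFT;  /* what gvt_dma_unmap_iova() rebuilds */

        assert(back == daddr);
        printf("daddr=%#llx iova=%#lx daddr'=%#llx\n",
               (unsigned long long)daddr, iova, (unsigned long long)back);
        return 0;
}

Storing the frame keeps the cached iova in the same unsigned long slot the
pfn used to occupy, so the rb-tree cache below only needs type and name
changes.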
@@ -111,21 +140,22 @@ static struct gvt_dma *__gvt_cache_find(struct intel_vgpu *vgpu, gfn_t gfn)
 	return ret;
 }
 
-static kvm_pfn_t gvt_cache_find(struct intel_vgpu *vgpu, gfn_t gfn)
+static unsigned long gvt_cache_find(struct intel_vgpu *vgpu, gfn_t gfn)
 {
 	struct gvt_dma *entry;
-	kvm_pfn_t pfn;
+	unsigned long iova;
 
 	mutex_lock(&vgpu->vdev.cache_lock);
 
 	entry = __gvt_cache_find(vgpu, gfn);
-	pfn = (entry == NULL) ? 0 : entry->pfn;
+	iova = (entry == NULL) ? INTEL_GVT_INVALID_ADDR : entry->iova;
 
 	mutex_unlock(&vgpu->vdev.cache_lock);
-	return pfn;
+	return iova;
 }
 
-static void gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn, kvm_pfn_t pfn)
+static void gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn,
+		unsigned long iova)
 {
 	struct gvt_dma *new, *itr;
 	struct rb_node **link = &vgpu->vdev.cache.rb_node, *parent = NULL;
@@ -135,7 +165,7 @@ static void gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn, kvm_pfn_t pfn)
 		return;
 
 	new->gfn = gfn;
-	new->pfn = pfn;
+	new->iova = iova;
 
 	mutex_lock(&vgpu->vdev.cache_lock);
 	while (*link) {
@@ -182,6 +212,7 @@ static void gvt_cache_remove(struct intel_vgpu *vgpu, gfn_t gfn)
 	}
 
 	g1 = gfn;
+	gvt_dma_unmap_iova(vgpu, this->iova);
 	rc = vfio_unpin_pages(dev, &g1, 1);
 	WARN_ON(rc != 1);
 	__gvt_cache_remove_entry(vgpu, this);
@@ -204,6 +235,7 @@ static void gvt_cache_destroy(struct intel_vgpu *vgpu)
 	mutex_lock(&vgpu->vdev.cache_lock);
 	while ((node = rb_first(&vgpu->vdev.cache))) {
 		dma = rb_entry(node, struct gvt_dma, node);
+		gvt_dma_unmap_iova(vgpu, dma->iova);
 		gfn = dma->gfn;
 
 		vfio_unpin_pages(dev, &gfn, 1);
@@ -1348,7 +1380,7 @@ static int kvmgt_inject_msi(unsigned long handle, u32 addr, u16 data)
 
 static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn)
 {
-	unsigned long pfn;
+	unsigned long iova, pfn;
 	struct kvmgt_guest_info *info;
 	struct device *dev;
 	int rc;
@@ -1357,9 +1389,9 @@ static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn)
 		return INTEL_GVT_INVALID_ADDR;
 
 	info = (struct kvmgt_guest_info *)handle;
-	pfn = gvt_cache_find(info->vgpu, gfn);
-	if (pfn != 0)
-		return pfn;
+	iova = gvt_cache_find(info->vgpu, gfn);
+	if (iova != INTEL_GVT_INVALID_ADDR)
+		return iova;
 
 	pfn = INTEL_GVT_INVALID_ADDR;
 	dev = mdev_dev(info->vgpu->vdev.mdev);
@@ -1368,9 +1400,13 @@ static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn)
 		gvt_err("vfio_pin_pages failed for gfn 0x%lx: %d\n", gfn, rc);
 		return INTEL_GVT_INVALID_ADDR;
 	}
+	/* transfer to host iova for GFX to use DMA */
+	rc = gvt_dma_map_iova(info->vgpu, pfn, &iova);
+	if (rc)
+		gvt_err("gvt_dma_map_iova failed for gfn: 0x%lx\n", gfn);
 
-	gvt_cache_add(info->vgpu, gfn, pfn);
-	return pfn;
+	gvt_cache_add(info->vgpu, gfn, iova);
+	return iova;
 }
 
 static int kvmgt_rw_gpa(unsigned long handle, unsigned long gpa,
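
To summarize the patched lookup path: kvmgt_gfn_to_pfn() now resolves a gfn
to an iova, hitting the per-vgpu cache first and pinning plus DMA-mapping
only on a miss. A compressed userspace model of that control flow
(fake_pin, fake_map, and the flat array are illustrative stand-ins; the
kernel uses vfio_pin_pages(), gvt_dma_map_iova(), and an rb-tree):

#include <stdio.h>

#define INVALID_ADDR	(~0UL)
#define CACHE_SLOTS	64

static unsigned long cache[CACHE_SLOTS];	/* gfn -> iova; 0 means empty */

/* Hypothetical stand-ins, not the kernel APIs. */
static unsigned long fake_pin(unsigned long gfn)  { return gfn + 0x100; }
static unsigned long fake_map(unsigned long pfn)  { return pfn | 0x4000; }

static unsigned long gfn_to_iova(unsigned long gfn)
{
        unsigned long pfn, iova;

        if (gfn >= CACHE_SLOTS)
                return INVALID_ADDR;
        if (cache[gfn])
                return cache[gfn];	/* cache hit: already pinned and mapped */

        pfn = fake_pin(gfn);		/* kernel: vfio_pin_pages() */
        iova = fake_map(pfn);		/* kernel: gvt_dma_map_iova() */
        cache[gfn] = iova;		/* kernel: gvt_cache_add() */
        return iova;
}

int main(void)
{
        printf("first lookup:  %#lx (slow path)\n", gfn_to_iova(5));
        printf("second lookup: %#lx (cache hit)\n", gfn_to_iova(5));
        return 0;
}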