mirror of https://gitee.com/openkylin/linux.git
drm/virtio: add dma sync for dma mapped virtio gpu framebuffer pages
With virtio gpu ttm pages being dma-mapped, a dma sync is needed when swiotlb is used as a bounce buffer, before the TRANSFER_TO_HOST_2D/3D commands are sent.

Signed-off-by: Jiandi An <jiandi.an@amd.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20180919070931.91168-1-jiandi.an@amd.com
Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
parent 3536faefc0
commit 8f44ca2233
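In short: the framebuffer's backing pages are dma-mapped, so before each TRANSFER_TO_HOST_2D/3D command the CPU's writes must be pushed to the device with dma_sync_sg_for_device(); with swiotlb this is what copies the data into the bounce buffer. A minimal sketch of that streaming-DMA pattern follows (a hedged illustration, not code from this commit; example_flush_fb_to_device() and its parameters are made up for the example):

/*
 * Sketch only: the general streaming-DMA rule the patch applies.
 * The pages are dma-mapped once; every time the CPU has dirtied them
 * and the device is about to read them, ownership is handed back to
 * the device before the command is queued.
 */
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static void example_flush_fb_to_device(struct device *dma_dev,
				       struct sg_table *fb_pages)
{
	/* With swiotlb, this copies the CPU writes into the bounce
	 * buffer that the device actually reads from. */
	dma_sync_sg_for_device(dma_dev, fb_pages->sgl, fb_pages->nents,
			       DMA_TO_DEVICE);
}

In the hunks below the equivalent call uses vgdev->vdev->dev.parent as the DMA device and obj->pages as the scatter-gather table, guarded by use_dma_api so it only runs when the virtio device is actually going through the DMA API.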
drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -36,6 +36,7 @@
 #include <drm/drm_atomic.h>
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_encoder.h>
+#include <drm/drm_fb_helper.h>
 #include <drm/ttm/ttm_bo_api.h>
 #include <drm/ttm/ttm_bo_driver.h>
 #include <drm/ttm/ttm_placement.h>
@@ -133,6 +134,13 @@ struct virtio_gpu_framebuffer {
 #define to_virtio_gpu_framebuffer(x) \
 	container_of(x, struct virtio_gpu_framebuffer, base)
 
+struct virtio_gpu_fbdev {
+	struct drm_fb_helper helper;
+	struct virtio_gpu_framebuffer vgfb;
+	struct virtio_gpu_device *vgdev;
+	struct delayed_work work;
+};
+
 struct virtio_gpu_mman {
 	struct ttm_bo_global_ref bo_global_ref;
 	struct drm_global_reference mem_global_ref;
drivers/gpu/drm/virtio/virtgpu_fb.c
@@ -29,13 +29,6 @@
 
 #define VIRTIO_GPU_FBCON_POLL_PERIOD (HZ / 60)
 
-struct virtio_gpu_fbdev {
-	struct drm_fb_helper helper;
-	struct virtio_gpu_framebuffer vgfb;
-	struct virtio_gpu_device *vgdev;
-	struct delayed_work work;
-};
-
 static int virtio_gpu_dirty_update(struct virtio_gpu_framebuffer *fb,
 				   bool store, int x, int y,
 				   int width, int height)
drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -490,6 +490,15 @@ void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
 {
 	struct virtio_gpu_transfer_to_host_2d *cmd_p;
 	struct virtio_gpu_vbuffer *vbuf;
+	struct virtio_gpu_fbdev *vgfbdev = vgdev->vgfbdev;
+	struct virtio_gpu_framebuffer *fb = &vgfbdev->vgfb;
+	struct virtio_gpu_object *obj = gem_to_virtio_gpu_obj(fb->base.obj[0]);
+	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
+
+	if (use_dma_api)
+		dma_sync_sg_for_device(vgdev->vdev->dev.parent,
+				       obj->pages->sgl, obj->pages->nents,
+				       DMA_TO_DEVICE);
 
 	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
 	memset(cmd_p, 0, sizeof(*cmd_p));
@@ -789,6 +798,15 @@ void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
 {
 	struct virtio_gpu_transfer_host_3d *cmd_p;
 	struct virtio_gpu_vbuffer *vbuf;
+	struct virtio_gpu_fbdev *vgfbdev = vgdev->vgfbdev;
+	struct virtio_gpu_framebuffer *fb = &vgfbdev->vgfb;
+	struct virtio_gpu_object *obj = gem_to_virtio_gpu_obj(fb->base.obj[0]);
+	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
+
+	if (use_dma_api)
+		dma_sync_sg_for_device(vgdev->vdev->dev.parent,
+				       obj->pages->sgl, obj->pages->nents,
+				       DMA_TO_DEVICE);
 
 	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
 	memset(cmd_p, 0, sizeof(*cmd_p));