drm-misc-next for 5.2:
UAPI Changes:
- None

Cross-subsystem Changes:
- None

Core Changes:
- Fix compilation when CONFIG_FBDEV not selected (Daniel)

Driver Changes:
- virtio: package function args in virtio_gpu_object_params (Gerd)

Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: Gerd Hoffmann <kraxel@redhat.com>
-----BEGIN PGP SIGNATURE-----

iQEzBAABCgAdFiEEfxcpfMSgdnQMs+QqlvcN/ahKBwoFAlydEOoACgkQlvcN/ahK
BwoDbQf/c2KIamHInqOQ8LfRgzSXf8CYvJtra1JUJXqMXgiiyxy/wtW8Agbxt3ky
o1gwBzJGsXG2lX/IpO4MqdyAhR7ZhsV0SYWkh66UQ2jiLkALCgk7X/y9fB36r0F9
9lvebabd/QT64Ef1CYg2GUa+OCZjE7vzNuyN7rw1L7eaKDkn5o45OCXAt62vy0gB
bZmbNEnGgH1b7FhAiW7hiHOi874JWkPZ6eluTir8IzJZZRUuvhbqVHERA5d4kae9
1rWUBGBx6tC9JjSWKlspc/mNXteLJlzoUqgzueDklhQ9bPri0g7kgf7Au8VBRVam
5aWvefiIPdbIfToX2uqRVqP/XQW7zw==
=ibiC
-----END PGP SIGNATURE-----

Merge tag 'drm-misc-next-2019-03-28-1' of git://anongit.freedesktop.org/drm/drm-misc into drm-next

drm-misc-next for 5.2:

UAPI Changes:
- Remove unused DRM_DISPLAY_INFO_LEN (Ville)

Cross-subsystem Changes:
- None

Core Changes:
- Fix compilation when CONFIG_FBDEV not selected (Daniel)
- fbdev: Make skip_vt_switch default (Daniel)
- Merge fb_helper_fill_fix, fb_helper_fill_var into fb_helper_fill_info (Daniel)
- Remove unused fields in connector, display_info, and edid_quirks (Ville)

Driver Changes:
- virtio: package function args in virtio_gpu_object_params (Gerd)
- vkms: Fix potential NULL-dereference bug (Kangjie)

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Sean Paul <sean@poorly.run>
Link: https://patchwork.freedesktop.org/patch/msgid/20190328183045.GA44823@art_vandelay
commit b4e4538a0a
@@ -17,7 +17,6 @@ Owner Module/Drivers,Group,Property Name,Type,Property Values,Object attached,De
,Virtual GPU,“suggested X”,RANGE,"Min=0, Max=0xffffffff",Connector,property to suggest an X offset for a connector
,,“suggested Y”,RANGE,"Min=0, Max=0xffffffff",Connector,property to suggest an Y offset for a connector
,Optional,"""aspect ratio""",ENUM,"{ ""None"", ""4:3"", ""16:9"" }",Connector,TDB
,Optional,"""content type""",ENUM,"{ ""No Data"", ""Graphics"", ""Photo"", ""Cinema"", ""Game"" }",Connector,TBD
i915,Generic,"""Broadcast RGB""",ENUM,"{ ""Automatic"", ""Full"", ""Limited 16:235"" }",Connector,"When this property is set to Limited 16:235 and CTM is set, the hardware will be programmed with the result of the multiplication of CTM by the limited range matrix to ensure the pixels normaly in the range 0..1.0 are remapped to the range 16/255..235/255."
,,“audio”,ENUM,"{ ""force-dvi"", ""off"", ""auto"", ""on"" }",Connector,TBD
,SDVO-TV,“mode”,ENUM,"{ ""NTSC_M"", ""NTSC_J"", ""NTSC_443"", ""PAL_B"" } etc.",Connector,TBD

@@ -49,12 +49,11 @@
static int
amdgpufb_open(struct fb_info *info, int user)
{
struct amdgpu_fbdev *rfbdev = info->par;
struct amdgpu_device *adev = rfbdev->adev;
int ret = pm_runtime_get_sync(adev->ddev->dev);
struct drm_fb_helper *fb_helper = info->par;
int ret = pm_runtime_get_sync(fb_helper->dev->dev);
if (ret < 0 && ret != -EACCES) {
pm_runtime_mark_last_busy(adev->ddev->dev);
pm_runtime_put_autosuspend(adev->ddev->dev);
pm_runtime_mark_last_busy(fb_helper->dev->dev);
pm_runtime_put_autosuspend(fb_helper->dev->dev);
return ret;
}
return 0;

@@ -63,11 +62,10 @@ amdgpufb_open(struct fb_info *info, int user)
static int
amdgpufb_release(struct fb_info *info, int user)
{
struct amdgpu_fbdev *rfbdev = info->par;
struct amdgpu_device *adev = rfbdev->adev;
struct drm_fb_helper *fb_helper = info->par;

pm_runtime_mark_last_busy(adev->ddev->dev);
pm_runtime_put_autosuspend(adev->ddev->dev);
pm_runtime_mark_last_busy(fb_helper->dev->dev);
pm_runtime_put_autosuspend(fb_helper->dev->dev);
return 0;
}

@@ -233,9 +231,6 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
goto out;
}

info->par = rfbdev;
info->skip_vt_switch = true;

ret = amdgpu_display_framebuffer_init(adev->ddev, &rfbdev->rfb,
&mode_cmd, gobj);
if (ret) {

@@ -248,10 +243,6 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
/* setup helper */
rfbdev->helper.fb = fb;

strcpy(info->fix.id, "amdgpudrmfb");

drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth);

info->fbops = &amdgpufb_ops;

tmp = amdgpu_bo_gpu_offset(abo) - adev->gmc.vram_start;

@@ -260,7 +251,7 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
info->screen_base = amdgpu_bo_kptr(abo);
info->screen_size = amdgpu_bo_size(abo);

drm_fb_helper_fill_var(info, &rfbdev->helper, sizes->fb_width, sizes->fb_height);
drm_fb_helper_fill_info(info, &rfbdev->helper, sizes);

/* setup aperture base/size for vesafb takeover */
info->apertures->ranges[0].base = adev->ddev->mode_config.fb_base;

@ -78,8 +78,6 @@ static int armada_fbdev_create(struct drm_fb_helper *fbh,
|
|||
goto err_fballoc;
|
||||
}
|
||||
|
||||
strlcpy(info->fix.id, "armada-drmfb", sizeof(info->fix.id));
|
||||
info->par = fbh;
|
||||
info->fbops = &armada_fb_ops;
|
||||
info->fix.smem_start = obj->phys_addr;
|
||||
info->fix.smem_len = obj->obj.size;
|
||||
|
@ -87,9 +85,7 @@ static int armada_fbdev_create(struct drm_fb_helper *fbh,
|
|||
info->screen_base = ptr;
|
||||
fbh->fb = &dfb->fb;
|
||||
|
||||
drm_fb_helper_fill_fix(info, dfb->fb.pitches[0],
|
||||
dfb->fb.format->depth);
|
||||
drm_fb_helper_fill_var(info, fbh, sizes->fb_width, sizes->fb_height);
|
||||
drm_fb_helper_fill_info(info, fbh, sizes);
|
||||
|
||||
DRM_DEBUG_KMS("allocated %dx%d %dbpp fb: 0x%08llx\n",
|
||||
dfb->fb.width, dfb->fb.height, dfb->fb.format->cpp[0] * 8,
|
||||
|
|
|
@ -259,7 +259,7 @@ struct ast_framebuffer {
|
|||
};
|
||||
|
||||
struct ast_fbdev {
|
||||
struct drm_fb_helper helper;
|
||||
struct drm_fb_helper helper; /* must be first */
|
||||
struct ast_framebuffer afb;
|
||||
void *sysram;
|
||||
int size;
|
||||
|
|
|
@ -217,8 +217,6 @@ static int astfb_create(struct drm_fb_helper *helper,
|
|||
ret = PTR_ERR(info);
|
||||
goto out;
|
||||
}
|
||||
info->par = afbdev;
|
||||
|
||||
ret = ast_framebuffer_init(dev, &afbdev->afb, &mode_cmd, gobj);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
@ -229,15 +227,12 @@ static int astfb_create(struct drm_fb_helper *helper,
|
|||
fb = &afbdev->afb.base;
|
||||
afbdev->helper.fb = fb;
|
||||
|
||||
strcpy(info->fix.id, "astdrmfb");
|
||||
|
||||
info->fbops = &astfb_ops;
|
||||
|
||||
info->apertures->ranges[0].base = pci_resource_start(dev->pdev, 0);
|
||||
info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 0);
|
||||
|
||||
drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth);
|
||||
drm_fb_helper_fill_var(info, &afbdev->helper, sizes->fb_width, sizes->fb_height);
|
||||
drm_fb_helper_fill_info(info, &afbdev->helper, sizes);
|
||||
|
||||
info->screen_base = sysram;
|
||||
info->screen_size = size;
|
||||
|
|
|
@ -143,7 +143,7 @@ struct cirrus_device {
|
|||
|
||||
|
||||
struct cirrus_fbdev {
|
||||
struct drm_fb_helper helper;
|
||||
struct drm_fb_helper helper; /* must be first */
|
||||
struct drm_framebuffer *gfb;
|
||||
void *sysram;
|
||||
int size;
|
||||
|
|
|
@ -195,8 +195,6 @@ static int cirrusfb_create(struct drm_fb_helper *helper,
|
|||
goto err_vfree;
|
||||
}
|
||||
|
||||
info->par = gfbdev;
|
||||
|
||||
fb = kzalloc(sizeof(*fb), GFP_KERNEL);
|
||||
if (!fb) {
|
||||
ret = -ENOMEM;
|
||||
|
@ -214,13 +212,9 @@ static int cirrusfb_create(struct drm_fb_helper *helper,
|
|||
/* setup helper */
|
||||
gfbdev->helper.fb = fb;
|
||||
|
||||
strcpy(info->fix.id, "cirrusdrmfb");
|
||||
|
||||
info->fbops = &cirrusfb_ops;
|
||||
|
||||
drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth);
|
||||
drm_fb_helper_fill_var(info, &gfbdev->helper, sizes->fb_width,
|
||||
sizes->fb_height);
|
||||
drm_fb_helper_fill_info(info, &gfbdev->helper, sizes);
|
||||
|
||||
/* setup aperture base/size for vesafb takeover */
|
||||
info->apertures->ranges[0].base = cdev->dev->mode_config.fb_base;
|
||||
|
|
|
@ -68,8 +68,6 @@
|
|||
* maximum size and use that.
|
||||
*/
|
||||
#define EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE (1 << 4)
|
||||
/* Monitor forgot to set the first detailed is preferred bit. */
|
||||
#define EDID_QUIRK_FIRST_DETAILED_PREFERRED (1 << 5)
|
||||
/* use +hsync +vsync for detailed mode */
|
||||
#define EDID_QUIRK_DETAILED_SYNC_PP (1 << 6)
|
||||
/* Force reduced-blanking timings for detailed modes */
|
||||
|
@ -107,8 +105,6 @@ static const struct edid_quirk {
|
|||
{ "ACR", 44358, EDID_QUIRK_PREFER_LARGE_60 },
|
||||
/* Acer F51 */
|
||||
{ "API", 0x7602, EDID_QUIRK_PREFER_LARGE_60 },
|
||||
/* Unknown Acer */
|
||||
{ "ACR", 2423, EDID_QUIRK_FIRST_DETAILED_PREFERRED },
|
||||
|
||||
/* AEO model 0 reports 8 bpc, but is a 6 bpc panel */
|
||||
{ "AEO", 0, EDID_QUIRK_FORCE_6BPC },
|
||||
|
@ -145,12 +141,6 @@ static const struct edid_quirk {
|
|||
{ "LPL", 0, EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE },
|
||||
{ "LPL", 0x2a00, EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE },
|
||||
|
||||
/* Philips 107p5 CRT */
|
||||
{ "PHL", 57364, EDID_QUIRK_FIRST_DETAILED_PREFERRED },
|
||||
|
||||
/* Proview AY765C */
|
||||
{ "PTS", 765, EDID_QUIRK_FIRST_DETAILED_PREFERRED },
|
||||
|
||||
/* Samsung SyncMaster 205BW. Note: irony */
|
||||
{ "SAM", 541, EDID_QUIRK_DETAILED_SYNC_PP },
|
||||
/* Samsung SyncMaster 22[5-6]BW */
|
||||
|
|
|
@@ -934,6 +934,7 @@ struct fb_info *drm_fb_helper_alloc_fbi(struct drm_fb_helper *fb_helper)
}

fb_helper->fbdev = info;
info->skip_vt_switch = true;

return info;

@@ -2036,21 +2037,8 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
return 0;
}

/**
* drm_fb_helper_fill_fix - initializes fixed fbdev information
* @info: fbdev registered by the helper
* @pitch: desired pitch
* @depth: desired depth
*
* Helper to fill in the fixed fbdev information useful for a non-accelerated
* fbdev emulations. Drivers which support acceleration methods which impose
* additional constraints need to set up their own limits.
*
* Drivers should call this (or their equivalent setup code) from their
* &drm_fb_helper_funcs.fb_probe callback.
*/
void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
uint32_t depth)
static void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
uint32_t depth)
{
info->fix.type = FB_TYPE_PACKED_PIXELS;
info->fix.visual = depth == 8 ? FB_VISUAL_PSEUDOCOLOR :

@@ -2065,24 +2053,10 @@ void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,

info->fix.line_length = pitch;
}
EXPORT_SYMBOL(drm_fb_helper_fill_fix);

/**
* drm_fb_helper_fill_var - initalizes variable fbdev information
* @info: fbdev instance to set up
* @fb_helper: fb helper instance to use as template
* @fb_width: desired fb width
* @fb_height: desired fb height
*
* Sets up the variable fbdev metainformation from the given fb helper instance
* and the drm framebuffer allocated in &drm_fb_helper.fb.
*
* Drivers should call this (or their equivalent setup code) from their
* &drm_fb_helper_funcs.fb_probe callback after having allocated the fbdev
* backing storage framebuffer.
*/
void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helper,
uint32_t fb_width, uint32_t fb_height)
static void drm_fb_helper_fill_var(struct fb_info *info,
struct drm_fb_helper *fb_helper,
uint32_t fb_width, uint32_t fb_height)
{
struct drm_framebuffer *fb = fb_helper->fb;

@@ -2102,7 +2076,36 @@ void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helpe
info->var.xres = fb_width;
info->var.yres = fb_height;
}
EXPORT_SYMBOL(drm_fb_helper_fill_var);

/**
* drm_fb_helper_fill_info - initializes fbdev information
* @info: fbdev instance to set up
* @fb_helper: fb helper instance to use as template
* @sizes: describes fbdev size and scanout surface size
*
* Sets up the variable and fixed fbdev metainformation from the given fb helper
* instance and the drm framebuffer allocated in &drm_fb_helper.fb.
*
* Drivers should call this (or their equivalent setup code) from their
* &drm_fb_helper_funcs.fb_probe callback after having allocated the fbdev
* backing storage framebuffer.
*/
void drm_fb_helper_fill_info(struct fb_info *info,
struct drm_fb_helper *fb_helper,
struct drm_fb_helper_surface_size *sizes)
{
struct drm_framebuffer *fb = fb_helper->fb;

drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth);
drm_fb_helper_fill_var(info, fb_helper,
sizes->fb_width, sizes->fb_height);

info->par = fb_helper;
snprintf(info->fix.id, sizeof(info->fix.id), "%sdrmfb",
fb_helper->dev->driver->name);

}
EXPORT_SYMBOL(drm_fb_helper_fill_info);
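For context, the new helper above is what every driver conversion in this pull boils down to. A minimal sketch of a converted &drm_fb_helper_funcs.fb_probe implementation, assuming a typical helper-based driver; the foo_* names are placeholders and not taken from this commit:

static int foo_fb_probe(struct drm_fb_helper *helper,
			struct drm_fb_helper_surface_size *sizes)
{
	struct fb_info *info;
	struct drm_framebuffer *fb;

	/* allocate the fbdev info; this now also sets info->skip_vt_switch */
	info = drm_fb_helper_alloc_fbi(helper);
	if (IS_ERR(info))
		return PTR_ERR(info);

	/* driver-specific: create the backing framebuffer and scanout buffer */
	fb = foo_create_backing_fb(helper, sizes);
	if (IS_ERR(fb))
		return PTR_ERR(fb);
	helper->fb = fb;

	info->fbops = &foo_fb_ops;
	info->screen_base = foo_bo_vmap(fb);
	info->screen_size = fb->height * fb->pitches[0];

	/*
	 * Replaces the old drm_fb_helper_fill_fix() + drm_fb_helper_fill_var()
	 * pair and also sets info->par and the "<driver>drmfb" fix.id string.
	 */
	drm_fb_helper_fill_info(info, helper, sizes);

	return 0;
}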
static int drm_fb_helper_probe_connector_modes(struct drm_fb_helper *fb_helper,
uint32_t maxX,

@@ -2780,9 +2783,8 @@ __drm_fb_helper_initial_config_and_unlock(struct drm_fb_helper *fb_helper,
*
* This function will call down into the &drm_fb_helper_funcs.fb_probe callback
* to let the driver allocate and initialize the fbdev info structure and the
* drm framebuffer used to back the fbdev. drm_fb_helper_fill_var() and
* drm_fb_helper_fill_fix() are provided as helpers to setup simple default
* values for the fbdev info structure.
* drm framebuffer used to back the fbdev. drm_fb_helper_fill_info() is provided
* as a helper to setup simple default values for the fbdev info structure.
*
* HANG DEBUGGING:
*

@@ -3151,7 +3153,6 @@ int drm_fb_helper_generic_probe(struct drm_fb_helper *fb_helper,
if (IS_ERR(fbi))
return PTR_ERR(fbi);

fbi->par = fb_helper;
fbi->fbops = &drm_fbdev_fb_ops;
fbi->screen_size = fb->height * fb->pitches[0];
fbi->fix.smem_len = fbi->screen_size;

@@ -3162,10 +3163,7 @@ int drm_fb_helper_generic_probe(struct drm_fb_helper *fb_helper,
fbi->fix.smem_start =
page_to_phys(virt_to_page(fbi->screen_buffer));
#endif
strcpy(fbi->fix.id, "DRM emulated");

drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->format->depth);
drm_fb_helper_fill_var(fbi, fb_helper, sizes->fb_width, sizes->fb_height);
drm_fb_helper_fill_info(fbi, fb_helper, sizes);

if (fb->funcs->dirty) {
struct fb_ops *fbops;

@ -87,11 +87,9 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
|
|||
return PTR_ERR(fbi);
|
||||
}
|
||||
|
||||
fbi->par = helper;
|
||||
fbi->fbops = &exynos_drm_fb_ops;
|
||||
|
||||
drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->format->depth);
|
||||
drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height);
|
||||
drm_fb_helper_fill_info(fbi, helper, sizes);
|
||||
|
||||
nr_pages = exynos_gem->size >> PAGE_SHIFT;
|
||||
|
||||
|
|
|
@ -389,7 +389,6 @@ static int psbfb_create(struct psb_fbdev *fbdev,
|
|||
ret = PTR_ERR(info);
|
||||
goto out;
|
||||
}
|
||||
info->par = fbdev;
|
||||
|
||||
mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);
|
||||
|
||||
|
@ -402,9 +401,6 @@ static int psbfb_create(struct psb_fbdev *fbdev,
|
|||
|
||||
fbdev->psb_fb_helper.fb = fb;
|
||||
|
||||
drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth);
|
||||
strcpy(info->fix.id, "psbdrmfb");
|
||||
|
||||
if (dev_priv->ops->accel_2d && pitch_lines > 8) /* 2D engine */
|
||||
info->fbops = &psbfb_ops;
|
||||
else if (gtt_roll) { /* GTT rolling seems best */
|
||||
|
@ -427,8 +423,7 @@ static int psbfb_create(struct psb_fbdev *fbdev,
|
|||
info->apertures->ranges[0].size = dev_priv->gtt.stolen_size;
|
||||
}
|
||||
|
||||
drm_fb_helper_fill_var(info, &fbdev->psb_fb_helper,
|
||||
sizes->fb_width, sizes->fb_height);
|
||||
drm_fb_helper_fill_info(info, &fbdev->psb_fb_helper, sizes);
|
||||
|
||||
info->fix.mmio_start = pci_resource_start(dev->pdev, 0);
|
||||
info->fix.mmio_len = pci_resource_len(dev->pdev, 0);
|
||||
|
|
|
@ -34,7 +34,7 @@ struct psb_framebuffer {
|
|||
};
|
||||
|
||||
struct psb_fbdev {
|
||||
struct drm_fb_helper psb_fb_helper;
|
||||
struct drm_fb_helper psb_fb_helper; /* must be first */
|
||||
struct psb_framebuffer pfb;
|
||||
};
|
||||
|
||||
|
|
|
@ -31,7 +31,7 @@ struct hibmc_framebuffer {
|
|||
};
|
||||
|
||||
struct hibmc_fbdev {
|
||||
struct drm_fb_helper helper;
|
||||
struct drm_fb_helper helper; /* must be first */
|
||||
struct hibmc_framebuffer *fb;
|
||||
int size;
|
||||
};
|
||||
|
|
|
@ -116,8 +116,6 @@ static int hibmc_drm_fb_create(struct drm_fb_helper *helper,
|
|||
goto out_release_fbi;
|
||||
}
|
||||
|
||||
info->par = hi_fbdev;
|
||||
|
||||
hi_fbdev->fb = hibmc_framebuffer_init(priv->dev, &mode_cmd, gobj);
|
||||
if (IS_ERR(hi_fbdev->fb)) {
|
||||
ret = PTR_ERR(hi_fbdev->fb);
|
||||
|
@ -129,14 +127,9 @@ static int hibmc_drm_fb_create(struct drm_fb_helper *helper,
|
|||
priv->fbdev->size = size;
|
||||
hi_fbdev->helper.fb = &hi_fbdev->fb->fb;
|
||||
|
||||
strcpy(info->fix.id, "hibmcdrmfb");
|
||||
|
||||
info->fbops = &hibmc_drm_fb_ops;
|
||||
|
||||
drm_fb_helper_fill_fix(info, hi_fbdev->fb->fb.pitches[0],
|
||||
hi_fbdev->fb->fb.format->depth);
|
||||
drm_fb_helper_fill_var(info, &priv->fbdev->helper, sizes->fb_width,
|
||||
sizes->fb_height);
|
||||
drm_fb_helper_fill_info(info, &priv->fbdev->helper, sizes);
|
||||
|
||||
info->screen_base = bo->kmap.virtual;
|
||||
info->screen_size = size;
|
||||
|
|
|
@ -33,17 +33,10 @@ static enum drm_mode_status hibmc_connector_mode_valid(struct drm_connector *con
|
|||
return MODE_OK;
|
||||
}
|
||||
|
||||
static struct drm_encoder *
|
||||
hibmc_connector_best_encoder(struct drm_connector *connector)
|
||||
{
|
||||
return drm_encoder_find(connector->dev, NULL, connector->encoder_ids[0]);
|
||||
}
|
||||
|
||||
static const struct drm_connector_helper_funcs
|
||||
hibmc_connector_helper_funcs = {
|
||||
.get_modes = hibmc_connector_get_modes,
|
||||
.mode_valid = hibmc_connector_mode_valid,
|
||||
.best_encoder = hibmc_connector_best_encoder,
|
||||
};
|
||||
|
||||
static const struct drm_connector_funcs hibmc_connector_funcs = {
|
||||
|
|
|
@ -2866,7 +2866,6 @@ static void intel_connector_info(struct seq_file *m,
|
|||
if (connector->status == connector_status_disconnected)
|
||||
return;
|
||||
|
||||
seq_printf(m, "\tname: %s\n", connector->display_info.name);
|
||||
seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
|
||||
connector->display_info.width_mm,
|
||||
connector->display_info.height_mm);
|
||||
|
|
|
@ -235,12 +235,8 @@ static int intelfb_create(struct drm_fb_helper *helper,
|
|||
goto out_unpin;
|
||||
}
|
||||
|
||||
info->par = helper;
|
||||
|
||||
ifbdev->helper.fb = fb;
|
||||
|
||||
strcpy(info->fix.id, "inteldrmfb");
|
||||
|
||||
info->fbops = &intelfb_ops;
|
||||
|
||||
/* setup aperture base/size for vesafb takeover */
|
||||
|
@ -259,11 +255,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
|
|||
info->screen_base = vaddr;
|
||||
info->screen_size = vma->node.size;
|
||||
|
||||
/* This driver doesn't need a VT switch to restore the mode on resume */
|
||||
info->skip_vt_switch = true;
|
||||
|
||||
drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth);
|
||||
drm_fb_helper_fill_var(info, &ifbdev->helper, sizes->fb_width, sizes->fb_height);
|
||||
drm_fb_helper_fill_info(info, &ifbdev->helper, sizes);
|
||||
|
||||
/* If the object is shmemfs backed, it will have given us zeroed pages.
|
||||
* If the object is stolen however, it will be full of whatever
|
||||
|
|
|
@ -113,7 +113,7 @@ struct mga_framebuffer {
|
|||
};
|
||||
|
||||
struct mga_fbdev {
|
||||
struct drm_fb_helper helper;
|
||||
struct drm_fb_helper helper; /* must be first */
|
||||
struct mga_framebuffer mfb;
|
||||
void *sysram;
|
||||
int size;
|
||||
|
|
|
@ -195,8 +195,6 @@ static int mgag200fb_create(struct drm_fb_helper *helper,
|
|||
goto err_alloc_fbi;
|
||||
}
|
||||
|
||||
info->par = mfbdev;
|
||||
|
||||
ret = mgag200_framebuffer_init(dev, &mfbdev->mfb, &mode_cmd, gobj);
|
||||
if (ret)
|
||||
goto err_alloc_fbi;
|
||||
|
@ -209,17 +207,13 @@ static int mgag200fb_create(struct drm_fb_helper *helper,
|
|||
/* setup helper */
|
||||
mfbdev->helper.fb = fb;
|
||||
|
||||
strcpy(info->fix.id, "mgadrmfb");
|
||||
|
||||
info->fbops = &mgag200fb_ops;
|
||||
|
||||
/* setup aperture base/size for vesafb takeover */
|
||||
info->apertures->ranges[0].base = mdev->dev->mode_config.fb_base;
|
||||
info->apertures->ranges[0].size = mdev->mc.vram_size;
|
||||
|
||||
drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth);
|
||||
drm_fb_helper_fill_var(info, &mfbdev->helper, sizes->fb_width,
|
||||
sizes->fb_height);
|
||||
drm_fb_helper_fill_info(info, &mfbdev->helper, sizes);
|
||||
|
||||
info->screen_base = sysram;
|
||||
info->screen_size = size;
|
||||
|
|
|
@ -122,13 +122,9 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
|
|||
fbdev->fb = fb;
|
||||
helper->fb = fb;
|
||||
|
||||
fbi->par = helper;
|
||||
fbi->fbops = &msm_fb_ops;
|
||||
|
||||
strcpy(fbi->fix.id, "msm");
|
||||
|
||||
drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->format->depth);
|
||||
drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height);
|
||||
drm_fb_helper_fill_info(fbi, helper, sizes);
|
||||
|
||||
dev->mode_config.fb_base = paddr;
|
||||
|
||||
|
|
|
@ -365,14 +365,10 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
|
|||
ret = PTR_ERR(info);
|
||||
goto out_unlock;
|
||||
}
|
||||
info->skip_vt_switch = 1;
|
||||
|
||||
info->par = fbcon;
|
||||
|
||||
/* setup helper */
|
||||
fbcon->helper.fb = &fb->base;
|
||||
|
||||
strcpy(info->fix.id, "nouveaufb");
|
||||
if (!chan)
|
||||
info->flags = FBINFO_HWACCEL_DISABLED;
|
||||
else
|
||||
|
@ -387,9 +383,7 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
|
|||
info->screen_base = nvbo_kmap_obj_iovirtual(fb->nvbo);
|
||||
info->screen_size = fb->nvbo->bo.mem.num_pages << PAGE_SHIFT;
|
||||
|
||||
drm_fb_helper_fill_fix(info, fb->base.pitches[0],
|
||||
fb->base.format->depth);
|
||||
drm_fb_helper_fill_var(info, &fbcon->helper, sizes->fb_width, sizes->fb_height);
|
||||
drm_fb_helper_fill_info(info, &fbcon->helper, sizes);
|
||||
|
||||
/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
|
||||
|
||||
|
|
|
@ -32,7 +32,7 @@
|
|||
#include "nouveau_display.h"
|
||||
|
||||
struct nouveau_fbdev {
|
||||
struct drm_fb_helper helper;
|
||||
struct drm_fb_helper helper; /* must be first */
|
||||
unsigned int saved_flags;
|
||||
struct nvif_object surf2d;
|
||||
struct nvif_object clip;
|
||||
|
|
|
@ -183,13 +183,9 @@ static int omap_fbdev_create(struct drm_fb_helper *helper,
|
|||
fbdev->fb = fb;
|
||||
helper->fb = fb;
|
||||
|
||||
fbi->par = helper;
|
||||
fbi->fbops = &omap_fb_ops;
|
||||
|
||||
strcpy(fbi->fix.id, MODULE_NAME);
|
||||
|
||||
drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->format->depth);
|
||||
drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height);
|
||||
drm_fb_helper_fill_info(fbi, helper, sizes);
|
||||
|
||||
dev->mode_config.fb_base = dma_addr;
|
||||
|
||||
|
|
|
@ -264,8 +264,6 @@ static int versatile_panel_get_modes(struct drm_panel *panel)
|
|||
struct versatile_panel *vpanel = to_versatile_panel(panel);
|
||||
struct drm_display_mode *mode;
|
||||
|
||||
strncpy(connector->display_info.name, vpanel->panel_type->name,
|
||||
DRM_DISPLAY_INFO_LEN);
|
||||
connector->display_info.width_mm = vpanel->panel_type->width_mm;
|
||||
connector->display_info.height_mm = vpanel->panel_type->height_mm;
|
||||
connector->display_info.bus_flags = vpanel->panel_type->bus_flags;
|
||||
|
|
|
@ -662,8 +662,6 @@ static int ili9322_get_modes(struct drm_panel *panel)
|
|||
struct ili9322 *ili = panel_to_ili9322(panel);
|
||||
struct drm_display_mode *mode;
|
||||
|
||||
strncpy(connector->display_info.name, "ILI9322 TFT LCD driver\0",
|
||||
DRM_DISPLAY_INFO_LEN);
|
||||
connector->display_info.width_mm = ili->conf->width_mm;
|
||||
connector->display_info.height_mm = ili->conf->height_mm;
|
||||
|
||||
|
|
|
@ -190,7 +190,6 @@ static int lcd_olinuxino_get_modes(struct drm_panel *panel)
|
|||
num++;
|
||||
}
|
||||
|
||||
memcpy(connector->display_info.name, lcd_info->name, 32);
|
||||
connector->display_info.width_mm = lcd_info->width_mm;
|
||||
connector->display_info.height_mm = lcd_info->height_mm;
|
||||
connector->display_info.bpc = lcd_info->bpc;
|
||||
|
|
|
@ -148,9 +148,6 @@ static int s6d16d0_get_modes(struct drm_panel *panel)
|
|||
struct drm_connector *connector = panel->connector;
|
||||
struct drm_display_mode *mode;
|
||||
|
||||
strncpy(connector->display_info.name, "Samsung S6D16D0\0",
|
||||
DRM_DISPLAY_INFO_LEN);
|
||||
|
||||
mode = drm_mode_duplicate(panel->drm, &samsung_s6d16d0_mode);
|
||||
if (!mode) {
|
||||
DRM_ERROR("bad mode or failed to add mode\n");
|
||||
|
|
|
@ -390,8 +390,6 @@ static int tpg110_get_modes(struct drm_panel *panel)
|
|||
struct tpg110 *tpg = to_tpg110(panel);
|
||||
struct drm_display_mode *mode;
|
||||
|
||||
strncpy(connector->display_info.name, tpg->panel_mode->name,
|
||||
DRM_DISPLAY_INFO_LEN);
|
||||
connector->display_info.width_mm = tpg->width;
|
||||
connector->display_info.height_mm = tpg->height;
|
||||
connector->display_info.bus_flags = tpg->panel_mode->bus_flags;
|
||||
|
|
|
@ -42,7 +42,7 @@
|
|||
* the helper contains a pointer to radeon framebuffer baseclass.
|
||||
*/
|
||||
struct radeon_fbdev {
|
||||
struct drm_fb_helper helper;
|
||||
struct drm_fb_helper helper; /* must be first */
|
||||
struct drm_framebuffer fb;
|
||||
struct radeon_device *rdev;
|
||||
};
|
||||
|
@ -244,7 +244,8 @@ static int radeonfb_create(struct drm_fb_helper *helper,
|
|||
goto out;
|
||||
}
|
||||
|
||||
info->par = rfbdev;
|
||||
/* radeon resume is fragile and needs a vt switch to help it along */
|
||||
info->skip_vt_switch = false;
|
||||
|
||||
ret = radeon_framebuffer_init(rdev->ddev, &rfbdev->fb, &mode_cmd, gobj);
|
||||
if (ret) {
|
||||
|
@ -259,10 +260,6 @@ static int radeonfb_create(struct drm_fb_helper *helper,
|
|||
|
||||
memset_io(rbo->kptr, 0x0, radeon_bo_size(rbo));
|
||||
|
||||
strcpy(info->fix.id, "radeondrmfb");
|
||||
|
||||
drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth);
|
||||
|
||||
info->fbops = &radeonfb_ops;
|
||||
|
||||
tmp = radeon_bo_gpu_offset(rbo) - rdev->mc.vram_start;
|
||||
|
@ -271,7 +268,7 @@ static int radeonfb_create(struct drm_fb_helper *helper,
|
|||
info->screen_base = rbo->kptr;
|
||||
info->screen_size = radeon_bo_size(rbo);
|
||||
|
||||
drm_fb_helper_fill_var(info, &rfbdev->helper, sizes->fb_width, sizes->fb_height);
|
||||
drm_fb_helper_fill_info(info, &rfbdev->helper, sizes);
|
||||
|
||||
/* setup aperture base/size for vesafb takeover */
|
||||
info->apertures->ranges[0].base = rdev->ddev->mode_config.fb_base;
|
||||
|
|
|
@ -90,12 +90,10 @@ static int rockchip_drm_fbdev_create(struct drm_fb_helper *helper,
|
|||
goto out;
|
||||
}
|
||||
|
||||
fbi->par = helper;
|
||||
fbi->fbops = &rockchip_drm_fbdev_ops;
|
||||
|
||||
fb = helper->fb;
|
||||
drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->format->depth);
|
||||
drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height);
|
||||
drm_fb_helper_fill_info(fbi, helper, sizes);
|
||||
|
||||
offset = fbi->var.xoffset * bytes_per_pixel;
|
||||
offset += fbi->var.yoffset * fb->pitches[0];
|
||||
|
@ -110,8 +108,6 @@ static int rockchip_drm_fbdev_create(struct drm_fb_helper *helper,
|
|||
rk_obj->kvaddr,
|
||||
offset, size);
|
||||
|
||||
fbi->skip_vt_switch = true;
|
||||
|
||||
return 0;
|
||||
|
||||
out:
|
||||
|
|
|
@ -255,11 +255,9 @@ static int tegra_fbdev_probe(struct drm_fb_helper *helper,
|
|||
helper->fb = fb;
|
||||
helper->fbdev = info;
|
||||
|
||||
info->par = helper;
|
||||
info->fbops = &tegra_fb_ops;
|
||||
|
||||
drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth);
|
||||
drm_fb_helper_fill_var(info, helper, fb->width, fb->height);
|
||||
drm_fb_helper_fill_info(info, helper, sizes);
|
||||
|
||||
offset = info->var.xoffset * bytes_per_pixel +
|
||||
info->var.yoffset * fb->pitches[0];
|
||||
|
|
|
@ -32,7 +32,7 @@ module_param(fb_bpp, int, S_IWUSR | S_IRUSR | S_IWGRP | S_IRGRP);
|
|||
module_param(fb_defio, int, S_IWUSR | S_IRUSR | S_IWGRP | S_IRGRP);
|
||||
|
||||
struct udl_fbdev {
|
||||
struct drm_fb_helper helper;
|
||||
struct drm_fb_helper helper; /* must be first */
|
||||
struct udl_framebuffer ufb;
|
||||
int fb_count;
|
||||
};
|
||||
|
@ -392,7 +392,6 @@ static int udlfb_create(struct drm_fb_helper *helper,
|
|||
ret = PTR_ERR(info);
|
||||
goto out_gfree;
|
||||
}
|
||||
info->par = ufbdev;
|
||||
|
||||
ret = udl_framebuffer_init(dev, &ufbdev->ufb, &mode_cmd, obj);
|
||||
if (ret)
|
||||
|
@ -402,15 +401,12 @@ static int udlfb_create(struct drm_fb_helper *helper,
|
|||
|
||||
ufbdev->helper.fb = fb;
|
||||
|
||||
strcpy(info->fix.id, "udldrmfb");
|
||||
|
||||
info->screen_base = ufbdev->ufb.obj->vmapping;
|
||||
info->fix.smem_len = size;
|
||||
info->fix.smem_start = (unsigned long)ufbdev->ufb.obj->vmapping;
|
||||
|
||||
info->fbops = &udlfb_ops;
|
||||
drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth);
|
||||
drm_fb_helper_fill_var(info, &ufbdev->helper, sizes->fb_width, sizes->fb_height);
|
||||
drm_fb_helper_fill_info(info, &ufbdev->helper, sizes);
|
||||
|
||||
DRM_DEBUG_KMS("allocated %dx%d vmal %p\n",
|
||||
fb->width, fb->height,
|
||||
|
|
|
@ -90,13 +90,9 @@ int vboxfb_create(struct drm_fb_helper *helper,
|
|||
if (IS_ERR(info->screen_base))
|
||||
return PTR_ERR(info->screen_base);
|
||||
|
||||
info->par = helper;
|
||||
|
||||
fb = &vbox->afb.base;
|
||||
helper->fb = fb;
|
||||
|
||||
strcpy(info->fix.id, "vboxdrmfb");
|
||||
|
||||
info->fbops = &vboxfb_ops;
|
||||
|
||||
/*
|
||||
|
@ -106,9 +102,7 @@ int vboxfb_create(struct drm_fb_helper *helper,
|
|||
info->apertures->ranges[0].base = pci_resource_start(pdev, 0);
|
||||
info->apertures->ranges[0].size = pci_resource_len(pdev, 0);
|
||||
|
||||
drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth);
|
||||
drm_fb_helper_fill_var(info, helper, sizes->fb_width,
|
||||
sizes->fb_height);
|
||||
drm_fb_helper_fill_info(info, helper, sizes);
|
||||
|
||||
gpu_addr = vbox_bo_gpu_offset(bo);
|
||||
info->fix.smem_start = info->apertures->ranges[0].base + gpu_addr;
|
||||
|
|
|
@ -28,6 +28,30 @@
|
|||
|
||||
#include "virtgpu_drv.h"
|
||||
|
||||
static void virtio_add_bool(struct seq_file *m, const char *name,
|
||||
bool value)
|
||||
{
|
||||
seq_printf(m, "%-16s : %s\n", name, value ? "yes" : "no");
|
||||
}
|
||||
|
||||
static void virtio_add_int(struct seq_file *m, const char *name,
|
||||
int value)
|
||||
{
|
||||
seq_printf(m, "%-16s : %d\n", name, value);
|
||||
}
|
||||
|
||||
static int virtio_gpu_features(struct seq_file *m, void *data)
|
||||
{
|
||||
struct drm_info_node *node = (struct drm_info_node *) m->private;
|
||||
struct virtio_gpu_device *vgdev = node->minor->dev->dev_private;
|
||||
|
||||
virtio_add_bool(m, "virgl", vgdev->has_virgl_3d);
|
||||
virtio_add_bool(m, "edid", vgdev->has_edid);
|
||||
virtio_add_int(m, "cap sets", vgdev->num_capsets);
|
||||
virtio_add_int(m, "scanouts", vgdev->num_scanouts);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
virtio_gpu_debugfs_irq_info(struct seq_file *m, void *data)
|
||||
{
|
||||
|
@ -41,7 +65,8 @@ virtio_gpu_debugfs_irq_info(struct seq_file *m, void *data)
|
|||
}
|
||||
|
||||
static struct drm_info_list virtio_gpu_debugfs_list[] = {
|
||||
{ "irq_fence", virtio_gpu_debugfs_irq_info, 0, NULL },
|
||||
{ "virtio-gpu-features", virtio_gpu_features },
|
||||
{ "virtio-gpu-irq-fence", virtio_gpu_debugfs_irq_info, 0, NULL },
|
||||
};
|
||||
|
||||
#define VIRTIO_GPU_DEBUGFS_ENTRIES ARRAY_SIZE(virtio_gpu_debugfs_list)
|
||||
|
|
|
@@ -50,6 +50,23 @@
#define DRIVER_MINOR 1
#define DRIVER_PATCHLEVEL 0

struct virtio_gpu_object_params {
uint32_t format;
uint32_t width;
uint32_t height;
unsigned long size;
bool dumb;
/* 3d */
bool virgl;
uint32_t target;
uint32_t bind;
uint32_t depth;
uint32_t array_size;
uint32_t last_level;
uint32_t nr_samples;
uint32_t flags;
};

struct virtio_gpu_object {
struct drm_gem_object gem_base;
uint32_t hw_res_handle;
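The struct above replaces the long argument lists on the allocation and resource-creation paths. A rough sketch of a caller after this change, mirroring the virtio_gpu_mode_dumb_create() conversion later in this commit (args is the &drm_mode_create_dumb argument; error handling trimmed):

	struct virtio_gpu_object_params params = { 0 };
	struct drm_gem_object *gobj;
	int ret;

	/* 2D/dumb object: only the generic fields are filled in */
	params.format = virtio_gpu_translate_format(DRM_FORMAT_HOST_XRGB8888);
	params.width  = args->width;
	params.height = args->height;
	params.size   = args->size;
	params.dumb   = true;

	/*
	 * old: virtio_gpu_gem_create(file_priv, dev, size, &gobj, &handle)
	 * new: the params struct travels down to virtio_gpu_object_create(),
	 * which now also issues the RESOURCE_CREATE_2D/3D command itself.
	 */
	ret = virtio_gpu_gem_create(file_priv, dev, &params, &gobj, &args->handle);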
@ -204,6 +221,9 @@ struct virtio_gpu_fpriv {
|
|||
/* virtio_ioctl.c */
|
||||
#define DRM_VIRTIO_NUM_IOCTLS 10
|
||||
extern struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS];
|
||||
int virtio_gpu_object_list_validate(struct ww_acquire_ctx *ticket,
|
||||
struct list_head *head);
|
||||
void virtio_gpu_unref_list(struct list_head *head);
|
||||
|
||||
/* virtio_kms.c */
|
||||
int virtio_gpu_init(struct drm_device *dev);
|
||||
|
@ -217,16 +237,17 @@ int virtio_gpu_gem_init(struct virtio_gpu_device *vgdev);
|
|||
void virtio_gpu_gem_fini(struct virtio_gpu_device *vgdev);
|
||||
int virtio_gpu_gem_create(struct drm_file *file,
|
||||
struct drm_device *dev,
|
||||
uint64_t size,
|
||||
struct virtio_gpu_object_params *params,
|
||||
struct drm_gem_object **obj_p,
|
||||
uint32_t *handle_p);
|
||||
int virtio_gpu_gem_object_open(struct drm_gem_object *obj,
|
||||
struct drm_file *file);
|
||||
void virtio_gpu_gem_object_close(struct drm_gem_object *obj,
|
||||
struct drm_file *file);
|
||||
struct virtio_gpu_object *virtio_gpu_alloc_object(struct drm_device *dev,
|
||||
size_t size, bool kernel,
|
||||
bool pinned);
|
||||
struct virtio_gpu_object*
|
||||
virtio_gpu_alloc_object(struct drm_device *dev,
|
||||
struct virtio_gpu_object_params *params,
|
||||
struct virtio_gpu_fence *fence);
|
||||
int virtio_gpu_mode_dumb_create(struct drm_file *file_priv,
|
||||
struct drm_device *dev,
|
||||
struct drm_mode_create_dumb *args);
|
||||
|
@ -243,9 +264,8 @@ int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev);
|
|||
void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev);
|
||||
void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
|
||||
struct virtio_gpu_object *bo,
|
||||
uint32_t format,
|
||||
uint32_t width,
|
||||
uint32_t height);
|
||||
struct virtio_gpu_object_params *params,
|
||||
struct virtio_gpu_fence *fence);
|
||||
void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
|
||||
uint32_t resource_id);
|
||||
void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
|
||||
|
@ -304,7 +324,8 @@ void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
|
|||
void
|
||||
virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
|
||||
struct virtio_gpu_object *bo,
|
||||
struct virtio_gpu_resource_create_3d *rc_3d);
|
||||
struct virtio_gpu_object_params *params,
|
||||
struct virtio_gpu_fence *fence);
|
||||
void virtio_gpu_ctrl_ack(struct virtqueue *vq);
|
||||
void virtio_gpu_cursor_ack(struct virtqueue *vq);
|
||||
void virtio_gpu_fence_ack(struct virtqueue *vq);
|
||||
|
@ -332,6 +353,7 @@ void virtio_gpu_ttm_fini(struct virtio_gpu_device *vgdev);
|
|||
int virtio_gpu_mmap(struct file *filp, struct vm_area_struct *vma);
|
||||
|
||||
/* virtio_gpu_fence.c */
|
||||
bool virtio_fence_signaled(struct dma_fence *f);
|
||||
struct virtio_gpu_fence *virtio_gpu_fence_alloc(
|
||||
struct virtio_gpu_device *vgdev);
|
||||
int virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev,
|
||||
|
@ -342,8 +364,9 @@ void virtio_gpu_fence_event_process(struct virtio_gpu_device *vdev,
|
|||
|
||||
/* virtio_gpu_object */
|
||||
int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
|
||||
unsigned long size, bool kernel, bool pinned,
|
||||
struct virtio_gpu_object **bo_ptr);
|
||||
struct virtio_gpu_object_params *params,
|
||||
struct virtio_gpu_object **bo_ptr,
|
||||
struct virtio_gpu_fence *fence);
|
||||
void virtio_gpu_object_kunmap(struct virtio_gpu_object *bo);
|
||||
int virtio_gpu_object_kmap(struct virtio_gpu_object *bo);
|
||||
int virtio_gpu_object_get_sg_table(struct virtio_gpu_device *qdev,
|
||||
|
|
|
@ -36,7 +36,7 @@ static const char *virtio_get_timeline_name(struct dma_fence *f)
|
|||
return "controlq";
|
||||
}
|
||||
|
||||
static bool virtio_signaled(struct dma_fence *f)
|
||||
bool virtio_fence_signaled(struct dma_fence *f)
|
||||
{
|
||||
struct virtio_gpu_fence *fence = to_virtio_fence(f);
|
||||
|
||||
|
@ -62,7 +62,7 @@ static void virtio_timeline_value_str(struct dma_fence *f, char *str, int size)
|
|||
static const struct dma_fence_ops virtio_fence_ops = {
|
||||
.get_driver_name = virtio_get_driver_name,
|
||||
.get_timeline_name = virtio_get_timeline_name,
|
||||
.signaled = virtio_signaled,
|
||||
.signaled = virtio_fence_signaled,
|
||||
.fence_value_str = virtio_fence_value_str,
|
||||
.timeline_value_str = virtio_timeline_value_str,
|
||||
};
|
||||
|
|
|
@ -34,15 +34,16 @@ void virtio_gpu_gem_free_object(struct drm_gem_object *gem_obj)
|
|||
virtio_gpu_object_unref(&obj);
|
||||
}
|
||||
|
||||
struct virtio_gpu_object *virtio_gpu_alloc_object(struct drm_device *dev,
|
||||
size_t size, bool kernel,
|
||||
bool pinned)
|
||||
struct virtio_gpu_object*
|
||||
virtio_gpu_alloc_object(struct drm_device *dev,
|
||||
struct virtio_gpu_object_params *params,
|
||||
struct virtio_gpu_fence *fence)
|
||||
{
|
||||
struct virtio_gpu_device *vgdev = dev->dev_private;
|
||||
struct virtio_gpu_object *obj;
|
||||
int ret;
|
||||
|
||||
ret = virtio_gpu_object_create(vgdev, size, kernel, pinned, &obj);
|
||||
ret = virtio_gpu_object_create(vgdev, params, &obj, fence);
|
||||
if (ret)
|
||||
return ERR_PTR(ret);
|
||||
|
||||
|
@ -51,7 +52,7 @@ struct virtio_gpu_object *virtio_gpu_alloc_object(struct drm_device *dev,
|
|||
|
||||
int virtio_gpu_gem_create(struct drm_file *file,
|
||||
struct drm_device *dev,
|
||||
uint64_t size,
|
||||
struct virtio_gpu_object_params *params,
|
||||
struct drm_gem_object **obj_p,
|
||||
uint32_t *handle_p)
|
||||
{
|
||||
|
@ -59,7 +60,7 @@ int virtio_gpu_gem_create(struct drm_file *file,
|
|||
int ret;
|
||||
u32 handle;
|
||||
|
||||
obj = virtio_gpu_alloc_object(dev, size, false, false);
|
||||
obj = virtio_gpu_alloc_object(dev, params, NULL);
|
||||
if (IS_ERR(obj))
|
||||
return PTR_ERR(obj);
|
||||
|
||||
|
@ -82,12 +83,10 @@ int virtio_gpu_mode_dumb_create(struct drm_file *file_priv,
|
|||
struct drm_device *dev,
|
||||
struct drm_mode_create_dumb *args)
|
||||
{
|
||||
struct virtio_gpu_device *vgdev = dev->dev_private;
|
||||
struct drm_gem_object *gobj;
|
||||
struct virtio_gpu_object *obj;
|
||||
struct virtio_gpu_object_params params = { 0 };
|
||||
int ret;
|
||||
uint32_t pitch;
|
||||
uint32_t format;
|
||||
|
||||
if (args->bpp != 32)
|
||||
return -EINVAL;
|
||||
|
@ -96,22 +95,16 @@ int virtio_gpu_mode_dumb_create(struct drm_file *file_priv,
|
|||
args->size = pitch * args->height;
|
||||
args->size = ALIGN(args->size, PAGE_SIZE);
|
||||
|
||||
ret = virtio_gpu_gem_create(file_priv, dev, args->size, &gobj,
|
||||
params.format = virtio_gpu_translate_format(DRM_FORMAT_HOST_XRGB8888);
|
||||
params.width = args->width;
|
||||
params.height = args->height;
|
||||
params.size = args->size;
|
||||
params.dumb = true;
|
||||
ret = virtio_gpu_gem_create(file_priv, dev, ¶ms, &gobj,
|
||||
&args->handle);
|
||||
if (ret)
|
||||
goto fail;
|
||||
|
||||
format = virtio_gpu_translate_format(DRM_FORMAT_HOST_XRGB8888);
|
||||
obj = gem_to_virtio_gpu_obj(gobj);
|
||||
virtio_gpu_cmd_create_resource(vgdev, obj, format,
|
||||
args->width, args->height);
|
||||
|
||||
/* attach the object to the resource */
|
||||
ret = virtio_gpu_object_attach(vgdev, obj, NULL);
|
||||
if (ret)
|
||||
goto fail;
|
||||
|
||||
obj->dumb = true;
|
||||
args->pitch = pitch;
|
||||
return ret;
|
||||
|
||||
|
|
|
@ -54,8 +54,8 @@ static int virtio_gpu_map_ioctl(struct drm_device *dev, void *data,
|
|||
&virtio_gpu_map->offset);
|
||||
}
|
||||
|
||||
static int virtio_gpu_object_list_validate(struct ww_acquire_ctx *ticket,
|
||||
struct list_head *head)
|
||||
int virtio_gpu_object_list_validate(struct ww_acquire_ctx *ticket,
|
||||
struct list_head *head)
|
||||
{
|
||||
struct ttm_operation_ctx ctx = { false, false };
|
||||
struct ttm_validate_buffer *buf;
|
||||
|
@ -79,7 +79,7 @@ static int virtio_gpu_object_list_validate(struct ww_acquire_ctx *ticket,
|
|||
return 0;
|
||||
}
|
||||
|
||||
static void virtio_gpu_unref_list(struct list_head *head)
|
||||
void virtio_gpu_unref_list(struct list_head *head)
|
||||
{
|
||||
struct ttm_validate_buffer *buf;
|
||||
struct ttm_buffer_object *bo;
|
||||
|
@ -275,16 +275,12 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
|
|||
{
|
||||
struct virtio_gpu_device *vgdev = dev->dev_private;
|
||||
struct drm_virtgpu_resource_create *rc = data;
|
||||
struct virtio_gpu_fence *fence;
|
||||
int ret;
|
||||
struct virtio_gpu_object *qobj;
|
||||
struct drm_gem_object *obj;
|
||||
uint32_t handle = 0;
|
||||
uint32_t size;
|
||||
struct list_head validate_list;
|
||||
struct ttm_validate_buffer mainbuf;
|
||||
struct virtio_gpu_fence *fence = NULL;
|
||||
struct ww_acquire_ctx ticket;
|
||||
struct virtio_gpu_resource_create_3d rc_3d;
|
||||
struct virtio_gpu_object_params params = { 0 };
|
||||
|
||||
if (vgdev->has_virgl_3d == false) {
|
||||
if (rc->depth > 1)
|
||||
|
@ -299,94 +295,43 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
|
|||
return -EINVAL;
|
||||
}
|
||||
|
||||
INIT_LIST_HEAD(&validate_list);
|
||||
memset(&mainbuf, 0, sizeof(struct ttm_validate_buffer));
|
||||
|
||||
size = rc->size;
|
||||
|
||||
params.format = rc->format;
|
||||
params.width = rc->width;
|
||||
params.height = rc->height;
|
||||
params.size = rc->size;
|
||||
if (vgdev->has_virgl_3d) {
|
||||
params.virgl = true;
|
||||
params.target = rc->target;
|
||||
params.bind = rc->bind;
|
||||
params.depth = rc->depth;
|
||||
params.array_size = rc->array_size;
|
||||
params.last_level = rc->last_level;
|
||||
params.nr_samples = rc->nr_samples;
|
||||
params.flags = rc->flags;
|
||||
}
|
||||
/* allocate a single page size object */
|
||||
if (size == 0)
|
||||
size = PAGE_SIZE;
|
||||
if (params.size == 0)
|
||||
params.size = PAGE_SIZE;
|
||||
|
||||
qobj = virtio_gpu_alloc_object(dev, size, false, false);
|
||||
fence = virtio_gpu_fence_alloc(vgdev);
|
||||
if (!fence)
|
||||
return -ENOMEM;
|
||||
qobj = virtio_gpu_alloc_object(dev, ¶ms, fence);
|
||||
dma_fence_put(&fence->f);
|
||||
if (IS_ERR(qobj))
|
||||
return PTR_ERR(qobj);
|
||||
obj = &qobj->gem_base;
|
||||
|
||||
if (!vgdev->has_virgl_3d) {
|
||||
virtio_gpu_cmd_create_resource(vgdev, qobj, rc->format,
|
||||
rc->width, rc->height);
|
||||
|
||||
ret = virtio_gpu_object_attach(vgdev, qobj, NULL);
|
||||
} else {
|
||||
/* use a gem reference since unref list undoes them */
|
||||
drm_gem_object_get(&qobj->gem_base);
|
||||
mainbuf.bo = &qobj->tbo;
|
||||
list_add(&mainbuf.head, &validate_list);
|
||||
|
||||
ret = virtio_gpu_object_list_validate(&ticket, &validate_list);
|
||||
if (ret) {
|
||||
DRM_DEBUG("failed to validate\n");
|
||||
goto fail_unref;
|
||||
}
|
||||
|
||||
rc_3d.resource_id = cpu_to_le32(qobj->hw_res_handle);
|
||||
rc_3d.target = cpu_to_le32(rc->target);
|
||||
rc_3d.format = cpu_to_le32(rc->format);
|
||||
rc_3d.bind = cpu_to_le32(rc->bind);
|
||||
rc_3d.width = cpu_to_le32(rc->width);
|
||||
rc_3d.height = cpu_to_le32(rc->height);
|
||||
rc_3d.depth = cpu_to_le32(rc->depth);
|
||||
rc_3d.array_size = cpu_to_le32(rc->array_size);
|
||||
rc_3d.last_level = cpu_to_le32(rc->last_level);
|
||||
rc_3d.nr_samples = cpu_to_le32(rc->nr_samples);
|
||||
rc_3d.flags = cpu_to_le32(rc->flags);
|
||||
|
||||
fence = virtio_gpu_fence_alloc(vgdev);
|
||||
if (!fence) {
|
||||
ret = -ENOMEM;
|
||||
goto fail_backoff;
|
||||
}
|
||||
|
||||
virtio_gpu_cmd_resource_create_3d(vgdev, qobj, &rc_3d);
|
||||
ret = virtio_gpu_object_attach(vgdev, qobj, fence);
|
||||
if (ret) {
|
||||
dma_fence_put(&fence->f);
|
||||
goto fail_backoff;
|
||||
}
|
||||
ttm_eu_fence_buffer_objects(&ticket, &validate_list, &fence->f);
|
||||
}
|
||||
|
||||
ret = drm_gem_handle_create(file_priv, obj, &handle);
|
||||
if (ret) {
|
||||
|
||||
drm_gem_object_release(obj);
|
||||
if (vgdev->has_virgl_3d) {
|
||||
virtio_gpu_unref_list(&validate_list);
|
||||
dma_fence_put(&fence->f);
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
drm_gem_object_put_unlocked(obj);
|
||||
|
||||
rc->res_handle = qobj->hw_res_handle; /* similiar to a VM address */
|
||||
rc->bo_handle = handle;
|
||||
|
||||
if (vgdev->has_virgl_3d) {
|
||||
virtio_gpu_unref_list(&validate_list);
|
||||
dma_fence_put(&fence->f);
|
||||
}
|
||||
return 0;
|
||||
fail_backoff:
|
||||
ttm_eu_backoff_reservation(&ticket, &validate_list);
|
||||
fail_unref:
|
||||
if (vgdev->has_virgl_3d) {
|
||||
virtio_gpu_unref_list(&validate_list);
|
||||
dma_fence_put(&fence->f);
|
||||
}
|
||||
//fail_obj:
|
||||
// drm_gem_object_handle_unreference_unlocked(obj);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int virtio_gpu_resource_info_ioctl(struct drm_device *dev, void *data,
|
||||
|
|
|
@ -23,6 +23,8 @@
|
|||
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#include <drm/ttm/ttm_execbuf_util.h>
|
||||
|
||||
#include "virtgpu_drv.h"
|
||||
|
||||
static int virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev,
|
||||
|
@ -74,39 +76,34 @@ static void virtio_gpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
|
|||
kfree(bo);
|
||||
}
|
||||
|
||||
static void virtio_gpu_init_ttm_placement(struct virtio_gpu_object *vgbo,
|
||||
bool pinned)
|
||||
static void virtio_gpu_init_ttm_placement(struct virtio_gpu_object *vgbo)
|
||||
{
|
||||
u32 c = 1;
|
||||
u32 pflag = pinned ? TTM_PL_FLAG_NO_EVICT : 0;
|
||||
|
||||
vgbo->placement.placement = &vgbo->placement_code;
|
||||
vgbo->placement.busy_placement = &vgbo->placement_code;
|
||||
vgbo->placement_code.fpfn = 0;
|
||||
vgbo->placement_code.lpfn = 0;
|
||||
vgbo->placement_code.flags =
|
||||
TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT | pflag;
|
||||
TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT |
|
||||
TTM_PL_FLAG_NO_EVICT;
|
||||
vgbo->placement.num_placement = c;
|
||||
vgbo->placement.num_busy_placement = c;
|
||||
|
||||
}
|
||||
|
||||
int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
|
||||
unsigned long size, bool kernel, bool pinned,
|
||||
struct virtio_gpu_object **bo_ptr)
|
||||
struct virtio_gpu_object_params *params,
|
||||
struct virtio_gpu_object **bo_ptr,
|
||||
struct virtio_gpu_fence *fence)
|
||||
{
|
||||
struct virtio_gpu_object *bo;
|
||||
enum ttm_bo_type type;
|
||||
size_t acc_size;
|
||||
int ret;
|
||||
|
||||
if (kernel)
|
||||
type = ttm_bo_type_kernel;
|
||||
else
|
||||
type = ttm_bo_type_device;
|
||||
*bo_ptr = NULL;
|
||||
|
||||
acc_size = ttm_bo_dma_acc_size(&vgdev->mman.bdev, size,
|
||||
acc_size = ttm_bo_dma_acc_size(&vgdev->mman.bdev, params->size,
|
||||
sizeof(struct virtio_gpu_object));
|
||||
|
||||
bo = kzalloc(sizeof(struct virtio_gpu_object), GFP_KERNEL);
|
||||
|
@ -117,23 +114,62 @@ int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
|
|||
kfree(bo);
|
||||
return ret;
|
||||
}
|
||||
size = roundup(size, PAGE_SIZE);
|
||||
ret = drm_gem_object_init(vgdev->ddev, &bo->gem_base, size);
|
||||
params->size = roundup(params->size, PAGE_SIZE);
|
||||
ret = drm_gem_object_init(vgdev->ddev, &bo->gem_base, params->size);
|
||||
if (ret != 0) {
|
||||
virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);
|
||||
kfree(bo);
|
||||
return ret;
|
||||
}
|
||||
bo->dumb = false;
|
||||
virtio_gpu_init_ttm_placement(bo, pinned);
|
||||
bo->dumb = params->dumb;
|
||||
|
||||
ret = ttm_bo_init(&vgdev->mman.bdev, &bo->tbo, size, type,
|
||||
&bo->placement, 0, !kernel, acc_size,
|
||||
NULL, NULL, &virtio_gpu_ttm_bo_destroy);
|
||||
if (params->virgl) {
|
||||
virtio_gpu_cmd_resource_create_3d(vgdev, bo, params, fence);
|
||||
} else {
|
||||
virtio_gpu_cmd_create_resource(vgdev, bo, params, fence);
|
||||
}
|
||||
|
||||
virtio_gpu_init_ttm_placement(bo);
|
||||
ret = ttm_bo_init(&vgdev->mman.bdev, &bo->tbo, params->size,
|
||||
ttm_bo_type_device, &bo->placement, 0,
|
||||
true, acc_size, NULL, NULL,
|
||||
&virtio_gpu_ttm_bo_destroy);
|
||||
/* ttm_bo_init failure will call the destroy */
|
||||
if (ret != 0)
|
||||
return ret;
|
||||
|
||||
if (fence) {
|
||||
struct virtio_gpu_fence_driver *drv = &vgdev->fence_drv;
|
||||
struct list_head validate_list;
|
||||
struct ttm_validate_buffer mainbuf;
|
||||
struct ww_acquire_ctx ticket;
|
||||
unsigned long irq_flags;
|
||||
bool signaled;
|
||||
|
||||
INIT_LIST_HEAD(&validate_list);
|
||||
memset(&mainbuf, 0, sizeof(struct ttm_validate_buffer));
|
||||
|
||||
/* use a gem reference since unref list undoes them */
|
||||
drm_gem_object_get(&bo->gem_base);
|
||||
mainbuf.bo = &bo->tbo;
|
||||
list_add(&mainbuf.head, &validate_list);
|
||||
|
||||
ret = virtio_gpu_object_list_validate(&ticket, &validate_list);
|
||||
if (ret == 0) {
|
||||
spin_lock_irqsave(&drv->lock, irq_flags);
|
||||
signaled = virtio_fence_signaled(&fence->f);
|
||||
if (!signaled)
|
||||
/* virtio create command still in flight */
|
||||
ttm_eu_fence_buffer_objects(&ticket, &validate_list,
|
||||
&fence->f);
|
||||
spin_unlock_irqrestore(&drv->lock, irq_flags);
|
||||
if (signaled)
|
||||
/* virtio create command finished */
|
||||
ttm_eu_backoff_reservation(&ticket, &validate_list);
|
||||
}
|
||||
virtio_gpu_unref_list(&validate_list);
|
||||
}
|
||||
|
||||
*bo_ptr = bo;
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -116,10 +116,6 @@ static const struct ttm_mem_type_manager_func virtio_gpu_bo_manager_func = {
|
|||
static int virtio_gpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
|
||||
struct ttm_mem_type_manager *man)
|
||||
{
|
||||
struct virtio_gpu_device *vgdev;
|
||||
|
||||
vgdev = virtio_gpu_get_vgdev(bdev);
|
||||
|
||||
switch (type) {
|
||||
case TTM_PL_SYSTEM:
|
||||
/* System memory */
|
||||
|
@ -194,42 +190,45 @@ static void virtio_gpu_ttm_io_mem_free(struct ttm_bo_device *bdev,
|
|||
*/
|
||||
struct virtio_gpu_ttm_tt {
|
||||
struct ttm_dma_tt ttm;
|
||||
struct virtio_gpu_device *vgdev;
|
||||
u64 offset;
|
||||
struct virtio_gpu_object *obj;
|
||||
};
|
||||
|
||||
static int virtio_gpu_ttm_backend_bind(struct ttm_tt *ttm,
|
||||
struct ttm_mem_reg *bo_mem)
|
||||
static int virtio_gpu_ttm_tt_bind(struct ttm_tt *ttm,
|
||||
struct ttm_mem_reg *bo_mem)
|
||||
{
|
||||
struct virtio_gpu_ttm_tt *gtt = (void *)ttm;
|
||||
struct virtio_gpu_ttm_tt *gtt =
|
||||
container_of(ttm, struct virtio_gpu_ttm_tt, ttm.ttm);
|
||||
struct virtio_gpu_device *vgdev =
|
||||
virtio_gpu_get_vgdev(gtt->obj->tbo.bdev);
|
||||
|
||||
gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT);
|
||||
if (!ttm->num_pages)
|
||||
WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
|
||||
ttm->num_pages, bo_mem, ttm);
|
||||
|
||||
/* Not implemented */
|
||||
virtio_gpu_object_attach(vgdev, gtt->obj, NULL);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int virtio_gpu_ttm_backend_unbind(struct ttm_tt *ttm)
|
||||
static int virtio_gpu_ttm_tt_unbind(struct ttm_tt *ttm)
|
||||
{
|
||||
/* Not implemented */
|
||||
struct virtio_gpu_ttm_tt *gtt =
|
||||
container_of(ttm, struct virtio_gpu_ttm_tt, ttm.ttm);
|
||||
struct virtio_gpu_device *vgdev =
|
||||
virtio_gpu_get_vgdev(gtt->obj->tbo.bdev);
|
||||
|
||||
virtio_gpu_object_detach(vgdev, gtt->obj);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void virtio_gpu_ttm_backend_destroy(struct ttm_tt *ttm)
|
||||
static void virtio_gpu_ttm_tt_destroy(struct ttm_tt *ttm)
|
||||
{
|
||||
struct virtio_gpu_ttm_tt *gtt = (void *)ttm;
|
||||
struct virtio_gpu_ttm_tt *gtt =
|
||||
container_of(ttm, struct virtio_gpu_ttm_tt, ttm.ttm);
|
||||
|
||||
ttm_dma_tt_fini(>t->ttm);
|
||||
kfree(gtt);
|
||||
}
|
||||
|
||||
static struct ttm_backend_func virtio_gpu_backend_func = {
|
||||
.bind = &virtio_gpu_ttm_backend_bind,
|
||||
.unbind = &virtio_gpu_ttm_backend_unbind,
|
||||
.destroy = &virtio_gpu_ttm_backend_destroy,
|
||||
static struct ttm_backend_func virtio_gpu_tt_func = {
|
||||
.bind = &virtio_gpu_ttm_tt_bind,
|
||||
.unbind = &virtio_gpu_ttm_tt_unbind,
|
||||
.destroy = &virtio_gpu_ttm_tt_destroy,
|
||||
};
|
||||
|
||||
static struct ttm_tt *virtio_gpu_ttm_tt_create(struct ttm_buffer_object *bo,
|
||||
|
@ -242,8 +241,8 @@ static struct ttm_tt *virtio_gpu_ttm_tt_create(struct ttm_buffer_object *bo,
|
|||
gtt = kzalloc(sizeof(struct virtio_gpu_ttm_tt), GFP_KERNEL);
|
||||
if (gtt == NULL)
|
||||
return NULL;
|
||||
gtt->ttm.ttm.func = &virtio_gpu_backend_func;
|
||||
gtt->vgdev = vgdev;
|
||||
gtt->ttm.ttm.func = &virtio_gpu_tt_func;
|
||||
gtt->obj = container_of(bo, struct virtio_gpu_object, tbo);
|
||||
if (ttm_dma_tt_init(>t->ttm, bo, page_flags)) {
|
||||
kfree(gtt);
|
||||
return NULL;
|
||||
|
@ -251,58 +250,11 @@ static struct ttm_tt *virtio_gpu_ttm_tt_create(struct ttm_buffer_object *bo,
|
|||
return >t->ttm.ttm;
|
||||
}
|
||||
|
||||
static void virtio_gpu_move_null(struct ttm_buffer_object *bo,
|
||||
struct ttm_mem_reg *new_mem)
|
||||
{
|
||||
struct ttm_mem_reg *old_mem = &bo->mem;
|
||||
|
||||
BUG_ON(old_mem->mm_node != NULL);
|
||||
*old_mem = *new_mem;
|
||||
new_mem->mm_node = NULL;
|
||||
}
|
||||
|
||||
static int virtio_gpu_bo_move(struct ttm_buffer_object *bo, bool evict,
|
||||
struct ttm_operation_ctx *ctx,
|
||||
struct ttm_mem_reg *new_mem)
|
||||
{
|
||||
int ret;
|
||||
|
||||
ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
virtio_gpu_move_null(bo, new_mem);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void virtio_gpu_bo_move_notify(struct ttm_buffer_object *tbo,
|
||||
bool evict,
|
||||
struct ttm_mem_reg *new_mem)
|
||||
{
|
||||
struct virtio_gpu_object *bo;
|
||||
struct virtio_gpu_device *vgdev;
|
||||
|
||||
bo = container_of(tbo, struct virtio_gpu_object, tbo);
|
||||
vgdev = (struct virtio_gpu_device *)bo->gem_base.dev->dev_private;
|
||||
|
||||
if (!new_mem || (new_mem->placement & TTM_PL_FLAG_SYSTEM)) {
|
||||
if (bo->hw_res_handle)
|
||||
virtio_gpu_object_detach(vgdev, bo);
|
||||
|
||||
} else if (new_mem->placement & TTM_PL_FLAG_TT) {
|
||||
if (bo->hw_res_handle) {
|
||||
virtio_gpu_object_attach(vgdev, bo, NULL);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static void virtio_gpu_bo_swap_notify(struct ttm_buffer_object *tbo)
|
||||
{
|
||||
struct virtio_gpu_object *bo;
|
||||
struct virtio_gpu_device *vgdev;
|
||||
|
||||
bo = container_of(tbo, struct virtio_gpu_object, tbo);
|
||||
vgdev = (struct virtio_gpu_device *)bo->gem_base.dev->dev_private;
|
||||
|
||||
if (bo->pages)
|
||||
virtio_gpu_object_free_sg_table(bo);
|
||||
|
@ -314,11 +266,9 @@ static struct ttm_bo_driver virtio_gpu_bo_driver = {
|
|||
.init_mem_type = &virtio_gpu_init_mem_type,
|
||||
.eviction_valuable = ttm_bo_eviction_valuable,
|
||||
.evict_flags = &virtio_gpu_evict_flags,
|
||||
.move = &virtio_gpu_bo_move,
|
||||
.verify_access = &virtio_gpu_verify_access,
|
||||
.io_mem_reserve = &virtio_gpu_ttm_io_mem_reserve,
|
||||
.io_mem_free = &virtio_gpu_ttm_io_mem_free,
|
||||
.move_notify = &virtio_gpu_bo_move_notify,
|
||||
.swap_notify = &virtio_gpu_bo_swap_notify,
|
||||
};
|
||||
|
||||
|
|
|
@ -376,9 +376,8 @@ static int virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
|
|||
/* create a basic resource */
|
||||
void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
|
||||
struct virtio_gpu_object *bo,
|
||||
uint32_t format,
|
||||
uint32_t width,
|
||||
uint32_t height)
|
||||
struct virtio_gpu_object_params *params,
|
||||
struct virtio_gpu_fence *fence)
|
||||
{
|
||||
struct virtio_gpu_resource_create_2d *cmd_p;
|
||||
struct virtio_gpu_vbuffer *vbuf;
|
||||
|
@ -388,11 +387,11 @@ void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
|
|||
|
||||
cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D);
|
||||
cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
|
||||
cmd_p->format = cpu_to_le32(format);
|
||||
cmd_p->width = cpu_to_le32(width);
|
||||
cmd_p->height = cpu_to_le32(height);
|
||||
cmd_p->format = cpu_to_le32(params->format);
|
||||
cmd_p->width = cpu_to_le32(params->width);
|
||||
cmd_p->height = cpu_to_le32(params->height);
|
||||
|
||||
virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
|
||||
virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
|
||||
bo->created = true;
|
||||
}
|
||||
|
||||
|
@ -828,7 +827,8 @@ void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
|
|||
void
|
||||
virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
|
||||
struct virtio_gpu_object *bo,
|
||||
struct virtio_gpu_resource_create_3d *rc_3d)
|
||||
struct virtio_gpu_object_params *params,
|
||||
struct virtio_gpu_fence *fence)
|
||||
{
|
||||
struct virtio_gpu_resource_create_3d *cmd_p;
|
||||
struct virtio_gpu_vbuffer *vbuf;
|
||||
|
@ -836,11 +836,21 @@ virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
|
|||
cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
|
||||
memset(cmd_p, 0, sizeof(*cmd_p));
|
||||
|
||||
*cmd_p = *rc_3d;
|
||||
cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_3D);
|
||||
cmd_p->hdr.flags = 0;
|
||||
cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
|
||||
cmd_p->format = cpu_to_le32(params->format);
|
||||
cmd_p->width = cpu_to_le32(params->width);
|
||||
cmd_p->height = cpu_to_le32(params->height);
|
||||
|
||||
virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
|
||||
cmd_p->target = cpu_to_le32(params->target);
|
||||
cmd_p->bind = cpu_to_le32(params->bind);
|
||||
cmd_p->depth = cpu_to_le32(params->depth);
|
||||
cmd_p->array_size = cpu_to_le32(params->array_size);
|
||||
cmd_p->last_level = cpu_to_le32(params->last_level);
|
||||
cmd_p->nr_samples = cpu_to_le32(params->nr_samples);
|
||||
cmd_p->flags = cpu_to_le32(params->flags);
|
||||
|
||||
virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
|
||||
bo->created = true;
|
||||
}
|
||||
|
||||
|
@ -924,8 +934,8 @@ int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
|
|||
struct scatterlist *sg;
|
||||
int si, nents;
|
||||
|
||||
if (!obj->created)
|
||||
return 0;
|
||||
if (WARN_ON_ONCE(!obj->created))
|
||||
return -EINVAL;
|
||||
|
||||
if (!obj->pages) {
|
||||
int ret;
|
||||
|
|
|
@@ -219,6 +219,8 @@ int vkms_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
spin_lock_init(&vkms_out->state_lock);

vkms_out->crc_workq = alloc_ordered_workqueue("vkms_crc_workq", 0);
if (!vkms_out->crc_workq)
return -ENOMEM;

return ret;
}

@@ -355,26 +355,16 @@ enum drm_bus_flags {
* drm_add_edid_modes().
*/
struct drm_display_info {
/**
* @name: Name of the display.
*/
char name[DRM_DISPLAY_INFO_LEN];

/**
* @width_mm: Physical width in mm.
*/
unsigned int width_mm;
unsigned int width_mm;

/**
* @height_mm: Physical height in mm.
*/
unsigned int height_mm;

/**
* @pixel_clock: Maximum pixel clock supported by the sink, in units of
* 100Hz. This mismatches the clock in &drm_display_mode (which is in
* kHZ), because that's what the EDID uses as base unit.
*/
unsigned int pixel_clock;
/**
* @bpc: Maximum bits per color channel. Used by HDMI and DP outputs.
*/

@@ -152,9 +152,9 @@ enum drm_driver_feature {
/**
* struct drm_driver - DRM driver structure
*
* This structure represent the common code for a family of cards. There will
* one drm_device for each card present in this family. It contains lots of
* vfunc entries, and a pile of those probably should be moved to more
* This structure represent the common code for a family of cards. There will be
* one &struct drm_device for each card present in this family. It contains lots
* of vfunc entries, and a pile of those probably should be moved to more
* appropriate places like &drm_mode_config_funcs or into a new operations
* structure for GEM drivers.
*/

@@ -68,10 +68,8 @@ struct drm_fb_helper_crtc {
* according to the largest width/height (so it is large enough for all CRTCs
* to scanout). But the fbdev width/height is sized to the minimum width/
* height of all the displays. This ensures that fbcon fits on the smallest
* of the attached displays.
*
* So what is passed to drm_fb_helper_fill_var() should be fb_width/fb_height,
* rather than the surface size.
* of the attached displays. fb_width/fb_height is used by
* drm_fb_helper_fill_info() to fill out the &fb_info.var structure.
*/
struct drm_fb_helper_surface_size {
u32 fb_width;

@@ -289,10 +287,9 @@ int drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper);

struct fb_info *drm_fb_helper_alloc_fbi(struct drm_fb_helper *fb_helper);
void drm_fb_helper_unregister_fbi(struct drm_fb_helper *fb_helper);
void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helper,
uint32_t fb_width, uint32_t fb_height);
void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
uint32_t depth);
void drm_fb_helper_fill_info(struct fb_info *info,
struct drm_fb_helper *fb_helper,
struct drm_fb_helper_surface_size *sizes);

void drm_fb_helper_unlink_fbi(struct drm_fb_helper *fb_helper);

@@ -418,14 +415,10 @@ static inline void drm_fb_helper_unregister_fbi(struct drm_fb_helper *fb_helper)
{
}

static inline void drm_fb_helper_fill_var(struct fb_info *info,
struct drm_fb_helper *fb_helper,
uint32_t fb_width, uint32_t fb_height)
{
}

static inline void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
uint32_t depth)
static inline void
drm_fb_helper_fill_info(struct fb_info *info,
struct drm_fb_helper *fb_helper,
struct drm_fb_helper_surface_size *sizes)
{
}

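The empty static inline above mirrors the real declaration so that fbdev setup code compiles away when fbdev emulation is not built in. A small illustrative fragment, not taken from the commit:

	/*
	 * With CONFIG_DRM_FBDEV_EMULATION=n the stub above turns this into a
	 * no-op, so the call site needs no #ifdef around it.
	 */
	drm_fb_helper_fill_info(info, helper, sizes);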
@@ -33,7 +33,6 @@
extern "C" {
#endif

#define DRM_DISPLAY_INFO_LEN 32
#define DRM_CONNECTOR_NAME_LEN 32
#define DRM_DISPLAY_MODE_LEN 32
#define DRM_PROP_NAME_LEN 32
