Merge branch 'drm-vmwgfx-next' of git://people.freedesktop.org/~syeh/repos_linux into drm-next

This series enables atomic mode set for vmwgfx.  A number of features and
larger fixes are also included.

* 'drm-vmwgfx-next' of git://people.freedesktop.org/~syeh/repos_linux: (22 commits)
  drm/vmwgfx: Properly check display/scanout surface size
  drm/vmwgfx: Support topology greater than texture size
  drm/vmwgfx: Define an overlaid handle_close ioctl.
  drm/vmwgfx: Re-implement the stream resource as a simple resource.
  drm/vmwgfx: Introduce a simple resource type
  drm/vmwgfx: Revert "drm/vmwgfx: Replace numeric parameter like 0444 with macro"
  drm/vmwgfx: Fix LDU X blank screen until mode change issue
  drm/vmwgfx: Skipping fbdev fb pinning for ldu
  drm/vmwgfx: Explicitly track screen target width and height
  drm/vmwgfx: Turn on DRIVER_ATOMIC flag
  drm/vmwgfx: Switch over to internal atomic API for SOU and LDU
  drm/vmwgfx: Switch over to internal atomic API for STDU
  drm/vmwgfx: Fixes to vmwgfx_fb
  drm/vmwgfx: Add and connect atomic state object check/commit
  drm/vmwgfx: Add and connect connector helper function
  drm/vmwgfx: Add and connect plane helper functions
  drm/vmwgfx: Add and connect CRTC helper functions
  drm/vmwgfx: Connector atomic state
  drm/vmwgfx: Plane atomic state
  drm/vmwgfx: CRTC atomic state
  ...
Dave Airlie 2017-04-02 16:10:55 +10:00
commit 7558ab6642
15 changed files with 2828 additions and 992 deletions
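
Since the headline feature is atomic mode setting, a brief user-space sketch of what that enables may be useful context. It is illustrative only, uses standard libdrm calls, and the object/property IDs are hypothetical placeholders (a real client looks them up with drmModeObjectGetProperties()):

/* Illustrative libdrm client sketch; the IDs below are hypothetical placeholders. */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

int main(void)
{
	int fd = open("/dev/dri/card0", O_RDWR | O_CLOEXEC);
	drmModeAtomicReqPtr req;
	uint32_t crtc_id = 42, active_prop_id = 7;	/* placeholders */
	int ret;

	if (fd < 0)
		return 1;

	/* Opt in to the atomic ioctl; fails unless the driver sets DRIVER_ATOMIC. */
	if (drmSetClientCap(fd, DRM_CLIENT_CAP_ATOMIC, 1)) {
		perror("DRM_CLIENT_CAP_ATOMIC");
		return 1;
	}

	req = drmModeAtomicAlloc();
	if (!req)
		return 1;
	drmModeAtomicAddProperty(req, crtc_id, active_prop_id, 1);

	/* TEST_ONLY validates the whole configuration without touching hardware. */
	ret = drmModeAtomicCommit(fd, req, DRM_MODE_ATOMIC_TEST_ONLY, NULL);
	printf("test-only commit returned %d\n", ret);

	drmModeAtomicFree(req);
	return 0;
}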


@@ -8,6 +8,7 @@ vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
vmwgfx_fence.o vmwgfx_dmabuf.o vmwgfx_scrn.o vmwgfx_context.o \
vmwgfx_surface.o vmwgfx_prime.o vmwgfx_mob.o vmwgfx_shader.o \
vmwgfx_cmdbuf_res.o vmwgfx_cmdbuf.o vmwgfx_stdu.o \
vmwgfx_cotable.o vmwgfx_so.o vmwgfx_binding.o vmwgfx_msg.o
vmwgfx_cotable.o vmwgfx_so.o vmwgfx_binding.o vmwgfx_msg.o \
vmwgfx_simple_resource.o vmwgfx_va.o
obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o


@@ -246,13 +246,13 @@ static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
void *ptr);
MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev");
module_param_named(enable_fbdev, enable_fbdev, int, S_IRUSR | S_IWUSR);
module_param_named(enable_fbdev, enable_fbdev, int, 0600);
MODULE_PARM_DESC(force_dma_api, "Force using the DMA API for TTM pages");
module_param_named(force_dma_api, vmw_force_iommu, int, S_IRUSR | S_IWUSR);
module_param_named(force_dma_api, vmw_force_iommu, int, 0600);
MODULE_PARM_DESC(restrict_iommu, "Try to limit IOMMU usage for TTM pages");
module_param_named(restrict_iommu, vmw_restrict_iommu, int, S_IRUSR | S_IWUSR);
module_param_named(restrict_iommu, vmw_restrict_iommu, int, 0600);
MODULE_PARM_DESC(force_coherent, "Force coherent TTM pages");
module_param_named(force_coherent, vmw_force_coherent, int, S_IRUSR | S_IWUSR);
module_param_named(force_coherent, vmw_force_coherent, int, 0600);
MODULE_PARM_DESC(restrict_dma_mask, "Restrict DMA mask to 44 bits with IOMMU");
module_param_named(restrict_dma_mask, vmw_restrict_dma_mask, int, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(assume_16bpp, "Assume 16-bpp when filtering modes");
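
The permission revert above is purely cosmetic: S_IRUSR | S_IWUSR and octal 0600 name the same owner read/write mode. A trivial user-space check, for reference:

#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
	/* S_IRUSR (0400) | S_IWUSR (0200) == 0600: owner may read and write. */
	printf("%s\n", (S_IRUSR | S_IWUSR) == 0600 ? "equal" : "different");
	return 0;
}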
@@ -650,6 +650,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
spin_lock_init(&dev_priv->waiter_lock);
spin_lock_init(&dev_priv->cap_lock);
spin_lock_init(&dev_priv->svga_lock);
spin_lock_init(&dev_priv->cursor_lock);
for (i = vmw_res_context; i < vmw_res_max; ++i) {
idr_init(&dev_priv->res_idr[i]);
@@ -897,6 +898,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
goto out_no_fifo;
DRM_INFO("DX: %s\n", dev_priv->has_dx ? "yes." : "no.");
DRM_INFO("Atomic: %s\n",
(dev->driver->driver_features & DRIVER_ATOMIC) ? "yes" : "no");
snprintf(host_log, sizeof(host_log), "vmwgfx: %s-%s",
VMWGFX_REPO, VMWGFX_GIT_VERSION);
@@ -1509,7 +1512,7 @@ static const struct file_operations vmwgfx_driver_fops = {
static struct drm_driver driver = {
.driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
DRIVER_MODESET | DRIVER_PRIME | DRIVER_RENDER,
DRIVER_MODESET | DRIVER_PRIME | DRIVER_RENDER | DRIVER_ATOMIC,
.load = vmw_driver_load,
.unload = vmw_driver_unload,
.lastclose = vmw_lastclose,
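
With the flag set, core and driver code can key off it. A minimal sketch of such a check (drm_core_check_feature() is the core helper for this; the DRM_INFO line earlier in the hunk open-codes the same test):

#include <drm/drmP.h>

/* Sketch only: equivalent to the open-coded driver_features test above. */
static void example_report_atomic(struct drm_device *dev)
{
	DRM_INFO("Atomic: %s\n",
		 drm_core_check_feature(dev, DRIVER_ATOMIC) ? "yes" : "no");
}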


@@ -153,7 +153,6 @@ enum vmw_cmdbuf_res_type {
struct vmw_cmdbuf_res_manager;
struct vmw_cursor_snooper {
struct drm_crtc *crtc;
size_t age;
uint32_t *image;
};
@@ -415,6 +414,7 @@ struct vmw_private {
unsigned num_implicit;
struct vmw_framebuffer *implicit_fb;
struct mutex global_kms_state_mutex;
spinlock_t cursor_lock;
/*
* Context and surface management.


@@ -434,7 +434,7 @@ static int vmw_fb_kms_detach(struct vmw_fb_par *par,
set.y = 0;
set.mode = NULL;
set.fb = NULL;
set.num_connectors = 1;
set.num_connectors = 0;
set.connectors = &par->con;
ret = drm_mode_set_config_internal(&set);
if (ret) {
@@ -451,13 +451,15 @@
}
if (par->vmw_bo && detach_bo) {
struct vmw_private *vmw_priv = par->vmw_priv;
if (par->bo_ptr) {
ttm_bo_kunmap(&par->map);
par->bo_ptr = NULL;
}
if (unref_bo)
vmw_dmabuf_unreference(&par->vmw_bo);
else
else if (vmw_priv->active_display_unit != vmw_du_legacy)
vmw_dmabuf_unpin(par->vmw_priv, par->vmw_bo, false);
}
@@ -585,18 +587,25 @@ static int vmw_fb_set_par(struct fb_info *info)
/*
* Pin before mapping. Since we don't know in what placement
* to pin, call into KMS to do it for us.
* to pin, call into KMS to do it for us. LDU doesn't require
* additional pinning because set_config() would've pinned
* it already
*/
if (vmw_priv->active_display_unit != vmw_du_legacy) {
ret = vfb->pin(vfb);
if (ret) {
DRM_ERROR("Could not pin the fbdev framebuffer.\n");
DRM_ERROR("Could not pin the fbdev "
"framebuffer.\n");
goto out_unlock;
}
}
ret = ttm_bo_kmap(&par->vmw_bo->base, 0,
par->vmw_bo->base.num_pages, &par->map);
if (ret) {
if (vmw_priv->active_display_unit != vmw_du_legacy)
vfb->unpin(vfb);
DRM_ERROR("Could not map the fbdev framebuffer.\n");
goto out_unlock;
}
@@ -822,7 +831,9 @@ int vmw_fb_off(struct vmw_private *vmw_priv)
flush_delayed_work(&par->local_work);
mutex_lock(&par->bo_mutex);
drm_modeset_lock_all(vmw_priv->dev);
(void) vmw_fb_kms_detach(par, true, false);
drm_modeset_unlock_all(vmw_priv->dev);
mutex_unlock(&par->bo_mutex);
return 0;

File diff suppressed because it is too large.


@@ -33,6 +33,8 @@
#include <drm/drm_encoder.h>
#include "vmwgfx_drv.h"
/**
* struct vmw_kms_dirty - closure structure for the vmw_kms_helper_dirty
* function.
@@ -125,19 +127,71 @@ struct vmw_framebuffer_dmabuf {
};
/*
* Basic cursor manipulation
*/
int vmw_cursor_update_image(struct vmw_private *dev_priv,
u32 *image, u32 width, u32 height,
u32 hotspotX, u32 hotspotY);
int vmw_cursor_update_dmabuf(struct vmw_private *dev_priv,
struct vmw_dma_buffer *dmabuf,
u32 width, u32 height,
u32 hotspotX, u32 hotspotY);
void vmw_cursor_update_position(struct vmw_private *dev_priv,
bool show, int x, int y);
static const uint32_t vmw_primary_plane_formats[] = {
DRM_FORMAT_XRGB1555,
DRM_FORMAT_RGB565,
DRM_FORMAT_RGB888,
DRM_FORMAT_XRGB8888,
DRM_FORMAT_ARGB8888,
};
static const uint32_t vmw_cursor_plane_formats[] = {
DRM_FORMAT_ARGB8888,
};
#define vmw_crtc_state_to_vcs(x) container_of(x, struct vmw_crtc_state, base)
#define vmw_plane_state_to_vps(x) container_of(x, struct vmw_plane_state, base)
#define vmw_connector_state_to_vcs(x) \
container_of(x, struct vmw_connector_state, base)
/**
* Derived class for crtc state object
*
* @base DRM crtc object
*/
struct vmw_crtc_state {
struct drm_crtc_state base;
};
/**
* Derived class for plane state object
*
* @base DRM plane object
* @surf Display surface for STDU
* @dmabuf display dmabuf for SOU
* @content_fb_type Used by STDU.
* @dmabuf_size Size of the dmabuf, used by Screen Object Display Unit
* @pinned pin count for STDU display surface
*/
struct vmw_plane_state {
struct drm_plane_state base;
struct vmw_surface *surf;
struct vmw_dma_buffer *dmabuf;
int content_fb_type;
unsigned long dmabuf_size;
int pinned;
/* For CPU Blit */
struct ttm_bo_kmap_obj host_map, guest_map;
unsigned int cpp;
};
/**
* Derived class for connector state object
*
* @base DRM connector object
* @is_implicit connector property
*
*/
struct vmw_connector_state {
struct drm_connector_state base;
bool is_implicit;
};
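
The container_of() casts above are the standard way atomic drivers subclass DRM state. As a hedged sketch of the pattern (the actual vmwgfx implementations live in one of the diffs suppressed as too large and may differ in detail), duplicating a derived CRTC state typically looks like this:

/* Sketch only, not the actual vmwgfx code: the subclass is recovered with the
 * vmw_crtc_state_to_vcs() container_of() cast, the whole object is copied,
 * and the core helper fixes up the base state's bookkeeping. */
#include <linux/slab.h>
#include <linux/string.h>
#include <drm/drm_atomic_helper.h>
#include "vmwgfx_kms.h"

static struct drm_crtc_state *
example_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct vmw_crtc_state *vcs;

	if (WARN_ON(!crtc->state))
		return NULL;

	/* crtc->state is the base member of the driver's vmw_crtc_state. */
	vcs = kmemdup(vmw_crtc_state_to_vcs(crtc->state), sizeof(*vcs),
		      GFP_KERNEL);
	if (!vcs)
		return NULL;

	__drm_atomic_helper_crtc_duplicate_state(crtc, &vcs->base);

	return &vcs->base;
}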
/**
* Base class display unit.
@@ -150,6 +204,8 @@ struct vmw_display_unit {
struct drm_crtc crtc;
struct drm_encoder encoder;
struct drm_connector connector;
struct drm_plane primary;
struct drm_plane cursor;
struct vmw_surface *cursor_surface;
struct vmw_dma_buffer *cursor_dmabuf;
@@ -203,6 +259,18 @@ int vmw_du_crtc_cursor_set2(struct drm_crtc *crtc, struct drm_file *file_priv,
uint32_t handle, uint32_t width, uint32_t height,
int32_t hot_x, int32_t hot_y);
int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y);
int vmw_du_connector_set_property(struct drm_connector *connector,
struct drm_property *property,
uint64_t val);
int vmw_du_connector_atomic_set_property(struct drm_connector *connector,
struct drm_connector_state *state,
struct drm_property *property,
uint64_t val);
int
vmw_du_connector_atomic_get_property(struct drm_connector *connector,
const struct drm_connector_state *state,
struct drm_property *property,
uint64_t *val);
int vmw_du_connector_dpms(struct drm_connector *connector, int mode);
void vmw_du_connector_save(struct drm_connector *connector);
void vmw_du_connector_restore(struct drm_connector *connector);
@@ -210,9 +278,6 @@ enum drm_connector_status
vmw_du_connector_detect(struct drm_connector *connector, bool force);
int vmw_du_connector_fill_modes(struct drm_connector *connector,
uint32_t max_width, uint32_t max_height);
int vmw_du_connector_set_property(struct drm_connector *connector,
struct drm_property *property,
uint64_t val);
int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
struct vmw_framebuffer *framebuffer,
const struct drm_clip_rect *clips,
@@ -270,6 +335,55 @@ void vmw_kms_update_implicit_fb(struct vmw_private *dev_priv,
void vmw_kms_create_implicit_placement_property(struct vmw_private *dev_priv,
bool immutable);
/* Universal Plane Helpers */
void vmw_du_primary_plane_destroy(struct drm_plane *plane);
void vmw_du_cursor_plane_destroy(struct drm_plane *plane);
int vmw_du_cursor_plane_disable(struct drm_plane *plane);
int vmw_du_cursor_plane_update(struct drm_plane *plane,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
int crtc_x, int crtc_y,
unsigned int crtc_w,
unsigned int crtc_h,
uint32_t src_x, uint32_t src_y,
uint32_t src_w, uint32_t src_h);
/* Atomic Helpers */
int vmw_du_primary_plane_atomic_check(struct drm_plane *plane,
struct drm_plane_state *state);
int vmw_du_cursor_plane_atomic_check(struct drm_plane *plane,
struct drm_plane_state *state);
void vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
struct drm_plane_state *old_state);
void vmw_du_cursor_plane_atomic_disable(struct drm_plane *plane,
struct drm_plane_state *old_state);
int vmw_du_cursor_plane_prepare_fb(struct drm_plane *plane,
struct drm_plane_state *new_state);
void vmw_du_plane_cleanup_fb(struct drm_plane *plane,
struct drm_plane_state *old_state);
void vmw_du_plane_reset(struct drm_plane *plane);
struct drm_plane_state *vmw_du_plane_duplicate_state(struct drm_plane *plane);
void vmw_du_plane_destroy_state(struct drm_plane *plane,
struct drm_plane_state *state);
void vmw_du_plane_unpin_surf(struct vmw_plane_state *vps,
bool unreference);
int vmw_du_crtc_atomic_check(struct drm_crtc *crtc,
struct drm_crtc_state *state);
void vmw_du_crtc_atomic_begin(struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state);
void vmw_du_crtc_atomic_flush(struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state);
void vmw_du_crtc_reset(struct drm_crtc *crtc);
struct drm_crtc_state *vmw_du_crtc_duplicate_state(struct drm_crtc *crtc);
void vmw_du_crtc_destroy_state(struct drm_crtc *crtc,
struct drm_crtc_state *state);
void vmw_du_connector_reset(struct drm_connector *connector);
struct drm_connector_state *
vmw_du_connector_duplicate_state(struct drm_connector *connector);
void vmw_du_connector_destroy_state(struct drm_connector *connector,
struct drm_connector_state *state);
/*
* Legacy display unit functions - vmwgfx_ldu.c
@@ -339,5 +453,6 @@ int vmw_kms_stdu_dma(struct vmw_private *dev_priv,
bool to_surface,
bool interruptible);
int vmw_kms_set_config(struct drm_mode_set *set);
#endif


@@ -27,6 +27,8 @@
#include "vmwgfx_kms.h"
#include <drm/drm_plane_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#define vmw_crtc_to_ldu(x) \
@@ -75,10 +77,9 @@ static int vmw_ldu_commit_list(struct vmw_private *dev_priv)
{
struct vmw_legacy_display *lds = dev_priv->ldu_priv;
struct vmw_legacy_display_unit *entry;
struct vmw_display_unit *du = NULL;
struct drm_framebuffer *fb = NULL;
struct drm_crtc *crtc = NULL;
int i = 0, ret;
int i = 0;
/* If there is no display topology the host just assumes
* that the guest will set the same layout as the host.
@@ -94,7 +95,7 @@ static int vmw_ldu_commit_list(struct vmw_private *dev_priv)
if (crtc == NULL)
return 0;
fb = entry->base.crtc.primary->fb;
fb = entry->base.crtc.primary->state->fb;
return vmw_kms_write_svga(dev_priv, w, h, fb->pitches[0],
fb->format->cpp[0] * 8,
@@ -103,7 +104,7 @@ static int vmw_ldu_commit_list(struct vmw_private *dev_priv)
if (!list_empty(&lds->active)) {
entry = list_entry(lds->active.next, typeof(*entry), active);
fb = entry->base.crtc.primary->fb;
fb = entry->base.crtc.primary->state->fb;
vmw_kms_write_svga(dev_priv, fb->width, fb->height, fb->pitches[0],
fb->format->cpp[0] * 8, fb->format->depth);
@@ -132,25 +133,6 @@ static int vmw_ldu_commit_list(struct vmw_private *dev_priv)
lds->last_num_active = lds->num_active;
/* Find the first du with a cursor. */
list_for_each_entry(entry, &lds->active, active) {
du = &entry->base;
if (!du->cursor_dmabuf)
continue;
ret = vmw_cursor_update_dmabuf(dev_priv,
du->cursor_dmabuf,
64, 64,
du->hotspot_x,
du->hotspot_y);
if (ret == 0)
break;
DRM_ERROR("Could not update cursor image\n");
}
return 0;
}
@@ -185,6 +167,7 @@ static int vmw_ldu_add_active(struct vmw_private *vmw_priv,
if (vfb != ld->fb) {
if (ld->fb && ld->fb->unpin)
ld->fb->unpin(ld->fb);
vmw_svga_enable(vmw_priv);
if (vfb->pin)
vfb->pin(vfb);
ld->fb = vfb;
@@ -208,101 +191,59 @@ static int vmw_ldu_add_active(struct vmw_private *vmw_priv,
return 0;
}
static int vmw_ldu_crtc_set_config(struct drm_mode_set *set)
/**
* vmw_ldu_crtc_mode_set_nofb - Enable svga
*
* @crtc: CRTC associated with the new screen
*
* For LDU, just enable the svga
*/
static void vmw_ldu_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
struct vmw_private *dev_priv;
struct vmw_legacy_display_unit *ldu;
struct drm_connector *connector;
struct drm_display_mode *mode;
struct drm_encoder *encoder;
struct vmw_framebuffer *vfb;
struct drm_framebuffer *fb;
struct drm_crtc *crtc;
}
if (!set)
return -EINVAL;
/**
* vmw_ldu_crtc_helper_prepare - Noop
*
* @crtc: CRTC associated with the new screen
*
* Prepares the CRTC for a mode set, but we don't need to do anything here.
*
*/
static void vmw_ldu_crtc_helper_prepare(struct drm_crtc *crtc)
{
}
if (!set->crtc)
return -EINVAL;
/**
* vmw_ldu_crtc_helper_commit - Noop
*
* @crtc: CRTC associated with the new screen
*
* This is called after a mode set has been completed. Here's
* usually a good place to call vmw_ldu_add_active/vmw_ldu_del_active
* but since for LDU the display plane is closely tied to the
* CRTC, it makes more sense to do those at plane update time.
*/
static void vmw_ldu_crtc_helper_commit(struct drm_crtc *crtc)
{
}
/* get the ldu */
crtc = set->crtc;
ldu = vmw_crtc_to_ldu(crtc);
vfb = set->fb ? vmw_framebuffer_to_vfb(set->fb) : NULL;
dev_priv = vmw_priv(crtc->dev);
if (set->num_connectors > 1) {
DRM_ERROR("to many connectors\n");
return -EINVAL;
}
if (set->num_connectors == 1 &&
set->connectors[0] != &ldu->base.connector) {
DRM_ERROR("connector doesn't match %p %p\n",
set->connectors[0], &ldu->base.connector);
return -EINVAL;
}
/* ldu only supports one fb active at the time */
if (dev_priv->ldu_priv->fb && vfb &&
!(dev_priv->ldu_priv->num_active == 1 &&
!list_empty(&ldu->active)) &&
dev_priv->ldu_priv->fb != vfb) {
DRM_ERROR("Multiple framebuffers not supported\n");
return -EINVAL;
}
/* since they always map one to one these are safe */
connector = &ldu->base.connector;
encoder = &ldu->base.encoder;
/* should we turn the crtc off? */
if (set->num_connectors == 0 || !set->mode || !set->fb) {
connector->encoder = NULL;
encoder->crtc = NULL;
crtc->primary->fb = NULL;
crtc->enabled = false;
vmw_ldu_del_active(dev_priv, ldu);
return vmw_ldu_commit_list(dev_priv);
}
/* we now know we want to set a mode */
mode = set->mode;
fb = set->fb;
if (set->x + mode->hdisplay > fb->width ||
set->y + mode->vdisplay > fb->height) {
DRM_ERROR("set outside of framebuffer\n");
return -EINVAL;
}
vmw_svga_enable(dev_priv);
crtc->primary->fb = fb;
encoder->crtc = crtc;
connector->encoder = encoder;
crtc->x = set->x;
crtc->y = set->y;
crtc->mode = *mode;
crtc->enabled = true;
ldu->base.set_gui_x = set->x;
ldu->base.set_gui_y = set->y;
vmw_ldu_add_active(dev_priv, ldu, vfb);
return vmw_ldu_commit_list(dev_priv);
/**
* vmw_ldu_crtc_helper_disable - Turns off CRTC
*
* @crtc: CRTC to be turned off
*/
static void vmw_ldu_crtc_helper_disable(struct drm_crtc *crtc)
{
}
static const struct drm_crtc_funcs vmw_legacy_crtc_funcs = {
.cursor_set2 = vmw_du_crtc_cursor_set2,
.cursor_move = vmw_du_crtc_cursor_move,
.gamma_set = vmw_du_crtc_gamma_set,
.destroy = vmw_ldu_crtc_destroy,
.set_config = vmw_ldu_crtc_set_config,
.reset = vmw_du_crtc_reset,
.atomic_duplicate_state = vmw_du_crtc_duplicate_state,
.atomic_destroy_state = vmw_du_crtc_destroy_state,
.set_config = vmw_kms_set_config,
};
@@ -334,15 +275,138 @@ static const struct drm_connector_funcs vmw_legacy_connector_funcs = {
.fill_modes = vmw_du_connector_fill_modes,
.set_property = vmw_du_connector_set_property,
.destroy = vmw_ldu_connector_destroy,
.reset = vmw_du_connector_reset,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
.atomic_set_property = vmw_du_connector_atomic_set_property,
.atomic_get_property = vmw_du_connector_atomic_get_property,
};
static const struct
drm_connector_helper_funcs vmw_ldu_connector_helper_funcs = {
.best_encoder = drm_atomic_helper_best_encoder,
};
/*
* Legacy Display Plane Functions
*/
/**
* vmw_ldu_primary_plane_cleanup_fb - Noop
*
* @plane: display plane
* @old_state: Contains the FB to clean up
*
* Unpins the display surface
*
* Returns 0 on success
*/
static void
vmw_ldu_primary_plane_cleanup_fb(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
}
/**
* vmw_ldu_primary_plane_prepare_fb - Noop
*
* @plane: display plane
* @new_state: info on the new plane state, including the FB
*
* Returns 0 on success
*/
static int
vmw_ldu_primary_plane_prepare_fb(struct drm_plane *plane,
struct drm_plane_state *new_state)
{
return 0;
}
static void
vmw_ldu_primary_plane_atomic_update(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
struct vmw_private *dev_priv;
struct vmw_legacy_display_unit *ldu;
struct vmw_framebuffer *vfb;
struct drm_framebuffer *fb;
struct drm_crtc *crtc = plane->state->crtc ?: old_state->crtc;
ldu = vmw_crtc_to_ldu(crtc);
dev_priv = vmw_priv(plane->dev);
fb = plane->state->fb;
vfb = (fb) ? vmw_framebuffer_to_vfb(fb) : NULL;
if (vfb)
vmw_ldu_add_active(dev_priv, ldu, vfb);
else
vmw_ldu_del_active(dev_priv, ldu);
vmw_ldu_commit_list(dev_priv);
}
static const struct drm_plane_funcs vmw_ldu_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = vmw_du_primary_plane_destroy,
.reset = vmw_du_plane_reset,
.atomic_duplicate_state = vmw_du_plane_duplicate_state,
.atomic_destroy_state = vmw_du_plane_destroy_state,
};
static const struct drm_plane_funcs vmw_ldu_cursor_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = vmw_du_cursor_plane_destroy,
.reset = vmw_du_plane_reset,
.atomic_duplicate_state = vmw_du_plane_duplicate_state,
.atomic_destroy_state = vmw_du_plane_destroy_state,
};
/*
* Atomic Helpers
*/
static const struct
drm_plane_helper_funcs vmw_ldu_cursor_plane_helper_funcs = {
.atomic_check = vmw_du_cursor_plane_atomic_check,
.atomic_update = vmw_du_cursor_plane_atomic_update,
.prepare_fb = vmw_du_cursor_plane_prepare_fb,
.cleanup_fb = vmw_du_plane_cleanup_fb,
};
static const struct
drm_plane_helper_funcs vmw_ldu_primary_plane_helper_funcs = {
.atomic_check = vmw_du_primary_plane_atomic_check,
.atomic_update = vmw_ldu_primary_plane_atomic_update,
.prepare_fb = vmw_ldu_primary_plane_prepare_fb,
.cleanup_fb = vmw_ldu_primary_plane_cleanup_fb,
};
static const struct drm_crtc_helper_funcs vmw_ldu_crtc_helper_funcs = {
.prepare = vmw_ldu_crtc_helper_prepare,
.commit = vmw_ldu_crtc_helper_commit,
.disable = vmw_ldu_crtc_helper_disable,
.mode_set_nofb = vmw_ldu_crtc_mode_set_nofb,
.atomic_check = vmw_du_crtc_atomic_check,
.atomic_begin = vmw_du_crtc_atomic_begin,
.atomic_flush = vmw_du_crtc_atomic_flush,
};
static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
{
struct vmw_legacy_display_unit *ldu;
struct drm_device *dev = dev_priv->dev;
struct drm_connector *connector;
struct drm_encoder *encoder;
struct drm_plane *primary, *cursor;
struct drm_crtc *crtc;
int ret;
ldu = kzalloc(sizeof(*ldu), GFP_KERNEL);
if (!ldu)
@@ -352,6 +416,8 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
crtc = &ldu->base.crtc;
encoder = &ldu->base.encoder;
connector = &ldu->base.connector;
primary = &ldu->base.primary;
cursor = &ldu->base.cursor;
INIT_LIST_HEAD(&ldu->active);
@@ -359,21 +425,86 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
ldu->base.pref_width = dev_priv->initial_width;
ldu->base.pref_height = dev_priv->initial_height;
ldu->base.pref_mode = NULL;
/*
* Remove this after enabling atomic because property values can
* only exist in a state object
*/
ldu->base.is_implicit = true;
drm_connector_init(dev, connector, &vmw_legacy_connector_funcs,
DRM_MODE_CONNECTOR_VIRTUAL);
connector->status = vmw_du_connector_detect(connector, true);
/* Initialize primary plane */
vmw_du_plane_reset(primary);
drm_encoder_init(dev, encoder, &vmw_legacy_encoder_funcs,
ret = drm_universal_plane_init(dev, &ldu->base.primary,
0, &vmw_ldu_plane_funcs,
vmw_primary_plane_formats,
ARRAY_SIZE(vmw_primary_plane_formats),
DRM_PLANE_TYPE_PRIMARY, NULL);
if (ret) {
DRM_ERROR("Failed to initialize primary plane");
goto err_free;
}
drm_plane_helper_add(primary, &vmw_ldu_primary_plane_helper_funcs);
/* Initialize cursor plane */
vmw_du_plane_reset(cursor);
ret = drm_universal_plane_init(dev, &ldu->base.cursor,
0, &vmw_ldu_cursor_funcs,
vmw_cursor_plane_formats,
ARRAY_SIZE(vmw_cursor_plane_formats),
DRM_PLANE_TYPE_CURSOR, NULL);
if (ret) {
DRM_ERROR("Failed to initialize cursor plane");
drm_plane_cleanup(&ldu->base.primary);
goto err_free;
}
drm_plane_helper_add(cursor, &vmw_ldu_cursor_plane_helper_funcs);
vmw_du_connector_reset(connector);
ret = drm_connector_init(dev, connector, &vmw_legacy_connector_funcs,
DRM_MODE_CONNECTOR_VIRTUAL);
if (ret) {
DRM_ERROR("Failed to initialize connector\n");
goto err_free;
}
drm_connector_helper_add(connector, &vmw_ldu_connector_helper_funcs);
connector->status = vmw_du_connector_detect(connector, true);
vmw_connector_state_to_vcs(connector->state)->is_implicit = true;
ret = drm_encoder_init(dev, encoder, &vmw_legacy_encoder_funcs,
DRM_MODE_ENCODER_VIRTUAL, NULL);
drm_mode_connector_attach_encoder(connector, encoder);
if (ret) {
DRM_ERROR("Failed to initialize encoder\n");
goto err_free_connector;
}
(void) drm_mode_connector_attach_encoder(connector, encoder);
encoder->possible_crtcs = (1 << unit);
encoder->possible_clones = 0;
(void) drm_connector_register(connector);
ret = drm_connector_register(connector);
if (ret) {
DRM_ERROR("Failed to register connector\n");
goto err_free_encoder;
}
drm_crtc_init(dev, crtc, &vmw_legacy_crtc_funcs);
vmw_du_crtc_reset(crtc);
ret = drm_crtc_init_with_planes(dev, crtc, &ldu->base.primary,
&ldu->base.cursor,
&vmw_legacy_crtc_funcs, NULL);
if (ret) {
DRM_ERROR("Failed to initialize CRTC\n");
goto err_free_unregister;
}
drm_crtc_helper_add(crtc, &vmw_ldu_crtc_helper_funcs);
drm_mode_crtc_set_gamma_size(crtc, 256);
@@ -390,6 +521,16 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
1);
return 0;
err_free_unregister:
drm_connector_unregister(connector);
err_free_encoder:
drm_encoder_cleanup(encoder);
err_free_connector:
drm_connector_cleanup(connector);
err_free:
kfree(ldu);
return ret;
}
int vmw_kms_ldu_init_display(struct vmw_private *dev_priv)


@@ -45,31 +45,6 @@ struct vmw_bo_user_rep {
uint64_t map_handle;
};
struct vmw_stream {
struct vmw_resource res;
uint32_t stream_id;
};
struct vmw_user_stream {
struct ttm_base_object base;
struct vmw_stream stream;
};
static uint64_t vmw_user_stream_size;
static const struct vmw_res_func vmw_stream_func = {
.res_type = vmw_res_stream,
.needs_backup = false,
.may_evict = false,
.type_name = "video streams",
.backup_placement = NULL,
.create = NULL,
.destroy = NULL,
.bind = NULL,
.unbind = NULL
};
static inline struct vmw_dma_buffer *
vmw_dma_buffer(struct ttm_buffer_object *bo)
{
@@ -259,24 +234,6 @@ void vmw_resource_activate(struct vmw_resource *res,
write_unlock(&dev_priv->resource_lock);
}
static struct vmw_resource *vmw_resource_lookup(struct vmw_private *dev_priv,
struct idr *idr, int id)
{
struct vmw_resource *res;
read_lock(&dev_priv->resource_lock);
res = idr_find(idr, id);
if (!res || !res->avail || !kref_get_unless_zero(&res->kref))
res = NULL;
read_unlock(&dev_priv->resource_lock);
if (unlikely(res == NULL))
return NULL;
return res;
}
/**
* vmw_user_resource_lookup_handle - lookup a struct resource from a
* TTM user-space handle and perform basic type checks
@@ -776,217 +733,6 @@ int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
TTM_REF_USAGE, NULL);
}
/*
* Stream management
*/
static void vmw_stream_destroy(struct vmw_resource *res)
{
struct vmw_private *dev_priv = res->dev_priv;
struct vmw_stream *stream;
int ret;
DRM_INFO("%s: unref\n", __func__);
stream = container_of(res, struct vmw_stream, res);
ret = vmw_overlay_unref(dev_priv, stream->stream_id);
WARN_ON(ret != 0);
}
static int vmw_stream_init(struct vmw_private *dev_priv,
struct vmw_stream *stream,
void (*res_free) (struct vmw_resource *res))
{
struct vmw_resource *res = &stream->res;
int ret;
ret = vmw_resource_init(dev_priv, res, false, res_free,
&vmw_stream_func);
if (unlikely(ret != 0)) {
if (res_free == NULL)
kfree(stream);
else
res_free(&stream->res);
return ret;
}
ret = vmw_overlay_claim(dev_priv, &stream->stream_id);
if (ret) {
vmw_resource_unreference(&res);
return ret;
}
DRM_INFO("%s: claimed\n", __func__);
vmw_resource_activate(&stream->res, vmw_stream_destroy);
return 0;
}
static void vmw_user_stream_free(struct vmw_resource *res)
{
struct vmw_user_stream *stream =
container_of(res, struct vmw_user_stream, stream.res);
struct vmw_private *dev_priv = res->dev_priv;
ttm_base_object_kfree(stream, base);
ttm_mem_global_free(vmw_mem_glob(dev_priv),
vmw_user_stream_size);
}
/**
* This function is called when user space has no more references on the
* base object. It releases the base-object's reference on the resource object.
*/
static void vmw_user_stream_base_release(struct ttm_base_object **p_base)
{
struct ttm_base_object *base = *p_base;
struct vmw_user_stream *stream =
container_of(base, struct vmw_user_stream, base);
struct vmw_resource *res = &stream->stream.res;
*p_base = NULL;
vmw_resource_unreference(&res);
}
int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct vmw_private *dev_priv = vmw_priv(dev);
struct vmw_resource *res;
struct vmw_user_stream *stream;
struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
struct idr *idr = &dev_priv->res_idr[vmw_res_stream];
int ret = 0;
res = vmw_resource_lookup(dev_priv, idr, arg->stream_id);
if (unlikely(res == NULL))
return -EINVAL;
if (res->res_free != &vmw_user_stream_free) {
ret = -EINVAL;
goto out;
}
stream = container_of(res, struct vmw_user_stream, stream.res);
if (stream->base.tfile != tfile) {
ret = -EINVAL;
goto out;
}
ttm_ref_object_base_unref(tfile, stream->base.hash.key, TTM_REF_USAGE);
out:
vmw_resource_unreference(&res);
return ret;
}
int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct vmw_private *dev_priv = vmw_priv(dev);
struct vmw_user_stream *stream;
struct vmw_resource *res;
struct vmw_resource *tmp;
struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
int ret;
/*
* Approximate idr memory usage with 128 bytes. It will be limited
* by maximum number_of streams anyway?
*/
if (unlikely(vmw_user_stream_size == 0))
vmw_user_stream_size = ttm_round_pot(sizeof(*stream)) + 128;
ret = ttm_read_lock(&dev_priv->reservation_sem, true);
if (unlikely(ret != 0))
return ret;
ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
vmw_user_stream_size,
false, true);
ttm_read_unlock(&dev_priv->reservation_sem);
if (unlikely(ret != 0)) {
if (ret != -ERESTARTSYS)
DRM_ERROR("Out of graphics memory for stream"
" creation.\n");
goto out_ret;
}
stream = kmalloc(sizeof(*stream), GFP_KERNEL);
if (unlikely(stream == NULL)) {
ttm_mem_global_free(vmw_mem_glob(dev_priv),
vmw_user_stream_size);
ret = -ENOMEM;
goto out_ret;
}
res = &stream->stream.res;
stream->base.shareable = false;
stream->base.tfile = NULL;
/*
* From here on, the destructor takes over resource freeing.
*/
ret = vmw_stream_init(dev_priv, &stream->stream, vmw_user_stream_free);
if (unlikely(ret != 0))
goto out_ret;
tmp = vmw_resource_reference(res);
ret = ttm_base_object_init(tfile, &stream->base, false, VMW_RES_STREAM,
&vmw_user_stream_base_release, NULL);
if (unlikely(ret != 0)) {
vmw_resource_unreference(&tmp);
goto out_err;
}
arg->stream_id = res->id;
out_err:
vmw_resource_unreference(&res);
out_ret:
return ret;
}
int vmw_user_stream_lookup(struct vmw_private *dev_priv,
struct ttm_object_file *tfile,
uint32_t *inout_id, struct vmw_resource **out)
{
struct vmw_user_stream *stream;
struct vmw_resource *res;
int ret;
res = vmw_resource_lookup(dev_priv, &dev_priv->res_idr[vmw_res_stream],
*inout_id);
if (unlikely(res == NULL))
return -EINVAL;
if (res->res_free != &vmw_user_stream_free) {
ret = -EINVAL;
goto err_ref;
}
stream = container_of(res, struct vmw_user_stream, stream.res);
if (stream->base.tfile != tfile) {
ret = -EPERM;
goto err_ref;
}
*inout_id = stream->stream.stream_id;
*out = res;
return 0;
err_ref:
vmw_resource_unreference(&res);
return ret;
}
/**
* vmw_dumb_create - Create a dumb kms buffer
*


@@ -30,6 +30,8 @@
#include "vmwgfx_drv.h"
#define VMW_IDA_ACC_SIZE 128
enum vmw_cmdbuf_res_state {
VMW_CMDBUF_RES_COMMITTED,
VMW_CMDBUF_RES_ADD,
@@ -83,6 +85,35 @@ struct vmw_res_func {
enum vmw_cmdbuf_res_state state);
};
/**
* struct vmw_simple_resource_func - members and functions common for the
* simple resource helpers.
* @res_func: struct vmw_res_func as described above.
* @ttm_res_type: TTM resource type used for handle recognition.
* @size: Size of the simple resource information struct.
* @init: Initialize the simple resource information.
* @hw_destroy: A resource hw_destroy function.
* @set_arg_handle: Set the handle output argument of the ioctl create struct.
*/
struct vmw_simple_resource_func {
const struct vmw_res_func res_func;
int ttm_res_type;
size_t size;
int (*init)(struct vmw_resource *res, void *data);
void (*hw_destroy)(struct vmw_resource *res);
void (*set_arg_handle)(void *data, u32 handle);
};
/**
* struct vmw_simple_resource - Kernel only side simple resource
* @res: The resource we derive from.
* @func: The method and member virtual table.
*/
struct vmw_simple_resource {
struct vmw_resource res;
const struct vmw_simple_resource_func *func;
};
int vmw_resource_alloc_id(struct vmw_resource *res);
void vmw_resource_release_id(struct vmw_resource *res);
int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
@@ -91,4 +122,13 @@ int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
const struct vmw_res_func *func);
void vmw_resource_activate(struct vmw_resource *res,
void (*hw_destroy) (struct vmw_resource *));
int
vmw_simple_resource_create_ioctl(struct drm_device *dev,
void *data,
struct drm_file *file_priv,
const struct vmw_simple_resource_func *func);
struct vmw_resource *
vmw_simple_resource_lookup(struct ttm_object_file *tfile,
uint32_t handle,
const struct vmw_simple_resource_func *func);
#endif


@@ -27,6 +27,8 @@
#include "vmwgfx_kms.h"
#include <drm/drm_plane_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#define vmw_crtc_to_sou(x) \
@@ -203,203 +205,116 @@ static int vmw_sou_fifo_destroy(struct vmw_private *dev_priv,
}
/**
* Free the backing store.
* vmw_sou_crtc_mode_set_nofb - Create new screen
*
* @crtc: CRTC associated with the new screen
*
* This function creates/destroys a screen. This function cannot fail, so if
* somehow we run into a failure, just do the best we can to get out.
*/
static void vmw_sou_backing_free(struct vmw_private *dev_priv,
struct vmw_screen_object_unit *sou)
{
vmw_dmabuf_unreference(&sou->buffer);
sou->buffer_size = 0;
}
/**
* Allocate the backing store for the buffer.
*/
static int vmw_sou_backing_alloc(struct vmw_private *dev_priv,
struct vmw_screen_object_unit *sou,
unsigned long size)
{
int ret;
if (sou->buffer_size == size)
return 0;
if (sou->buffer)
vmw_sou_backing_free(dev_priv, sou);
sou->buffer = kzalloc(sizeof(*sou->buffer), GFP_KERNEL);
if (unlikely(sou->buffer == NULL))
return -ENOMEM;
/* After we have alloced the backing store might not be able to
* resume the overlays, this is preferred to failing to alloc.
*/
vmw_overlay_pause_all(dev_priv);
ret = vmw_dmabuf_init(dev_priv, sou->buffer, size,
&vmw_vram_ne_placement,
false, &vmw_dmabuf_bo_free);
vmw_overlay_resume_all(dev_priv);
if (unlikely(ret != 0))
sou->buffer = NULL; /* vmw_dmabuf_init frees on error */
else
sou->buffer_size = size;
return ret;
}
static int vmw_sou_crtc_set_config(struct drm_mode_set *set)
static void vmw_sou_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
struct vmw_private *dev_priv;
struct vmw_screen_object_unit *sou;
struct drm_connector *connector;
struct drm_display_mode *mode;
struct drm_encoder *encoder;
struct vmw_framebuffer *vfb;
struct drm_framebuffer *fb;
struct drm_crtc *crtc;
int ret = 0;
struct drm_plane_state *ps;
struct vmw_plane_state *vps;
int ret;
if (!set)
return -EINVAL;
if (!set->crtc)
return -EINVAL;
/* get the sou */
crtc = set->crtc;
sou = vmw_crtc_to_sou(crtc);
vfb = set->fb ? vmw_framebuffer_to_vfb(set->fb) : NULL;
dev_priv = vmw_priv(crtc->dev);
ps = crtc->primary->state;
fb = ps->fb;
vps = vmw_plane_state_to_vps(ps);
if (set->num_connectors > 1) {
DRM_ERROR("Too many connectors\n");
return -EINVAL;
}
vfb = (fb) ? vmw_framebuffer_to_vfb(fb) : NULL;
if (set->num_connectors == 1 &&
set->connectors[0] != &sou->base.connector) {
DRM_ERROR("Connector doesn't match %p %p\n",
set->connectors[0], &sou->base.connector);
return -EINVAL;
}
/* Only one active implicit frame-buffer at a time. */
mutex_lock(&dev_priv->global_kms_state_mutex);
if (sou->base.is_implicit &&
dev_priv->implicit_fb && vfb &&
!(dev_priv->num_implicit == 1 &&
sou->base.active_implicit) &&
dev_priv->implicit_fb != vfb) {
mutex_unlock(&dev_priv->global_kms_state_mutex);
DRM_ERROR("Multiple implicit framebuffers not supported.\n");
return -EINVAL;
}
mutex_unlock(&dev_priv->global_kms_state_mutex);
/* since they always map one to one these are safe */
connector = &sou->base.connector;
encoder = &sou->base.encoder;
/* should we turn the crtc off */
if (set->num_connectors == 0 || !set->mode || !set->fb) {
if (sou->defined) {
ret = vmw_sou_fifo_destroy(dev_priv, sou);
/* the hardware has hung don't do anything more */
if (unlikely(ret != 0))
return ret;
connector->encoder = NULL;
encoder->crtc = NULL;
crtc->primary->fb = NULL;
crtc->x = 0;
crtc->y = 0;
crtc->enabled = false;
vmw_kms_del_active(dev_priv, &sou->base);
vmw_sou_backing_free(dev_priv, sou);
return 0;
if (ret) {
DRM_ERROR("Failed to destroy Screen Object\n");
return;
}
}
if (vfb) {
sou->buffer = vps->dmabuf;
sou->buffer_size = vps->dmabuf_size;
/* we now know we want to set a mode */
mode = set->mode;
fb = set->fb;
if (set->x + mode->hdisplay > fb->width ||
set->y + mode->vdisplay > fb->height) {
DRM_ERROR("set outside of framebuffer\n");
return -EINVAL;
}
vmw_svga_enable(dev_priv);
if (mode->hdisplay != crtc->mode.hdisplay ||
mode->vdisplay != crtc->mode.vdisplay) {
/* no need to check if depth is different, because backing
* store depth is forced to 4 by the device.
*/
ret = vmw_sou_fifo_destroy(dev_priv, sou);
/* the hardware has hung don't do anything more */
if (unlikely(ret != 0))
return ret;
vmw_sou_backing_free(dev_priv, sou);
}
if (!sou->buffer) {
/* forced to depth 4 by the device */
size_t size = mode->hdisplay * mode->vdisplay * 4;
ret = vmw_sou_backing_alloc(dev_priv, sou, size);
if (unlikely(ret != 0))
return ret;
}
ret = vmw_sou_fifo_create(dev_priv, sou, set->x, set->y, mode);
if (unlikely(ret != 0)) {
/*
* We are in a bit of a situation here, the hardware has
* hung and we may or may not have a buffer hanging of
* the screen object, best thing to do is not do anything
* if we where defined, if not just turn the crtc of.
* Not what userspace wants but it needs to htfu.
*/
if (sou->defined)
return ret;
connector->encoder = NULL;
encoder->crtc = NULL;
crtc->primary->fb = NULL;
crtc->x = 0;
crtc->y = 0;
crtc->enabled = false;
return ret;
}
ret = vmw_sou_fifo_create(dev_priv, sou, crtc->x, crtc->y,
&crtc->mode);
if (ret)
DRM_ERROR("Failed to define Screen Object %dx%d\n",
crtc->x, crtc->y);
vmw_kms_add_active(dev_priv, &sou->base, vfb);
} else {
sou->buffer = NULL;
sou->buffer_size = 0;
connector->encoder = encoder;
encoder->crtc = crtc;
crtc->mode = *mode;
crtc->primary->fb = fb;
crtc->x = set->x;
crtc->y = set->y;
crtc->enabled = true;
vmw_kms_del_active(dev_priv, &sou->base);
}
}
return 0;
/**
* vmw_sou_crtc_helper_prepare - Noop
*
* @crtc: CRTC associated with the new screen
*
* Prepares the CRTC for a mode set, but we don't need to do anything here.
*/
static void vmw_sou_crtc_helper_prepare(struct drm_crtc *crtc)
{
}
/**
* vmw_sou_crtc_helper_commit - Noop
*
* @crtc: CRTC associated with the new screen
*
* This is called after a mode set has been completed.
*/
static void vmw_sou_crtc_helper_commit(struct drm_crtc *crtc)
{
}
/**
* vmw_sou_crtc_helper_disable - Turns off CRTC
*
* @crtc: CRTC to be turned off
*/
static void vmw_sou_crtc_helper_disable(struct drm_crtc *crtc)
{
struct vmw_private *dev_priv;
struct vmw_screen_object_unit *sou;
int ret;
if (!crtc) {
DRM_ERROR("CRTC is NULL\n");
return;
}
sou = vmw_crtc_to_sou(crtc);
dev_priv = vmw_priv(crtc->dev);
if (sou->defined) {
ret = vmw_sou_fifo_destroy(dev_priv, sou);
if (ret)
DRM_ERROR("Failed to destroy Screen Object\n");
}
}
static int vmw_sou_crtc_page_flip(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_framebuffer *new_fb,
struct drm_pending_vblank_event *event,
uint32_t flags)
{
struct vmw_private *dev_priv = vmw_priv(crtc->dev);
struct drm_framebuffer *old_fb = crtc->primary->fb;
struct vmw_framebuffer *vfb = vmw_framebuffer_to_vfb(fb);
struct vmw_framebuffer *vfb = vmw_framebuffer_to_vfb(new_fb);
struct vmw_fence_obj *fence = NULL;
struct drm_vmw_rect vclips;
int ret;
@@ -407,7 +322,12 @@ static int vmw_sou_crtc_page_flip(struct drm_crtc *crtc,
if (!vmw_kms_crtc_flippable(dev_priv, crtc))
return -EINVAL;
crtc->primary->fb = fb;
flags &= ~DRM_MODE_PAGE_FLIP_ASYNC;
ret = drm_atomic_helper_page_flip(crtc, new_fb, NULL, flags);
if (ret) {
DRM_ERROR("Page flip error %d.\n", ret);
return ret;
}
/* do a full screen dirty update */
vclips.x = crtc->x;
@@ -454,16 +374,17 @@ static int vmw_sou_crtc_page_flip(struct drm_crtc *crtc,
return ret;
out_no_fence:
crtc->primary->fb = old_fb;
drm_atomic_set_fb_for_plane(crtc->primary->state, old_fb);
return ret;
}
static const struct drm_crtc_funcs vmw_screen_object_crtc_funcs = {
.cursor_set2 = vmw_du_crtc_cursor_set2,
.cursor_move = vmw_du_crtc_cursor_move,
.gamma_set = vmw_du_crtc_gamma_set,
.destroy = vmw_sou_crtc_destroy,
.set_config = vmw_sou_crtc_set_config,
.reset = vmw_du_crtc_reset,
.atomic_duplicate_state = vmw_du_crtc_duplicate_state,
.atomic_destroy_state = vmw_du_crtc_destroy_state,
.set_config = vmw_kms_set_config,
.page_flip = vmw_sou_crtc_page_flip,
};
@@ -495,15 +416,180 @@ static const struct drm_connector_funcs vmw_sou_connector_funcs = {
.fill_modes = vmw_du_connector_fill_modes,
.set_property = vmw_du_connector_set_property,
.destroy = vmw_sou_connector_destroy,
.reset = vmw_du_connector_reset,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
.atomic_set_property = vmw_du_connector_atomic_set_property,
.atomic_get_property = vmw_du_connector_atomic_get_property,
};
static const struct
drm_connector_helper_funcs vmw_sou_connector_helper_funcs = {
.best_encoder = drm_atomic_helper_best_encoder,
};
/*
* Screen Object Display Plane Functions
*/
/**
* vmw_sou_primary_plane_cleanup_fb - Frees sou backing buffer
*
* @plane: display plane
* @old_state: Contains the FB to clean up
*
* Unpins the display surface
*
* Returns 0 on success
*/
static void
vmw_sou_primary_plane_cleanup_fb(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);
vmw_dmabuf_unreference(&vps->dmabuf);
vps->dmabuf_size = 0;
vmw_du_plane_cleanup_fb(plane, old_state);
}
/**
* vmw_sou_primary_plane_prepare_fb - allocate backing buffer
*
* @plane: display plane
* @new_state: info on the new plane state, including the FB
*
* The SOU backing buffer is our equivalent of the display plane.
*
* Returns 0 on success
*/
static int
vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane,
struct drm_plane_state *new_state)
{
struct drm_framebuffer *new_fb = new_state->fb;
struct drm_crtc *crtc = plane->state->crtc ?: new_state->crtc;
struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
struct vmw_private *dev_priv;
size_t size;
int ret;
if (!new_fb) {
vmw_dmabuf_unreference(&vps->dmabuf);
vps->dmabuf_size = 0;
return 0;
}
size = new_state->crtc_w * new_state->crtc_h * 4;
if (vps->dmabuf) {
if (vps->dmabuf_size == size)
return 0;
vmw_dmabuf_unreference(&vps->dmabuf);
vps->dmabuf_size = 0;
}
vps->dmabuf = kzalloc(sizeof(*vps->dmabuf), GFP_KERNEL);
if (!vps->dmabuf)
return -ENOMEM;
dev_priv = vmw_priv(crtc->dev);
vmw_svga_enable(dev_priv);
/* After we have alloced the backing store might not be able to
* resume the overlays, this is preferred to failing to alloc.
*/
vmw_overlay_pause_all(dev_priv);
ret = vmw_dmabuf_init(dev_priv, vps->dmabuf, size,
&vmw_vram_ne_placement,
false, &vmw_dmabuf_bo_free);
vmw_overlay_resume_all(dev_priv);
if (ret != 0)
vps->dmabuf = NULL; /* vmw_dmabuf_init frees on error */
else
vps->dmabuf_size = size;
return ret;
}
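
As a concrete (hypothetical) example of the sizing above: a 1920x1080 screen object needs 1920 * 1080 * 4 = 8,294,400 bytes of backing store, a little under 8 MiB, since the device forces 4 bytes per pixel regardless of the framebuffer depth.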
static void
vmw_sou_primary_plane_atomic_update(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
struct drm_crtc *crtc = plane->state->crtc;
if (crtc)
crtc->primary->fb = plane->state->fb;
}
static const struct drm_plane_funcs vmw_sou_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = vmw_du_primary_plane_destroy,
.reset = vmw_du_plane_reset,
.atomic_duplicate_state = vmw_du_plane_duplicate_state,
.atomic_destroy_state = vmw_du_plane_destroy_state,
};
static const struct drm_plane_funcs vmw_sou_cursor_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = vmw_du_cursor_plane_destroy,
.reset = vmw_du_plane_reset,
.atomic_duplicate_state = vmw_du_plane_duplicate_state,
.atomic_destroy_state = vmw_du_plane_destroy_state,
};
/*
* Atomic Helpers
*/
static const struct
drm_plane_helper_funcs vmw_sou_cursor_plane_helper_funcs = {
.atomic_check = vmw_du_cursor_plane_atomic_check,
.atomic_update = vmw_du_cursor_plane_atomic_update,
.prepare_fb = vmw_du_cursor_plane_prepare_fb,
.cleanup_fb = vmw_du_plane_cleanup_fb,
};
static const struct
drm_plane_helper_funcs vmw_sou_primary_plane_helper_funcs = {
.atomic_check = vmw_du_primary_plane_atomic_check,
.atomic_update = vmw_sou_primary_plane_atomic_update,
.prepare_fb = vmw_sou_primary_plane_prepare_fb,
.cleanup_fb = vmw_sou_primary_plane_cleanup_fb,
};
static const struct drm_crtc_helper_funcs vmw_sou_crtc_helper_funcs = {
.prepare = vmw_sou_crtc_helper_prepare,
.commit = vmw_sou_crtc_helper_commit,
.disable = vmw_sou_crtc_helper_disable,
.mode_set_nofb = vmw_sou_crtc_mode_set_nofb,
.atomic_check = vmw_du_crtc_atomic_check,
.atomic_begin = vmw_du_crtc_atomic_begin,
.atomic_flush = vmw_du_crtc_atomic_flush,
};
static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit)
{
struct vmw_screen_object_unit *sou;
struct drm_device *dev = dev_priv->dev;
struct drm_connector *connector;
struct drm_encoder *encoder;
struct drm_plane *primary, *cursor;
struct drm_crtc *crtc;
int ret;
sou = kzalloc(sizeof(*sou), GFP_KERNEL);
if (!sou)
@@ -513,27 +599,93 @@ static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit)
crtc = &sou->base.crtc;
encoder = &sou->base.encoder;
connector = &sou->base.connector;
primary = &sou->base.primary;
cursor = &sou->base.cursor;
sou->base.active_implicit = false;
sou->base.pref_active = (unit == 0);
sou->base.pref_width = dev_priv->initial_width;
sou->base.pref_height = dev_priv->initial_height;
sou->base.pref_mode = NULL;
/*
* Remove this after enabling atomic because property values can
* only exist in a state object
*/
sou->base.is_implicit = false;
drm_connector_init(dev, connector, &vmw_sou_connector_funcs,
DRM_MODE_CONNECTOR_VIRTUAL);
connector->status = vmw_du_connector_detect(connector, true);
/* Initialize primary plane */
vmw_du_plane_reset(primary);
drm_encoder_init(dev, encoder, &vmw_screen_object_encoder_funcs,
ret = drm_universal_plane_init(dev, &sou->base.primary,
0, &vmw_sou_plane_funcs,
vmw_primary_plane_formats,
ARRAY_SIZE(vmw_primary_plane_formats),
DRM_PLANE_TYPE_PRIMARY, NULL);
if (ret) {
DRM_ERROR("Failed to initialize primary plane");
goto err_free;
}
drm_plane_helper_add(primary, &vmw_sou_primary_plane_helper_funcs);
/* Initialize cursor plane */
vmw_du_plane_reset(cursor);
ret = drm_universal_plane_init(dev, &sou->base.cursor,
0, &vmw_sou_cursor_funcs,
vmw_cursor_plane_formats,
ARRAY_SIZE(vmw_cursor_plane_formats),
DRM_PLANE_TYPE_CURSOR, NULL);
if (ret) {
DRM_ERROR("Failed to initialize cursor plane");
drm_plane_cleanup(&sou->base.primary);
goto err_free;
}
drm_plane_helper_add(cursor, &vmw_sou_cursor_plane_helper_funcs);
vmw_du_connector_reset(connector);
ret = drm_connector_init(dev, connector, &vmw_sou_connector_funcs,
DRM_MODE_CONNECTOR_VIRTUAL);
if (ret) {
DRM_ERROR("Failed to initialize connector\n");
goto err_free;
}
drm_connector_helper_add(connector, &vmw_sou_connector_helper_funcs);
connector->status = vmw_du_connector_detect(connector, true);
vmw_connector_state_to_vcs(connector->state)->is_implicit = false;
ret = drm_encoder_init(dev, encoder, &vmw_screen_object_encoder_funcs,
DRM_MODE_ENCODER_VIRTUAL, NULL);
drm_mode_connector_attach_encoder(connector, encoder);
if (ret) {
DRM_ERROR("Failed to initialize encoder\n");
goto err_free_connector;
}
(void) drm_mode_connector_attach_encoder(connector, encoder);
encoder->possible_crtcs = (1 << unit);
encoder->possible_clones = 0;
(void) drm_connector_register(connector);
ret = drm_connector_register(connector);
if (ret) {
DRM_ERROR("Failed to register connector\n");
goto err_free_encoder;
}
drm_crtc_init(dev, crtc, &vmw_screen_object_crtc_funcs);
vmw_du_crtc_reset(crtc);
ret = drm_crtc_init_with_planes(dev, crtc, &sou->base.primary,
&sou->base.cursor,
&vmw_screen_object_crtc_funcs, NULL);
if (ret) {
DRM_ERROR("Failed to initialize CRTC\n");
goto err_free_unregister;
}
drm_crtc_helper_add(crtc, &vmw_sou_crtc_helper_funcs);
drm_mode_crtc_set_gamma_size(crtc, 256);
@@ -550,6 +702,16 @@ static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit)
sou->base.is_implicit);
return 0;
err_free_unregister:
drm_connector_unregister(connector);
err_free_encoder:
drm_encoder_cleanup(encoder);
err_free_connector:
drm_connector_cleanup(connector);
err_free:
kfree(sou);
return ret;
}
int vmw_kms_sou_init_display(struct vmw_private *dev_priv)


@@ -0,0 +1,256 @@
/**************************************************************************
*
* Copyright © 2016 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
/**
* struct vmw_user_simple_resource - User-space simple resource struct
*
* @base: The TTM base object implementing user-space visibility.
* @account_size: How much memory was accounted for this object.
* @simple: The embedded struct vmw_simple_resource.
*/
struct vmw_user_simple_resource {
struct ttm_base_object base;
size_t account_size;
struct vmw_simple_resource simple;
/*
* Nothing to be placed after @simple, since size of @simple is
* unknown.
*/
};
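
To make the "size of @simple is unknown" remark concrete, here is an illustrative layout sketch, using struct vmw_stream from the new overlay-stream file further down as the example derived type:

/*
 * Layout sketch (illustrative): with func->size == sizeof(struct vmw_stream),
 * vmw_simple_resource_create_ioctl() allocates
 *
 *     offsetof(struct vmw_user_simple_resource, simple) + sizeof(struct vmw_stream)
 *
 * bytes, so the derived type overlays the @simple member:
 *
 *     +---------------------------------------+ <- struct vmw_user_simple_resource
 *     | base (struct ttm_base_object)         |
 *     | account_size                          |
 *     +---------------------------------------+ <- simple / struct vmw_stream.sres
 *     | sres (struct vmw_simple_resource)     |
 *     | stream_id                             |
 *     +---------------------------------------+
 *
 * which is why nothing may be placed after @simple in this struct.
 */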
/**
* vmw_simple_resource_init - Initialize a simple resource object.
*
* @dev_priv: Pointer to a struct device private.
* @simple: The struct vmw_simple_resource to initialize.
* @data: Data passed to the information initialization function.
* @res_free: Function pointer to destroy the simple resource.
*
* Returns:
* 0 if succeeded.
* Negative error value if error, in which case the resource will have been
* freed.
*/
static int vmw_simple_resource_init(struct vmw_private *dev_priv,
struct vmw_simple_resource *simple,
void *data,
void (*res_free)(struct vmw_resource *res))
{
struct vmw_resource *res = &simple->res;
int ret;
ret = vmw_resource_init(dev_priv, res, false, res_free,
&simple->func->res_func);
if (ret) {
res_free(res);
return ret;
}
ret = simple->func->init(res, data);
if (ret) {
vmw_resource_unreference(&res);
return ret;
}
vmw_resource_activate(&simple->res, simple->func->hw_destroy);
return 0;
}
/**
* vmw_simple_resource_free - Free a simple resource object.
*
* @res: The struct vmw_resource member of the simple resource object.
*
* Frees memory and memory accounting for the object.
*/
static void vmw_simple_resource_free(struct vmw_resource *res)
{
struct vmw_user_simple_resource *usimple =
container_of(res, struct vmw_user_simple_resource,
simple.res);
struct vmw_private *dev_priv = res->dev_priv;
size_t size = usimple->account_size;
ttm_base_object_kfree(usimple, base);
ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
}
/**
* vmw_simple_resource_base_release - TTM object release callback
*
* @p_base: The struct ttm_base_object member of the simple resource object.
*
* Called when the last reference to the embedded struct ttm_base_object is
* gone. Typically results in an object free, unless there are other
* references to the embedded struct vmw_resource.
*/
static void vmw_simple_resource_base_release(struct ttm_base_object **p_base)
{
struct ttm_base_object *base = *p_base;
struct vmw_user_simple_resource *usimple =
container_of(base, struct vmw_user_simple_resource, base);
struct vmw_resource *res = &usimple->simple.res;
*p_base = NULL;
vmw_resource_unreference(&res);
}
/**
* vmw_simple_resource_create_ioctl - Helper to set up an ioctl function to
* create a struct vmw_simple_resource.
*
* @dev: Pointer to a struct drm device.
* @data: Ioctl argument.
* @file_priv: Pointer to a struct drm_file identifying the caller.
* @func: Pointer to a struct vmw_simple_resource_func identifying the
* simple resource type.
*
* Returns:
* 0 if success,
* Negative error value on error.
*/
int
vmw_simple_resource_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv,
const struct vmw_simple_resource_func *func)
{
struct vmw_private *dev_priv = vmw_priv(dev);
struct vmw_user_simple_resource *usimple;
struct vmw_resource *res;
struct vmw_resource *tmp;
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
size_t alloc_size;
size_t account_size;
int ret;
alloc_size = offsetof(struct vmw_user_simple_resource, simple) +
func->size;
account_size = ttm_round_pot(alloc_size) + VMW_IDA_ACC_SIZE;
ret = ttm_read_lock(&dev_priv->reservation_sem, true);
if (ret)
return ret;
ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), account_size,
false, true);
ttm_read_unlock(&dev_priv->reservation_sem);
if (ret) {
if (ret != -ERESTARTSYS)
DRM_ERROR("Out of graphics memory for %s"
" creation.\n", func->res_func.type_name);
goto out_ret;
}
usimple = kzalloc(alloc_size, GFP_KERNEL);
if (!usimple) {
ttm_mem_global_free(vmw_mem_glob(dev_priv),
account_size);
ret = -ENOMEM;
goto out_ret;
}
usimple->simple.func = func;
usimple->account_size = account_size;
res = &usimple->simple.res;
usimple->base.shareable = false;
usimple->base.tfile = NULL;
/*
* From here on, the destructor takes over resource freeing.
*/
ret = vmw_simple_resource_init(dev_priv, &usimple->simple,
data, vmw_simple_resource_free);
if (ret)
goto out_ret;
tmp = vmw_resource_reference(res);
ret = ttm_base_object_init(tfile, &usimple->base, false,
func->ttm_res_type,
&vmw_simple_resource_base_release, NULL);
if (ret) {
vmw_resource_unreference(&tmp);
goto out_err;
}
func->set_arg_handle(data, usimple->base.hash.key);
out_err:
vmw_resource_unreference(&res);
out_ret:
return ret;
}
/**
* vmw_simple_resource_lookup - Look up a simple resource from its user-space
* handle.
*
* @tfile: struct ttm_object_file identifying the caller.
* @handle: The user-space handle.
* @func: The struct vmw_simple_resource_func identifying the simple resource
* type.
*
* Returns: Refcounted pointer to the embedded struct vmw_resource if
* successful. Error pointer otherwise.
*/
struct vmw_resource *
vmw_simple_resource_lookup(struct ttm_object_file *tfile,
uint32_t handle,
const struct vmw_simple_resource_func *func)
{
struct vmw_user_simple_resource *usimple;
struct ttm_base_object *base;
struct vmw_resource *res;
base = ttm_base_object_lookup(tfile, handle);
if (!base) {
DRM_ERROR("Invalid %s handle 0x%08lx.\n",
func->res_func.type_name,
(unsigned long) handle);
return ERR_PTR(-ESRCH);
}
if (ttm_base_object_type(base) != func->ttm_res_type) {
ttm_base_object_unref(&base);
DRM_ERROR("Invalid type of %s handle 0x%08lx.\n",
func->res_func.type_name,
(unsigned long) handle);
return ERR_PTR(-EINVAL);
}
usimple = container_of(base, typeof(*usimple), base);
res = vmw_resource_reference(&usimple->simple.res);
ttm_base_object_unref(&base);
return res;
}

File diff suppressed because it is too large.


@@ -814,7 +814,6 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
} else {
srf->snooper.image = NULL;
}
srf->snooper.crtc = NULL;
user_srf->prime.base.shareable = false;
user_srf->prime.base.tfile = NULL;
@@ -1480,10 +1479,24 @@ int vmw_surface_gb_priv_define(struct drm_device *dev,
*srf_out = NULL;
if (for_scanout) {
uint32_t max_width, max_height;
if (!svga3dsurface_is_screen_target_format(format)) {
DRM_ERROR("Invalid Screen Target surface format.");
return -EINVAL;
}
max_width = min(dev_priv->texture_max_width,
dev_priv->stdu_max_width);
max_height = min(dev_priv->texture_max_height,
dev_priv->stdu_max_height);
if (size.width > max_width || size.height > max_height) {
DRM_ERROR("%ux%u\n, exeeds max surface size %ux%u",
size.width, size.height,
max_width, max_height);
return -EINVAL;
}
} else {
const struct svga3d_surface_desc *desc;


@@ -0,0 +1,168 @@
/**************************************************************************
*
* Copyright © 2012-2016 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
/**
* struct vmw_stream - Overlay stream simple resource.
* @sres: The simple resource we derive from.
* @stream_id: The overlay stream id.
*/
struct vmw_stream {
struct vmw_simple_resource sres;
u32 stream_id;
};
/**
* vmw_stream - Typecast a struct vmw_resource to a struct vmw_stream.
* @res: Pointer to the struct vmw_resource.
*
* Returns: Returns a pointer to the struct vmw_stream.
*/
static struct vmw_stream *
vmw_stream(struct vmw_resource *res)
{
return container_of(res, struct vmw_stream, sres.res);
}
/***************************************************************************
* Simple resource callbacks for struct vmw_stream
**************************************************************************/
static void vmw_stream_hw_destroy(struct vmw_resource *res)
{
struct vmw_private *dev_priv = res->dev_priv;
struct vmw_stream *stream = vmw_stream(res);
int ret;
ret = vmw_overlay_unref(dev_priv, stream->stream_id);
WARN_ON_ONCE(ret != 0);
}
static int vmw_stream_init(struct vmw_resource *res, void *data)
{
struct vmw_stream *stream = vmw_stream(res);
return vmw_overlay_claim(res->dev_priv, &stream->stream_id);
}
static void vmw_stream_set_arg_handle(void *data, u32 handle)
{
struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
arg->stream_id = handle;
}
static const struct vmw_simple_resource_func va_stream_func = {
.res_func = {
.res_type = vmw_res_stream,
.needs_backup = false,
.may_evict = false,
.type_name = "overlay stream",
.backup_placement = NULL,
.create = NULL,
.destroy = NULL,
.bind = NULL,
.unbind = NULL
},
.ttm_res_type = VMW_RES_STREAM,
.size = sizeof(struct vmw_stream),
.init = vmw_stream_init,
.hw_destroy = vmw_stream_hw_destroy,
.set_arg_handle = vmw_stream_set_arg_handle,
};
/***************************************************************************
* End simple resource callbacks for struct vmw_stream
**************************************************************************/
/**
* vmw_stream_unref_ioctl - Ioctl to unreference a user-space handle to
* a struct vmw_stream.
* @dev: Pointer to the drm device.
* @data: The ioctl argument
* @file_priv: Pointer to a struct drm_file identifying the caller.
*
* Return:
* 0 if successful.
* Negative error value on failure.
*/
int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
arg->stream_id, TTM_REF_USAGE);
}
/**
* vmw_stream_claim_ioctl - Ioctl to claim a struct vmw_stream overlay.
* @dev: Pointer to the drm device.
* @data: The ioctl argument
* @file_priv: Pointer to a struct drm_file identifying the caller.
*
* Return:
* 0 if successful.
* Negative error value on failure.
*/
int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
return vmw_simple_resource_create_ioctl(dev, data, file_priv,
&va_stream_func);
}
/**
* vmw_user_stream_lookup - Look up a struct vmw_user_stream from a handle.
* @dev_priv: Pointer to a struct vmw_private.
* @tfile: struct ttm_object_file identifying the caller.
* @inout_id: In: The user-space handle. Out: The stream id.
* @out: On output contains a refcounted pointer to the embedded
* struct vmw_resource.
*
* Return:
* 0 if successful.
* Negative error value on failure.
*/
int vmw_user_stream_lookup(struct vmw_private *dev_priv,
struct ttm_object_file *tfile,
uint32_t *inout_id, struct vmw_resource **out)
{
struct vmw_stream *stream;
struct vmw_resource *res =
vmw_simple_resource_lookup(tfile, *inout_id, &va_stream_func);
if (IS_ERR(res))
return PTR_ERR(res);
stream = vmw_stream(res);
*inout_id = stream->stream_id;
*out = res;
return 0;
}


@@ -41,6 +41,7 @@ extern "C" {
#define DRM_VMW_GET_PARAM 0
#define DRM_VMW_ALLOC_DMABUF 1
#define DRM_VMW_UNREF_DMABUF 2
#define DRM_VMW_HANDLE_CLOSE 2
#define DRM_VMW_CURSOR_BYPASS 3
/* guarded by DRM_VMW_PARAM_NUM_STREAMS != 0*/
#define DRM_VMW_CONTROL_STREAM 4
@@ -1092,6 +1093,29 @@ union drm_vmw_extended_context_arg {
struct drm_vmw_context_arg rep;
};
/*************************************************************************/
/*
* DRM_VMW_HANDLE_CLOSE - Close a user-space handle and release its
* underlying resource.
*
* Note that this ioctl is overlaid on the DRM_VMW_UNREF_DMABUF Ioctl.
* The ioctl arguments therefore need to be identical in layout.
*
*/
/**
* struct drm_vmw_handle_close_arg
*
* @handle: Handle to close.
*
* Argument to the DRM_VMW_HANDLE_CLOSE Ioctl.
*/
struct drm_vmw_handle_close_arg {
__u32 handle;
__u32 pad64;
};
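
As a usage sketch (illustrative only, using libdrm's generic command wrapper rather than any vmwgfx-specific helper; the header path depends on the libdrm installation), user space would close a handle like this:

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <drm/vmwgfx_drm.h>

/* Sketch: close a vmwgfx resource handle via the new overlaid ioctl. */
static int example_vmw_handle_close(int fd, uint32_t handle)
{
	struct drm_vmw_handle_close_arg arg;

	memset(&arg, 0, sizeof(arg));
	arg.handle = handle;

	return drmCommandWrite(fd, DRM_VMW_HANDLE_CLOSE, &arg, sizeof(arg));
}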
#if defined(__cplusplus)
}
#endif