Merge branch 'drm-fixes' of git://people.freedesktop.org/~airlied/linux
Pull drm fixes from Dave Airlie:
 "A few regression fixes already, one for my own stupidity, and mgag200
  typo fix, vmwgfx fixes and ttm regression fixes, and a radeon register
  checker update for older cards to handle geom shaders"

* 'drm-fixes' of git://people.freedesktop.org/~airlied/linux:
  drm/radeon: allow geom rings to be setup on r600/r700 (v2)
  drm/mgag200,ast,cirrus: fix regression with drm_can_sleep conversion
  drm/ttm: Don't clear page metadata of imported sg pages
  drm/ttm: Fix TTM object open regression
  vmwgfx: Fix unitialized stack read in vmw_setup_otable_base
  drm/vmwgfx: Reemit context bindings when necessary v2
  drm/vmwgfx: Detect old user-space drivers and set up legacy emulation v2
  drm/vmwgfx: Emulate legacy shaders on guest-backed devices v2
  drm/vmwgfx: Fix legacy surface reference size copyback
  drm/vmwgfx: Fix SET_SHADER_CONST emulation on guest-backed devices
  drm/vmwgfx: Fix regression caused by "drm/ttm: make ttm reservation calls behave like reservation calls"
  drm/vmwgfx: Don't commit staged bindings if execbuf fails
  drm/mgag200: fix typo causing bw limits to be ignored on some chips
commit 65f0505b1b
@@ -65,7 +65,7 @@ static void ast_dirty_update(struct ast_fbdev *afbdev,
          * then the BO is being moved and we should
          * store up the damage until later.
          */
-        if (!drm_can_sleep())
+        if (drm_can_sleep())
                 ret = ast_bo_reserve(bo, true);
         if (ret) {
                 if (ret != -EBUSY)
@@ -39,7 +39,7 @@ static void cirrus_dirty_update(struct cirrus_fbdev *afbdev,
          * then the BO is being moved and we should
          * store up the damage until later.
          */
-        if (!drm_can_sleep())
+        if (drm_can_sleep())
                 ret = cirrus_bo_reserve(bo, true);
         if (ret) {
                 if (ret != -EBUSY)
@@ -41,7 +41,7 @@ static void mga_dirty_update(struct mga_fbdev *mfbdev,
          * then the BO is being moved and we should
          * store up the damage until later.
          */
-        if (!drm_can_sleep())
+        if (drm_can_sleep())
                 ret = mgag200_bo_reserve(bo, true);
         if (ret) {
                 if (ret != -EBUSY)
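The three fbdev hunks above share one root cause: the drm_can_sleep()
conversion inverted the test, so every process-context dirty update skipped
the BO reservation and deferred its damage instead. A minimal sketch of the
intended control flow (illustrative only; my_bo, my_bo_reserve,
my_bo_unreserve, store_damage_for_later and copy_dirty_region are
placeholders, not the drivers' symbols):

    /* drm_can_sleep() reports whether the caller may block; only then
     * is it safe to wait on the BO reservation. */
    static void dirty_update_sketch(struct my_bo *bo)
    {
        int ret = -EBUSY;                      /* assume "BO being moved" */

        if (drm_can_sleep())                   /* process context: may block */
            ret = my_bo_reserve(bo, true);     /* interruptible wait */
        if (ret) {
            if (ret != -EBUSY)
                return;                        /* hard failure */
            store_damage_for_later(bo);        /* flush on a later update */
            return;
        }
        copy_dirty_region(bo);                 /* safe: BO is reserved */
        my_bo_unreserve(bo);
    }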
@@ -1519,11 +1519,11 @@ static int mga_vga_mode_valid(struct drm_connector *connector,
             (mga_vga_calculate_mode_bandwidth(mode, bpp)
              > (32700 * 1024))) {
                 return MODE_BANDWIDTH;
-        } else if (mode->type == G200_EH &&
+        } else if (mdev->type == G200_EH &&
                    (mga_vga_calculate_mode_bandwidth(mode, bpp)
                     > (37500 * 1024))) {
                 return MODE_BANDWIDTH;
-        } else if (mode->type == G200_ER &&
+        } else if (mdev->type == G200_ER &&
                    (mga_vga_calculate_mode_bandwidth(mode,
                                                      bpp) > (55000 * 1024))) {
                 return MODE_BANDWIDTH;
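The typo fixed here compared mode->type, a bitmask of DRM_MODE_TYPE_* flags
on the drm_display_mode, against G200_EH and G200_ER, which are values of the
mgag200 board-variant enum stored in mdev->type; the comparison could never
be true, so the per-chip bandwidth caps were silently skipped. Illustrative
fragment (bandwidth stands in for the mga_vga_calculate_mode_bandwidth()
result):

    /* mdev->type identifies the board variant; mode->type never holds
     * an mga_type value, so the old test was dead code. */
    if (mdev->type == G200_EH && bandwidth > (37500 * 1024))
        return MODE_BANDWIDTH;  /* now actually enforced on G200_EH */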
@@ -1007,8 +1007,22 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
         case R_008C64_SQ_VSTMP_RING_SIZE:
         case R_0288C8_SQ_GS_VERT_ITEMSIZE:
                 /* get value to populate the IB don't remove */
-                tmp =radeon_get_ib_value(p, idx);
-                ib[idx] = 0;
+                /*tmp =radeon_get_ib_value(p, idx);
+                ib[idx] = 0;*/
+                break;
+        case SQ_ESGS_RING_BASE:
+        case SQ_GSVS_RING_BASE:
+        case SQ_ESTMP_RING_BASE:
+        case SQ_GSTMP_RING_BASE:
+        case SQ_PSTMP_RING_BASE:
+        case SQ_VSTMP_RING_BASE:
+                r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+                if (r) {
+                        dev_warn(p->dev, "bad SET_CONTEXT_REG "
+                                        "0x%04X\n", reg);
+                        return -EINVAL;
+                }
+                ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
                 break;
         case SQ_CONFIG:
                 track->sq_config = radeon_get_ib_value(p, idx);
@@ -78,9 +78,10 @@
  * 2.34.0 - Add CIK tiling mode array query
  * 2.35.0 - Add CIK macrotile mode array query
  * 2.36.0 - Fix CIK DCE tiling setup
+ * 2.37.0 - allow GS ring setup on r6xx/r7xx
  */
 #define KMS_DRIVER_MAJOR        2
-#define KMS_DRIVER_MINOR        36
+#define KMS_DRIVER_MINOR        37
 #define KMS_DRIVER_PATCHLEVEL   0
 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
 int radeon_driver_unload_kms(struct drm_device *dev);
@@ -18,6 +18,7 @@ r600 0x9400
 0x00028A3C VGT_GROUP_VECT_1_FMT_CNTL
 0x00028A40 VGT_GS_MODE
 0x00028A6C VGT_GS_OUT_PRIM_TYPE
+0x00028B38 VGT_GS_MAX_VERT_OUT
 0x000088C8 VGT_GS_PER_ES
 0x000088E8 VGT_GS_PER_VS
 0x000088D4 VGT_GS_VERTEX_REUSE
@@ -292,7 +292,7 @@ int ttm_ref_object_add(struct ttm_object_file *tfile,
 
         if (ret == 0) {
                 ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
-                if (!kref_get_unless_zero(&ref->kref)) {
+                if (kref_get_unless_zero(&ref->kref)) {
                         rcu_read_unlock();
                         break;
                 }
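For context, this one-character regression sits in TTM's RCU-protected ref
lookup: kref_get_unless_zero() returns nonzero only when it actually obtained
a reference, so the loop must stop and reuse the existing ref exactly in the
success case; with the test inverted, every successful lookup fell through
and opening a TTM object added duplicate refs. A simplified sketch of the
lookup-or-create pattern (not the exact TTM code; the retry/allocation part
is elided):

    while (ret == -EINVAL) {
        rcu_read_lock();
        ret = drm_ht_find_item_rcu(ht, base->hash.key, &hash);
        if (ret == 0) {
            ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
            if (kref_get_unless_zero(&ref->kref)) {
                rcu_read_unlock();
                break;              /* got a live reference: reuse it */
            }
        }
        rcu_read_unlock();          /* miss, or object already dying */
        /* ... allocate and insert a new ttm_ref_object, then retry ... */
    }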
@@ -380,6 +380,9 @@ static void ttm_tt_clear_mapping(struct ttm_tt *ttm)
         pgoff_t i;
         struct page **page = ttm->pages;
 
+        if (ttm->page_flags & TTM_PAGE_FLAG_SG)
+                return;
+
         for (i = 0; i < ttm->num_pages; ++i) {
                 (*page)->mapping = NULL;
                 (*page++)->index = 0;
@@ -2583,4 +2583,28 @@ typedef union {
         float f;
 } SVGA3dDevCapResult;
 
+typedef enum {
+        SVGA3DCAPS_RECORD_UNKNOWN        = 0,
+        SVGA3DCAPS_RECORD_DEVCAPS_MIN    = 0x100,
+        SVGA3DCAPS_RECORD_DEVCAPS        = 0x100,
+        SVGA3DCAPS_RECORD_DEVCAPS_MAX    = 0x1ff,
+} SVGA3dCapsRecordType;
+
+typedef
+struct SVGA3dCapsRecordHeader {
+        uint32 length;
+        SVGA3dCapsRecordType type;
+}
+SVGA3dCapsRecordHeader;
+
+typedef
+struct SVGA3dCapsRecord {
+        SVGA3dCapsRecordHeader header;
+        uint32 data[1];
+}
+SVGA3dCapsRecord;
+
+
+typedef uint32 SVGA3dCapPair[2];
+
 #endif /* _SVGA3D_REG_H_ */
@@ -37,7 +37,7 @@ struct vmw_user_context {
 
-typedef int (*vmw_scrub_func)(struct vmw_ctx_bindinfo *);
+typedef int (*vmw_scrub_func)(struct vmw_ctx_bindinfo *, bool);
 
 static void vmw_user_context_free(struct vmw_resource *res);
 static struct vmw_resource *
@@ -50,9 +50,11 @@ static int vmw_gb_context_unbind(struct vmw_resource *res,
                                  bool readback,
                                  struct ttm_validate_buffer *val_buf);
 static int vmw_gb_context_destroy(struct vmw_resource *res);
-static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi);
-static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi);
-static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi);
+static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind);
+static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi,
+                                           bool rebind);
+static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi, bool rebind);
+static void vmw_context_binding_state_scrub(struct vmw_ctx_binding_state *cbs);
 static void vmw_context_binding_state_kill(struct vmw_ctx_binding_state *cbs);
 static uint64_t vmw_user_context_size;
 
@@ -111,10 +113,14 @@ static void vmw_hw_context_destroy(struct vmw_resource *res)
 
         if (res->func->destroy == vmw_gb_context_destroy) {
                 mutex_lock(&dev_priv->cmdbuf_mutex);
+                mutex_lock(&dev_priv->binding_mutex);
+                (void) vmw_context_binding_state_kill
+                        (&container_of(res, struct vmw_user_context, res)->cbs);
                 (void) vmw_gb_context_destroy(res);
                 if (dev_priv->pinned_bo != NULL &&
                     !dev_priv->query_cid_valid)
                         __vmw_execbuf_release_pinned_bo(dev_priv, NULL);
+                mutex_unlock(&dev_priv->binding_mutex);
                 mutex_unlock(&dev_priv->cmdbuf_mutex);
                 return;
         }
@@ -328,7 +334,7 @@ static int vmw_gb_context_unbind(struct vmw_resource *res,
         BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
 
         mutex_lock(&dev_priv->binding_mutex);
-        vmw_context_binding_state_kill(&uctx->cbs);
+        vmw_context_binding_state_scrub(&uctx->cbs);
 
         submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);
 
@@ -378,10 +384,6 @@ static int vmw_gb_context_destroy(struct vmw_resource *res)
                 SVGA3dCmdHeader header;
                 SVGA3dCmdDestroyGBContext body;
         } *cmd;
-        struct vmw_user_context *uctx =
-                container_of(res, struct vmw_user_context, res);
-
-        BUG_ON(!list_empty(&uctx->cbs.list));
 
         if (likely(res->id == -1))
                 return 0;
@@ -528,8 +530,9 @@ int vmw_context_define_ioctl(struct drm_device *dev, void *data,
  * vmw_context_scrub_shader - scrub a shader binding from a context.
  *
  * @bi: single binding information.
+ * @rebind: Whether to issue a bind instead of scrub command.
  */
-static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi)
+static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind)
 {
         struct vmw_private *dev_priv = bi->ctx->dev_priv;
         struct {
@@ -548,7 +551,8 @@ static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi)
         cmd->header.size = sizeof(cmd->body);
         cmd->body.cid = bi->ctx->id;
         cmd->body.type = bi->i1.shader_type;
-        cmd->body.shid = SVGA3D_INVALID_ID;
+        cmd->body.shid =
+                cpu_to_le32((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
         vmw_fifo_commit(dev_priv, sizeof(*cmd));
 
         return 0;
@@ -559,8 +563,10 @@ static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi)
  * from a context.
  *
  * @bi: single binding information.
+ * @rebind: Whether to issue a bind instead of scrub command.
  */
-static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi)
+static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi,
+                                           bool rebind)
 {
         struct vmw_private *dev_priv = bi->ctx->dev_priv;
         struct {
@@ -579,7 +585,8 @@ static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi)
         cmd->header.size = sizeof(cmd->body);
         cmd->body.cid = bi->ctx->id;
         cmd->body.type = bi->i1.rt_type;
-        cmd->body.target.sid = SVGA3D_INVALID_ID;
+        cmd->body.target.sid =
+                cpu_to_le32((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
         cmd->body.target.face = 0;
         cmd->body.target.mipmap = 0;
         vmw_fifo_commit(dev_priv, sizeof(*cmd));
@@ -591,11 +598,13 @@ static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi)
  * vmw_context_scrub_texture - scrub a texture binding from a context.
  *
  * @bi: single binding information.
+ * @rebind: Whether to issue a bind instead of scrub command.
  *
  * TODO: Possibly complement this function with a function that takes
  * a list of texture bindings and combines them to a single command.
  */
-static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi)
+static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi,
+                                     bool rebind)
 {
         struct vmw_private *dev_priv = bi->ctx->dev_priv;
         struct {
@@ -619,7 +628,8 @@ static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi)
         cmd->body.c.cid = bi->ctx->id;
         cmd->body.s1.stage = bi->i1.texture_stage;
         cmd->body.s1.name = SVGA3D_TS_BIND_TEXTURE;
-        cmd->body.s1.value = (uint32) SVGA3D_INVALID_ID;
+        cmd->body.s1.value =
+                cpu_to_le32((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
         vmw_fifo_commit(dev_priv, sizeof(*cmd));
 
         return 0;
@@ -692,6 +702,7 @@ int vmw_context_binding_add(struct vmw_ctx_binding_state *cbs,
                 vmw_context_binding_drop(loc);
 
         loc->bi = *bi;
+        loc->bi.scrubbed = false;
         list_add_tail(&loc->ctx_list, &cbs->list);
         INIT_LIST_HEAD(&loc->res_list);
 
@@ -727,12 +738,11 @@ static void vmw_context_binding_transfer(struct vmw_ctx_binding_state *cbs,
         if (loc->bi.ctx != NULL)
                 vmw_context_binding_drop(loc);
 
-        loc->bi = *bi;
-        list_add_tail(&loc->ctx_list, &cbs->list);
-        if (bi->res != NULL)
+        if (bi->res != NULL) {
+                loc->bi = *bi;
+                list_add_tail(&loc->ctx_list, &cbs->list);
                 list_add_tail(&loc->res_list, &bi->res->binding_head);
-        else
-                INIT_LIST_HEAD(&loc->res_list);
+        }
 }
 
 /**
@@ -746,7 +756,10 @@ static void vmw_context_binding_transfer(struct vmw_ctx_binding_state *cbs,
  */
 static void vmw_context_binding_kill(struct vmw_ctx_binding *cb)
 {
-        (void) vmw_scrub_funcs[cb->bi.bt](&cb->bi);
+        if (!cb->bi.scrubbed) {
+                (void) vmw_scrub_funcs[cb->bi.bt](&cb->bi, false);
+                cb->bi.scrubbed = true;
+        }
         vmw_context_binding_drop(cb);
 }
 
@@ -767,6 +780,27 @@ static void vmw_context_binding_state_kill(struct vmw_ctx_binding_state *cbs)
                 vmw_context_binding_kill(entry);
 }
 
+/**
+ * vmw_context_binding_state_scrub - Scrub all bindings associated with a
+ * struct vmw_ctx_binding state structure.
+ *
+ * @cbs: Pointer to the context binding state tracker.
+ *
+ * Emits commands to scrub all bindings associated with the
+ * context binding state tracker.
+ */
+static void vmw_context_binding_state_scrub(struct vmw_ctx_binding_state *cbs)
+{
+        struct vmw_ctx_binding *entry;
+
+        list_for_each_entry(entry, &cbs->list, ctx_list) {
+                if (!entry->bi.scrubbed) {
+                        (void) vmw_scrub_funcs[entry->bi.bt](&entry->bi, false);
+                        entry->bi.scrubbed = true;
+                }
+        }
+}
+
 /**
  * vmw_context_binding_res_list_kill - Kill all bindings on a
  * resource binding list
@@ -784,6 +818,27 @@ void vmw_context_binding_res_list_kill(struct list_head *head)
                 vmw_context_binding_kill(entry);
 }
 
+/**
+ * vmw_context_binding_res_list_scrub - Scrub all bindings on a
+ * resource binding list
+ *
+ * @head: list head of resource binding list
+ *
+ * Scrub all bindings associated with a specific resource. Typically
+ * called before the resource is evicted.
+ */
+void vmw_context_binding_res_list_scrub(struct list_head *head)
+{
+        struct vmw_ctx_binding *entry;
+
+        list_for_each_entry(entry, head, res_list) {
+                if (!entry->bi.scrubbed) {
+                        (void) vmw_scrub_funcs[entry->bi.bt](&entry->bi, false);
+                        entry->bi.scrubbed = true;
+                }
+        }
+}
+
 /**
  * vmw_context_binding_state_transfer - Commit staged binding info
  *
@@ -803,3 +858,50 @@ void vmw_context_binding_state_transfer(struct vmw_resource *ctx,
         list_for_each_entry_safe(entry, next, &from->list, ctx_list)
                 vmw_context_binding_transfer(&uctx->cbs, &entry->bi);
 }
+
+/**
+ * vmw_context_rebind_all - Rebind all scrubbed bindings of a context
+ *
+ * @ctx: The context resource
+ *
+ * Walks through the context binding list and rebinds all scrubbed
+ * resources.
+ */
+int vmw_context_rebind_all(struct vmw_resource *ctx)
+{
+        struct vmw_ctx_binding *entry;
+        struct vmw_user_context *uctx =
+                container_of(ctx, struct vmw_user_context, res);
+        struct vmw_ctx_binding_state *cbs = &uctx->cbs;
+        int ret;
+
+        list_for_each_entry(entry, &cbs->list, ctx_list) {
+                if (likely(!entry->bi.scrubbed))
+                        continue;
+
+                if (WARN_ON(entry->bi.res == NULL || entry->bi.res->id ==
+                            SVGA3D_INVALID_ID))
+                        continue;
+
+                ret = vmw_scrub_funcs[entry->bi.bt](&entry->bi, true);
+                if (unlikely(ret != 0))
+                        return ret;
+
+                entry->bi.scrubbed = false;
+        }
+
+        return 0;
+}
+
+/**
+ * vmw_context_binding_list - Return a list of context bindings
+ *
+ * @ctx: The context resource
+ *
+ * Returns the current list of bindings of the given context. Note that
+ * this list becomes stale as soon as the dev_priv::binding_mutex is unlocked.
+ */
+struct list_head *vmw_context_binding_list(struct vmw_resource *ctx)
+{
+        return &(container_of(ctx, struct vmw_user_context, res)->cbs.list);
+}
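Taken together, the scrub/rebind machinery above gives each guest-backed
context binding a third state between "bound" and "dropped". A rough
lifecycle, derived from the functions in this file:

    /*
     * bind:     vmw_context_binding_add()             scrubbed = false
     * eviction: vmw_context_binding_state_scrub() or
     *           vmw_context_binding_res_list_scrub()  emit unbind commands,
     *                                                 keep the tracking entry,
     *                                                 scrubbed = true
     * execbuf:  vmw_context_rebind_all()              reemit bind commands via
     *                                                 vmw_scrub_funcs[bt](bi,
     *                                                 true), scrubbed = false
     * destroy:  vmw_context_binding_state_kill()      scrub if needed, then
     *                                                 drop the tracking entry
     */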
@@ -941,6 +941,7 @@ static void vmw_postclose(struct drm_device *dev,
                 drm_master_put(&vmw_fp->locked_master);
         }
 
+        vmw_compat_shader_man_destroy(vmw_fp->shman);
         ttm_object_file_release(&vmw_fp->tfile);
         kfree(vmw_fp);
 }
@@ -960,11 +961,17 @@ static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
         if (unlikely(vmw_fp->tfile == NULL))
                 goto out_no_tfile;
 
+        vmw_fp->shman = vmw_compat_shader_man_create(dev_priv);
+        if (IS_ERR(vmw_fp->shman))
+                goto out_no_shman;
+
         file_priv->driver_priv = vmw_fp;
         dev_priv->bdev.dev_mapping = dev->dev_mapping;
 
         return 0;
 
+out_no_shman:
+        ttm_object_file_release(&vmw_fp->tfile);
 out_no_tfile:
         kfree(vmw_fp);
         return ret;
@@ -75,10 +75,14 @@
 #define VMW_RES_FENCE ttm_driver_type3
 #define VMW_RES_SHADER ttm_driver_type4
 
+struct vmw_compat_shader_manager;
+
 struct vmw_fpriv {
         struct drm_master *locked_master;
         struct ttm_object_file *tfile;
         struct list_head fence_events;
+        bool gb_aware;
+        struct vmw_compat_shader_manager *shman;
 };
 
 struct vmw_dma_buffer {
@@ -272,6 +276,7 @@ struct vmw_ctx_bindinfo {
         struct vmw_resource *ctx;
         struct vmw_resource *res;
         enum vmw_ctx_binding_type bt;
+        bool scrubbed;
         union {
                 SVGA3dShaderType shader_type;
                 SVGA3dRenderTargetType rt_type;
@@ -318,7 +323,7 @@ struct vmw_sw_context{
         struct drm_open_hash res_ht;
         bool res_ht_initialized;
         bool kernel; /**< is the called made from the kernel */
-        struct ttm_object_file *tfile;
+        struct vmw_fpriv *fp;
         struct list_head validate_nodes;
         struct vmw_relocation relocs[VMWGFX_MAX_RELOCATIONS];
         uint32_t cur_reloc;
@@ -336,6 +341,7 @@ struct vmw_sw_context{
         bool needs_post_query_barrier;
         struct vmw_resource *error_resource;
         struct vmw_ctx_binding_state staged_bindings;
+        struct list_head staged_shaders;
 };
 
 struct vmw_legacy_display;
@@ -569,6 +575,8 @@ struct vmw_user_resource_conv;
 
 extern void vmw_resource_unreference(struct vmw_resource **p_res);
 extern struct vmw_resource *vmw_resource_reference(struct vmw_resource *res);
+extern struct vmw_resource *
+vmw_resource_reference_unless_doomed(struct vmw_resource *res);
 extern int vmw_resource_validate(struct vmw_resource *res);
 extern int vmw_resource_reserve(struct vmw_resource *res, bool no_backup);
 extern bool vmw_resource_needs_backup(const struct vmw_resource *res);
@@ -957,6 +965,9 @@ extern void
 vmw_context_binding_state_transfer(struct vmw_resource *res,
                                    struct vmw_ctx_binding_state *cbs);
 extern void vmw_context_binding_res_list_kill(struct list_head *head);
+extern void vmw_context_binding_res_list_scrub(struct list_head *head);
+extern int vmw_context_rebind_all(struct vmw_resource *ctx);
+extern struct list_head *vmw_context_binding_list(struct vmw_resource *ctx);
 
 /*
  * Surface management - vmwgfx_surface.c
@@ -991,6 +1002,28 @@ extern int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
                                    struct drm_file *file_priv);
 extern int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data,
                                     struct drm_file *file_priv);
+extern int vmw_compat_shader_lookup(struct vmw_compat_shader_manager *man,
+                                    SVGA3dShaderType shader_type,
+                                    u32 *user_key);
+extern void vmw_compat_shaders_commit(struct vmw_compat_shader_manager *man,
+                                      struct list_head *list);
+extern void vmw_compat_shaders_revert(struct vmw_compat_shader_manager *man,
+                                      struct list_head *list);
+extern int vmw_compat_shader_remove(struct vmw_compat_shader_manager *man,
+                                    u32 user_key,
+                                    SVGA3dShaderType shader_type,
+                                    struct list_head *list);
+extern int vmw_compat_shader_add(struct vmw_compat_shader_manager *man,
+                                 u32 user_key, const void *bytecode,
+                                 SVGA3dShaderType shader_type,
+                                 size_t size,
+                                 struct ttm_object_file *tfile,
+                                 struct list_head *list);
+extern struct vmw_compat_shader_manager *
+vmw_compat_shader_man_create(struct vmw_private *dev_priv);
+extern void
+vmw_compat_shader_man_destroy(struct vmw_compat_shader_manager *man);
+
 
 /**
  * Inline helper functions
@@ -114,8 +114,10 @@ static void vmw_resource_list_unreserve(struct list_head *list,
                  * persistent context binding tracker.
                  */
                 if (unlikely(val->staged_bindings)) {
-                        vmw_context_binding_state_transfer
-                                (val->res, val->staged_bindings);
+                        if (!backoff) {
+                                vmw_context_binding_state_transfer
+                                        (val->res, val->staged_bindings);
+                        }
                         kfree(val->staged_bindings);
                         val->staged_bindings = NULL;
                 }
@@ -177,6 +179,44 @@ static int vmw_resource_val_add(struct vmw_sw_context *sw_context,
         return 0;
 }
 
+/**
+ * vmw_resource_context_res_add - Put resources previously bound to a context on
+ * the validation list
+ *
+ * @dev_priv: Pointer to a device private structure
+ * @sw_context: Pointer to a software context used for this command submission
+ * @ctx: Pointer to the context resource
+ *
+ * This function puts all resources that were previously bound to @ctx on
+ * the resource validation list. This is part of the context state reemission
+ */
+static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
+                                        struct vmw_sw_context *sw_context,
+                                        struct vmw_resource *ctx)
+{
+        struct list_head *binding_list;
+        struct vmw_ctx_binding *entry;
+        int ret = 0;
+        struct vmw_resource *res;
+
+        mutex_lock(&dev_priv->binding_mutex);
+        binding_list = vmw_context_binding_list(ctx);
+
+        list_for_each_entry(entry, binding_list, ctx_list) {
+                res = vmw_resource_reference_unless_doomed(entry->bi.res);
+                if (unlikely(res == NULL))
+                        continue;
+
+                ret = vmw_resource_val_add(sw_context, entry->bi.res, NULL);
+                vmw_resource_unreference(&res);
+                if (unlikely(ret != 0))
+                        break;
+        }
+
+        mutex_unlock(&dev_priv->binding_mutex);
+        return ret;
+}
+
 /**
  * vmw_resource_relocation_add - Add a relocation to the relocation list
  *
@@ -233,8 +273,12 @@ static void vmw_resource_relocations_apply(uint32_t *cb,
 {
         struct vmw_resource_relocation *rel;
 
-        list_for_each_entry(rel, list, head)
-                cb[rel->offset] = rel->res->id;
+        list_for_each_entry(rel, list, head) {
+                if (likely(rel->res != NULL))
+                        cb[rel->offset] = rel->res->id;
+                else
+                        cb[rel->offset] = SVGA_3D_CMD_NOP;
+        }
 }
 
 static int vmw_cmd_invalid(struct vmw_private *dev_priv,
@@ -379,22 +423,27 @@ static int vmw_resources_validate(struct vmw_sw_context *sw_context)
 }
 
 /**
- * vmw_cmd_res_check - Check that a resource is present and if so, put it
+ * vmw_cmd_compat_res_check - Check that a resource is present and if so, put it
  * on the resource validate list unless it's already there.
  *
  * @dev_priv: Pointer to a device private structure.
  * @sw_context: Pointer to the software context.
  * @res_type: Resource type.
  * @converter: User-space visisble type specific information.
- * @id: Pointer to the location in the command buffer currently being
+ * @id: user-space resource id handle.
+ * @id_loc: Pointer to the location in the command buffer currently being
  * parsed from where the user-space resource id handle is located.
+ * @p_val: Pointer to pointer to resource validalidation node. Populated
+ * on exit.
  */
-static int vmw_cmd_res_check(struct vmw_private *dev_priv,
-                             struct vmw_sw_context *sw_context,
-                             enum vmw_res_type res_type,
-                             const struct vmw_user_resource_conv *converter,
-                             uint32_t *id,
-                             struct vmw_resource_val_node **p_val)
+static int
+vmw_cmd_compat_res_check(struct vmw_private *dev_priv,
+                         struct vmw_sw_context *sw_context,
+                         enum vmw_res_type res_type,
+                         const struct vmw_user_resource_conv *converter,
+                         uint32_t id,
+                         uint32_t *id_loc,
+                         struct vmw_resource_val_node **p_val)
 {
         struct vmw_res_cache_entry *rcache =
                 &sw_context->res_cache[res_type];
@@ -402,7 +451,7 @@ static int vmw_cmd_res_check(struct vmw_private *dev_priv,
         struct vmw_resource_val_node *node;
         int ret;
 
-        if (*id == SVGA3D_INVALID_ID) {
+        if (id == SVGA3D_INVALID_ID) {
                 if (p_val)
                         *p_val = NULL;
                 if (res_type == vmw_res_context) {
@@ -417,7 +466,7 @@ static int vmw_cmd_res_check(struct vmw_private *dev_priv,
          * resource
          */
 
-        if (likely(rcache->valid && *id == rcache->handle)) {
+        if (likely(rcache->valid && id == rcache->handle)) {
                 const struct vmw_resource *res = rcache->res;
 
                 rcache->node->first_usage = false;
@@ -426,28 +475,28 @@ static int vmw_cmd_res_check(struct vmw_private *dev_priv,
 
                 return vmw_resource_relocation_add
                         (&sw_context->res_relocations, res,
-                         id - sw_context->buf_start);
+                         id_loc - sw_context->buf_start);
         }
 
         ret = vmw_user_resource_lookup_handle(dev_priv,
-                                              sw_context->tfile,
-                                              *id,
+                                              sw_context->fp->tfile,
+                                              id,
                                               converter,
                                               &res);
         if (unlikely(ret != 0)) {
                 DRM_ERROR("Could not find or use resource 0x%08x.\n",
-                          (unsigned) *id);
+                          (unsigned) id);
                 dump_stack();
                 return ret;
         }
 
         rcache->valid = true;
         rcache->res = res;
-        rcache->handle = *id;
+        rcache->handle = id;
 
         ret = vmw_resource_relocation_add(&sw_context->res_relocations,
                                           res,
-                                          id - sw_context->buf_start);
+                                          id_loc - sw_context->buf_start);
         if (unlikely(ret != 0))
                 goto out_no_reloc;
 
@@ -459,7 +508,11 @@ static int vmw_cmd_res_check(struct vmw_private *dev_priv,
         if (p_val)
                 *p_val = node;
 
-        if (node->first_usage && res_type == vmw_res_context) {
+        if (dev_priv->has_mob && node->first_usage &&
+            res_type == vmw_res_context) {
+                ret = vmw_resource_context_res_add(dev_priv, sw_context, res);
+                if (unlikely(ret != 0))
+                        goto out_no_reloc;
                 node->staged_bindings =
                         kzalloc(sizeof(*node->staged_bindings), GFP_KERNEL);
                 if (node->staged_bindings == NULL) {
@@ -480,6 +533,59 @@ static int vmw_cmd_res_check(struct vmw_private *dev_priv,
         return ret;
 }
 
+/**
+ * vmw_cmd_res_check - Check that a resource is present and if so, put it
+ * on the resource validate list unless it's already there.
+ *
+ * @dev_priv: Pointer to a device private structure.
+ * @sw_context: Pointer to the software context.
+ * @res_type: Resource type.
+ * @converter: User-space visisble type specific information.
+ * @id_loc: Pointer to the location in the command buffer currently being
+ * parsed from where the user-space resource id handle is located.
+ * @p_val: Pointer to pointer to resource validalidation node. Populated
+ * on exit.
+ */
+static int
+vmw_cmd_res_check(struct vmw_private *dev_priv,
+                  struct vmw_sw_context *sw_context,
+                  enum vmw_res_type res_type,
+                  const struct vmw_user_resource_conv *converter,
+                  uint32_t *id_loc,
+                  struct vmw_resource_val_node **p_val)
+{
+        return vmw_cmd_compat_res_check(dev_priv, sw_context, res_type,
+                                        converter, *id_loc, id_loc, p_val);
+}
+
+/**
+ * vmw_rebind_contexts - Rebind all resources previously bound to
+ * referenced contexts.
+ *
+ * @sw_context: Pointer to the software context.
+ *
+ * Rebind context binding points that have been scrubbed because of eviction.
+ */
+static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
+{
+        struct vmw_resource_val_node *val;
+        int ret;
+
+        list_for_each_entry(val, &sw_context->resource_list, head) {
+                if (likely(!val->staged_bindings))
+                        continue;
+
+                ret = vmw_context_rebind_all(val->res);
+                if (unlikely(ret != 0)) {
+                        if (ret != -ERESTARTSYS)
+                                DRM_ERROR("Failed to rebind context.\n");
+                        return ret;
+                }
+        }
+
+        return 0;
+}
+
 /**
  * vmw_cmd_cid_check - Check a command header for valid context information.
  *
@@ -767,7 +873,7 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
         struct vmw_relocation *reloc;
         int ret;
 
-        ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo);
+        ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo);
         if (unlikely(ret != 0)) {
                 DRM_ERROR("Could not find or use MOB buffer.\n");
                 return -EINVAL;
@@ -828,7 +934,7 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
         struct vmw_relocation *reloc;
         int ret;
 
-        ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo);
+        ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo);
         if (unlikely(ret != 0)) {
                 DRM_ERROR("Could not find or use GMR region.\n");
                 return -EINVAL;
@@ -1127,7 +1233,8 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
 
         srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);
 
-        vmw_kms_cursor_snoop(srf, sw_context->tfile, &vmw_bo->base, header);
+        vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->base,
+                             header);
 
 out_no_surface:
         vmw_dmabuf_unreference(&vmw_bo);
@@ -1478,6 +1585,98 @@ static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv,
                                  &cmd->body.sid, NULL);
 }
 
+
+/**
+ * vmw_cmd_shader_define - Validate an SVGA_3D_CMD_SHADER_DEFINE
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
+                                 struct vmw_sw_context *sw_context,
+                                 SVGA3dCmdHeader *header)
+{
+        struct vmw_shader_define_cmd {
+                SVGA3dCmdHeader header;
+                SVGA3dCmdDefineShader body;
+        } *cmd;
+        int ret;
+        size_t size;
+
+        cmd = container_of(header, struct vmw_shader_define_cmd,
+                           header);
+
+        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
+                                user_context_converter, &cmd->body.cid,
+                                NULL);
+        if (unlikely(ret != 0))
+                return ret;
+
+        if (unlikely(!dev_priv->has_mob))
+                return 0;
+
+        size = cmd->header.size - sizeof(cmd->body);
+        ret = vmw_compat_shader_add(sw_context->fp->shman,
+                                    cmd->body.shid, cmd + 1,
+                                    cmd->body.type, size,
+                                    sw_context->fp->tfile,
+                                    &sw_context->staged_shaders);
+        if (unlikely(ret != 0))
+                return ret;
+
+        return vmw_resource_relocation_add(&sw_context->res_relocations,
+                                           NULL, &cmd->header.id -
+                                           sw_context->buf_start);
+
+        return 0;
+}
+
+/**
+ * vmw_cmd_shader_destroy - Validate an SVGA_3D_CMD_SHADER_DESTROY
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
+                                  struct vmw_sw_context *sw_context,
+                                  SVGA3dCmdHeader *header)
+{
+        struct vmw_shader_destroy_cmd {
+                SVGA3dCmdHeader header;
+                SVGA3dCmdDestroyShader body;
+        } *cmd;
+        int ret;
+
+        cmd = container_of(header, struct vmw_shader_destroy_cmd,
+                           header);
+
+        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
+                                user_context_converter, &cmd->body.cid,
+                                NULL);
+        if (unlikely(ret != 0))
+                return ret;
+
+        if (unlikely(!dev_priv->has_mob))
+                return 0;
+
+        ret = vmw_compat_shader_remove(sw_context->fp->shman,
+                                       cmd->body.shid,
+                                       cmd->body.type,
+                                       &sw_context->staged_shaders);
+        if (unlikely(ret != 0))
+                return ret;
+
+        return vmw_resource_relocation_add(&sw_context->res_relocations,
+                                           NULL, &cmd->header.id -
+                                           sw_context->buf_start);
+
+        return 0;
+}
+
 /**
  * vmw_cmd_set_shader - Validate an SVGA_3D_CMD_SET_SHADER
  * command
@@ -1509,10 +1708,18 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
         if (dev_priv->has_mob) {
                 struct vmw_ctx_bindinfo bi;
                 struct vmw_resource_val_node *res_node;
+                u32 shid = cmd->body.shid;
+
+                if (shid != SVGA3D_INVALID_ID)
+                        (void) vmw_compat_shader_lookup(sw_context->fp->shman,
+                                                        cmd->body.type,
+                                                        &shid);
 
-                ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_shader,
-                                        user_shader_converter,
-                                        &cmd->body.shid, &res_node);
+                ret = vmw_cmd_compat_res_check(dev_priv, sw_context,
+                                               vmw_res_shader,
+                                               user_shader_converter,
+                                               shid,
+                                               &cmd->body.shid, &res_node);
                 if (unlikely(ret != 0))
                         return ret;
 
@@ -1526,6 +1733,39 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
         return 0;
 }
 
+/**
+ * vmw_cmd_set_shader_const - Validate an SVGA_3D_CMD_SET_SHADER_CONST
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_set_shader_const(struct vmw_private *dev_priv,
+                                    struct vmw_sw_context *sw_context,
+                                    SVGA3dCmdHeader *header)
+{
+        struct vmw_set_shader_const_cmd {
+                SVGA3dCmdHeader header;
+                SVGA3dCmdSetShaderConst body;
+        } *cmd;
+        int ret;
+
+        cmd = container_of(header, struct vmw_set_shader_const_cmd,
+                           header);
+
+        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
+                                user_context_converter, &cmd->body.cid,
+                                NULL);
+        if (unlikely(ret != 0))
+                return ret;
+
+        if (dev_priv->has_mob)
+                header->id = SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE;
+
+        return 0;
+}
+
 /**
  * vmw_cmd_bind_gb_shader - Validate an SVGA_3D_CMD_BIND_GB_SHADER
  * command
@@ -1634,14 +1874,14 @@ static const struct vmw_cmd_entry const vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
                     true, false, false),
         VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check,
                     false, false, false),
-        VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_cid_check,
-                    true, true, false),
-        VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_cid_check,
-                    true, true, false),
+        VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_shader_define,
+                    true, false, false),
+        VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_shader_destroy,
+                    true, false, false),
         VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader,
                     true, false, false),
-        VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_cid_check,
-                    true, true, false),
+        VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_set_shader_const,
+                    true, false, false),
         VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw,
                     true, false, false),
         VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check,
@@ -2171,7 +2411,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
         } else
                 sw_context->kernel = true;
 
-        sw_context->tfile = vmw_fpriv(file_priv)->tfile;
+        sw_context->fp = vmw_fpriv(file_priv);
         sw_context->cur_reloc = 0;
         sw_context->cur_val_buf = 0;
         sw_context->fence_flags = 0;
@@ -2188,16 +2428,17 @@ int vmw_execbuf_process(struct drm_file *file_priv,
                         goto out_unlock;
                 sw_context->res_ht_initialized = true;
         }
+        INIT_LIST_HEAD(&sw_context->staged_shaders);
 
         INIT_LIST_HEAD(&resource_list);
         ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
                                 command_size);
         if (unlikely(ret != 0))
-                goto out_err;
+                goto out_err_nores;
 
         ret = vmw_resources_reserve(sw_context);
         if (unlikely(ret != 0))
-                goto out_err;
+                goto out_err_nores;
 
         ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes);
         if (unlikely(ret != 0))
@@ -2225,6 +2466,12 @@ int vmw_execbuf_process(struct drm_file *file_priv,
                 goto out_err;
         }
 
+        if (dev_priv->has_mob) {
+                ret = vmw_rebind_contexts(sw_context);
+                if (unlikely(ret != 0))
+                        goto out_err;
+        }
+
         cmd = vmw_fifo_reserve(dev_priv, command_size);
         if (unlikely(cmd == NULL)) {
                 DRM_ERROR("Failed reserving fifo space for commands.\n");
@@ -2276,6 +2523,8 @@ int vmw_execbuf_process(struct drm_file *file_priv,
         }
 
         list_splice_init(&sw_context->resource_list, &resource_list);
+        vmw_compat_shaders_commit(sw_context->fp->shman,
+                                  &sw_context->staged_shaders);
         mutex_unlock(&dev_priv->cmdbuf_mutex);
 
         /*
@@ -2289,10 +2538,11 @@ int vmw_execbuf_process(struct drm_file *file_priv,
 out_unlock_binding:
         mutex_unlock(&dev_priv->binding_mutex);
 out_err:
+        ttm_eu_backoff_reservation(&ticket, &sw_context->validate_nodes);
+out_err_nores:
+        vmw_resource_list_unreserve(&sw_context->resource_list, true);
         vmw_resource_relocations_free(&sw_context->res_relocations);
         vmw_free_relocations(sw_context);
-        ttm_eu_backoff_reservation(&ticket, &sw_context->validate_nodes);
-        vmw_resource_list_unreserve(&sw_context->resource_list, true);
         vmw_clear_validations(sw_context);
         if (unlikely(dev_priv->pinned_bo != NULL &&
                      !dev_priv->query_cid_valid))
@@ -2301,6 +2551,8 @@ int vmw_execbuf_process(struct drm_file *file_priv,
         list_splice_init(&sw_context->resource_list, &resource_list);
         error_resource = sw_context->error_resource;
         sw_context->error_resource = NULL;
+        vmw_compat_shaders_revert(sw_context->fp->shman,
+                                  &sw_context->staged_shaders);
         mutex_unlock(&dev_priv->cmdbuf_mutex);
 
         /*
@@ -29,12 +29,18 @@
 #include <drm/vmwgfx_drm.h>
 #include "vmwgfx_kms.h"
 
+struct svga_3d_compat_cap {
+        SVGA3dCapsRecordHeader header;
+        SVGA3dCapPair pairs[SVGA3D_DEVCAP_MAX];
+};
+
 int vmw_getparam_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *file_priv)
 {
         struct vmw_private *dev_priv = vmw_priv(dev);
         struct drm_vmw_getparam_arg *param =
                 (struct drm_vmw_getparam_arg *)data;
+        struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
 
         switch (param->param) {
         case DRM_VMW_PARAM_NUM_STREAMS:
@@ -60,6 +66,11 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
                 __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
                 const struct vmw_fifo_state *fifo = &dev_priv->fifo;
 
+                if ((dev_priv->capabilities & SVGA_CAP_GBOBJECTS)) {
+                        param->value = SVGA3D_HWVERSION_WS8_B1;
+                        break;
+                }
+
                 param->value =
                         ioread32(fifo_mem +
                                  ((fifo->capabilities &
@@ -69,17 +80,26 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
                 break;
         }
         case DRM_VMW_PARAM_MAX_SURF_MEMORY:
-                param->value = dev_priv->memory_size;
+                if ((dev_priv->capabilities & SVGA_CAP_GBOBJECTS) &&
+                    !vmw_fp->gb_aware)
+                        param->value = dev_priv->max_mob_pages * PAGE_SIZE / 2;
+                else
+                        param->value = dev_priv->memory_size;
                 break;
         case DRM_VMW_PARAM_3D_CAPS_SIZE:
-                if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS)
-                        param->value = SVGA3D_DEVCAP_MAX;
+                if ((dev_priv->capabilities & SVGA_CAP_GBOBJECTS) &&
+                    vmw_fp->gb_aware)
+                        param->value = SVGA3D_DEVCAP_MAX * sizeof(uint32_t);
+                else if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS)
+                        param->value = sizeof(struct svga_3d_compat_cap) +
+                                sizeof(uint32_t);
                 else
                         param->value = (SVGA_FIFO_3D_CAPS_LAST -
-                                        SVGA_FIFO_3D_CAPS + 1);
-                param->value *= sizeof(uint32_t);
+                                        SVGA_FIFO_3D_CAPS + 1) *
+                                sizeof(uint32_t);
                 break;
         case DRM_VMW_PARAM_MAX_MOB_MEMORY:
+                vmw_fp->gb_aware = true;
                 param->value = dev_priv->max_mob_pages * PAGE_SIZE;
                 break;
         default:
@ -91,6 +111,38 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static int vmw_fill_compat_cap(struct vmw_private *dev_priv, void *bounce,
|
||||||
|
size_t size)
|
||||||
|
{
|
||||||
|
struct svga_3d_compat_cap *compat_cap =
|
||||||
|
(struct svga_3d_compat_cap *) bounce;
|
||||||
|
unsigned int i;
|
||||||
|
size_t pair_offset = offsetof(struct svga_3d_compat_cap, pairs);
|
||||||
|
unsigned int max_size;
|
||||||
|
|
||||||
|
if (size < pair_offset)
|
||||||
|
return -EINVAL;
|
||||||
|
|
||||||
|
max_size = (size - pair_offset) / sizeof(SVGA3dCapPair);
|
||||||
|
|
||||||
|
if (max_size > SVGA3D_DEVCAP_MAX)
|
||||||
|
max_size = SVGA3D_DEVCAP_MAX;
|
||||||
|
|
||||||
|
compat_cap->header.length =
|
||||||
|
(pair_offset + max_size * sizeof(SVGA3dCapPair)) / sizeof(u32);
|
||||||
|
compat_cap->header.type = SVGA3DCAPS_RECORD_DEVCAPS;
|
||||||
|
|
||||||
|
mutex_lock(&dev_priv->hw_mutex);
|
||||||
|
for (i = 0; i < max_size; ++i) {
|
||||||
|
vmw_write(dev_priv, SVGA_REG_DEV_CAP, i);
|
||||||
|
compat_cap->pairs[i][0] = i;
|
||||||
|
compat_cap->pairs[i][1] = vmw_read(dev_priv, SVGA_REG_DEV_CAP);
|
||||||
|
}
|
||||||
|
mutex_unlock(&dev_priv->hw_mutex);
|
||||||
|
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
|
int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
|
||||||
struct drm_file *file_priv)
|
struct drm_file *file_priv)
|
||||||
|
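vmw_fill_compat_cap() emulates the legacy SVGA3DCAPS_RECORD_DEVCAPS FIFO record for guest-backed-only hardware: a record header followed by (index, value) pairs read back from the SVGA_REG_DEV_CAP register. The size arithmetic is worth spelling out; here is a standalone sketch using simplified stand-in types (the real SVGA3dCapsRecordHeader and SVGA3dCapPair definitions live in the svga3d device headers and may differ):

    #include <stddef.h>
    #include <stdint.h>

    /* Stand-ins for the svga3d header types, for illustration only. */
    typedef uint32_t SVGA3dCapPair[2];        /* { devcap index, value } */
    struct caps_record_header { uint32_t length; uint32_t type; };
    struct compat_cap_sketch {
            struct caps_record_header header;
            SVGA3dCapPair pairs[1];           /* really SVGA3D_DEVCAP_MAX */
    };

    /* How many devcap pairs fit into a bounce buffer of 'size' bytes? */
    static unsigned int pairs_that_fit(size_t size)
    {
            size_t pair_offset = offsetof(struct compat_cap_sketch, pairs);

            if (size < pair_offset)
                    return 0;
            /* header.length is counted in 32-bit words, which is why the
             * patch divides (pair_offset + n * sizeof(SVGA3dCapPair)) by
             * sizeof(u32) when filling the record header. */
            return (size - pair_offset) / sizeof(SVGA3dCapPair);
    }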
@@ -104,41 +156,49 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
 	void *bounce;
 	int ret;
 	bool gb_objects = !!(dev_priv->capabilities & SVGA_CAP_GBOBJECTS);
+	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
 
 	if (unlikely(arg->pad64 != 0)) {
 		DRM_ERROR("Illegal GET_3D_CAP argument.\n");
 		return -EINVAL;
 	}
 
-	if (gb_objects)
-		size = SVGA3D_DEVCAP_MAX;
+	if (gb_objects && vmw_fp->gb_aware)
+		size = SVGA3D_DEVCAP_MAX * sizeof(uint32_t);
+	else if (gb_objects)
+		size = sizeof(struct svga_3d_compat_cap) + sizeof(uint32_t);
 	else
-		size = (SVGA_FIFO_3D_CAPS_LAST - SVGA_FIFO_3D_CAPS + 1);
-
-	size *= sizeof(uint32_t);
+		size = (SVGA_FIFO_3D_CAPS_LAST - SVGA_FIFO_3D_CAPS + 1) *
+			sizeof(uint32_t);
 
 	if (arg->max_size < size)
 		size = arg->max_size;
 
-	bounce = vmalloc(size);
+	bounce = vzalloc(size);
 	if (unlikely(bounce == NULL)) {
 		DRM_ERROR("Failed to allocate bounce buffer for 3D caps.\n");
 		return -ENOMEM;
 	}
 
-	if (gb_objects) {
-		int i;
+	if (gb_objects && vmw_fp->gb_aware) {
+		int i, num;
 		uint32_t *bounce32 = (uint32_t *) bounce;
 
+		num = size / sizeof(uint32_t);
+		if (num > SVGA3D_DEVCAP_MAX)
+			num = SVGA3D_DEVCAP_MAX;
+
 		mutex_lock(&dev_priv->hw_mutex);
-		for (i = 0; i < SVGA3D_DEVCAP_MAX; ++i) {
+		for (i = 0; i < num; ++i) {
 			vmw_write(dev_priv, SVGA_REG_DEV_CAP, i);
 			*bounce32++ = vmw_read(dev_priv, SVGA_REG_DEV_CAP);
 		}
 		mutex_unlock(&dev_priv->hw_mutex);
+	} else if (gb_objects) {
+		ret = vmw_fill_compat_cap(dev_priv, bounce, size);
+		if (unlikely(ret != 0))
+			goto out_err;
 	} else {
-
 		fifo_mem = dev_priv->mmio_virt;
 		memcpy_fromio(bounce, &fifo_mem[SVGA_FIFO_3D_CAPS], size);
 	}
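Two details here guard against leaking kernel memory to user-space: the requested size is clamped to arg->max_size before allocation, and the bounce buffer is now allocated with vzalloc() rather than vmalloc(), so any bytes the fill paths do not write (for instance when the clamped size is not an exact multiple of a devcap pair) are zero when copy_to_user() later pushes the whole buffer out. A minimal kernel-style sketch of that pattern, with a hypothetical fill_partial() standing in for the real fill paths:

    #include <linux/vmalloc.h>
    #include <linux/uaccess.h>

    /* Hypothetical fill step that may write fewer than 'size' bytes. */
    static void fill_partial(void *buf, size_t size)
    {
            /* ... writes some prefix of the buffer ... */
    }

    static int copy_caps_out(void __user *dst, size_t size)
    {
            int ret = 0;
            void *bounce = vzalloc(size);   /* zeroed: no stale kernel data */

            if (!bounce)
                    return -ENOMEM;

            fill_partial(bounce, size);     /* unwritten tail stays zero */

            if (copy_to_user(dst, bounce, size))
                    ret = -EFAULT;
            vfree(bounce);
            return ret;
    }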
@@ -146,6 +206,7 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
 	ret = copy_to_user(buffer, bounce, size);
 	if (ret)
 		ret = -EFAULT;
+out_err:
 	vfree(bounce);
 
 	if (unlikely(ret != 0))
@@ -134,6 +134,7 @@ static int vmw_setup_otable_base(struct vmw_private *dev_priv,
 	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
 	if (unlikely(cmd == NULL)) {
 		DRM_ERROR("Failed reserving FIFO space for OTable setup.\n");
+		ret = -ENOMEM;
 		goto out_no_fifo;
 	}
 
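This one-liner is the "uninitialized stack read in vmw_setup_otable_base" fix from the pull: when vmw_fifo_reserve() failed, the function jumped to out_no_fifo and returned ret without ever assigning it. The bug shape is common enough to be worth a self-contained sketch (all names here are illustrative stand-ins):

    #include <errno.h>
    #include <stddef.h>

    static void *reserve_space(void) { return NULL; }           /* stand-in */
    static int do_setup(void *cmd) { (void)cmd; return 0; }     /* stand-in */

    int setup_something(void)
    {
            int ret;                        /* note: not initialized */
            void *cmd = reserve_space();

            if (cmd == NULL) {
                    ret = -ENOMEM;          /* the fix: assign before goto */
                    goto out;
            }
            ret = do_setup(cmd);
    out:
            return ret;                     /* without the fix, the early
                                               exit returned stack garbage */
    }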
@@ -88,6 +88,11 @@ struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
 	return res;
 }
 
+struct vmw_resource *
+vmw_resource_reference_unless_doomed(struct vmw_resource *res)
+{
+	return kref_get_unless_zero(&res->kref) ? res : NULL;
+}
+
 /**
  * vmw_resource_release_id - release a resource id to the id manager.
@@ -136,8 +141,12 @@ static void vmw_resource_release(struct kref *kref)
 		vmw_dmabuf_unreference(&res->backup);
 	}
 
-	if (likely(res->hw_destroy != NULL))
+	if (likely(res->hw_destroy != NULL)) {
 		res->hw_destroy(res);
+		mutex_lock(&dev_priv->binding_mutex);
+		vmw_context_binding_res_list_kill(&res->binding_head);
+		mutex_unlock(&dev_priv->binding_mutex);
+	}
 
 	id = res->id;
 	if (res->res_free != NULL)
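vmw_resource_reference_unless_doomed() is the standard kref_get_unless_zero() idiom: a lookup structure may still point at an object whose refcount has already dropped to zero but whose release function has not yet unlinked it, and taking a plain reference there would resurrect a dying object. A generic sketch of the idiom (lookup_locked() is a hypothetical stand-in for whatever table the caller searches under its lock):

    #include <linux/kref.h>
    #include <linux/spinlock.h>

    struct obj {
            struct kref kref;
    };

    static struct obj *lookup_locked(void) { return NULL; }     /* stand-in */

    static struct obj *get_obj(spinlock_t *lock)
    {
            struct obj *o;

            spin_lock(lock);
            o = lookup_locked();
            /* A zero refcount means release is already running; treat it
             * as a lookup miss rather than resurrect a dying object. */
            if (o && !kref_get_unless_zero(&o->kref))
                    o = NULL;
            spin_unlock(lock);
            return o;
    }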
@@ -29,6 +29,8 @@
 #include "vmwgfx_resource_priv.h"
 #include "ttm/ttm_placement.h"
 
+#define VMW_COMPAT_SHADER_HT_ORDER 12
+
 struct vmw_shader {
 	struct vmw_resource res;
 	SVGA3dShaderType type;
@@ -40,6 +42,50 @@ struct vmw_user_shader {
 	struct vmw_shader shader;
 };
 
+/**
+ * enum vmw_compat_shader_state - Staging state for compat shaders
+ */
+enum vmw_compat_shader_state {
+	VMW_COMPAT_COMMITED,
+	VMW_COMPAT_ADD,
+	VMW_COMPAT_DEL
+};
+
+/**
+ * struct vmw_compat_shader - Metadata for compat shaders.
+ *
+ * @handle: The TTM handle of the guest backed shader.
+ * @tfile: The struct ttm_object_file the guest backed shader is registered
+ * with.
+ * @hash: Hash item for lookup.
+ * @head: List head for staging lists or the compat shader manager list.
+ * @state: Staging state.
+ *
+ * The structure is protected by the cmdbuf lock.
+ */
+struct vmw_compat_shader {
+	u32 handle;
+	struct ttm_object_file *tfile;
+	struct drm_hash_item hash;
+	struct list_head head;
+	enum vmw_compat_shader_state state;
+};
+
+/**
+ * struct vmw_compat_shader_manager - Compat shader manager.
+ *
+ * @shaders: Hash table containing staged and committed compat shaders
+ * @list: List of committed shaders.
+ * @dev_priv: Pointer to a device private structure.
+ *
+ * @shaders and @list are protected by the cmdbuf mutex for now.
+ */
+struct vmw_compat_shader_manager {
+	struct drm_open_hash shaders;
+	struct list_head list;
+	struct vmw_private *dev_priv;
+};
+
 static void vmw_user_shader_free(struct vmw_resource *res);
 static struct vmw_resource *
 vmw_user_shader_base_to_res(struct ttm_base_object *base);
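The three states form a small two-phase state machine driven by execbuf: VMW_COMPAT_ADD and VMW_COMPAT_DEL entries sit on a caller-owned staging list until the command stream either commits or fails. An informal summary of the transitions, matching the commit/revert helpers further down in this diff:

    /* Staging transitions (see vmw_compat_shaders_commit()/_revert()):
     *
     *   vmw_compat_shader_add():    (new)    -> ADD, on staging list
     *   vmw_compat_shader_remove(): COMMITED -> DEL, on staging list
     *                               ADD      -> freed immediately
     *   commit:                     ADD      -> COMMITED, on manager list
     *                               DEL      -> freed, handle unreferenced
     *   revert:                     ADD      -> freed, handle unreferenced
     *                               DEL      -> COMMITED, back on the
     *                                           manager list
     */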
@@ -258,7 +304,7 @@ static int vmw_gb_shader_destroy(struct vmw_resource *res)
 		return 0;
 
 	mutex_lock(&dev_priv->binding_mutex);
-	vmw_context_binding_res_list_kill(&res->binding_head);
+	vmw_context_binding_res_list_scrub(&res->binding_head);
 
 	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
 	if (unlikely(cmd == NULL)) {
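The kill-to-scrub change belongs to the "Reemit context bindings when necessary" patch: on shader destruction the binding must be undone on the device, but the tracking entry has to survive so the context can re-emit it later, whereas killing the list drops the metadata entirely. The real implementations live in vmwgfx_context.c and are not part of this diff; the following is only a hedged sketch of the distinction, with illustrative types:

    #include <linux/list.h>

    /* Illustrative only; see vmw_context_binding_res_list_kill()/_scrub(). */
    struct binding { struct list_head head; int scrubbed; };

    static void scrub_list(struct list_head *head)
    {
            struct binding *b;

            list_for_each_entry(b, head, head)
                    b->scrubbed = 1;        /* emit unbind to the device,
                                               keep the entry for re-emit */
    }

    static void kill_list(struct list_head *head)
    {
            struct binding *b, *next;

            list_for_each_entry_safe(b, next, head, head) {
                    b->scrubbed = 1;        /* unbind on the device ... */
                    list_del(&b->head);     /* ... and forget the binding */
            }
    }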
@@ -325,13 +371,81 @@ int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data,
 					 TTM_REF_USAGE);
 }
 
+int vmw_shader_alloc(struct vmw_private *dev_priv,
+		     struct vmw_dma_buffer *buffer,
+		     size_t shader_size,
+		     size_t offset,
+		     SVGA3dShaderType shader_type,
+		     struct ttm_object_file *tfile,
+		     u32 *handle)
+{
+	struct vmw_user_shader *ushader;
+	struct vmw_resource *res, *tmp;
+	int ret;
+
+	/*
+	 * Approximate idr memory usage with 128 bytes. It will be limited
+	 * by maximum number of shaders anyway.
+	 */
+	if (unlikely(vmw_user_shader_size == 0))
+		vmw_user_shader_size =
+			ttm_round_pot(sizeof(struct vmw_user_shader)) + 128;
+
+	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
+				   vmw_user_shader_size,
+				   false, true);
+	if (unlikely(ret != 0)) {
+		if (ret != -ERESTARTSYS)
+			DRM_ERROR("Out of graphics memory for shader "
+				  "creation.\n");
+		goto out;
+	}
+
+	ushader = kzalloc(sizeof(*ushader), GFP_KERNEL);
+	if (unlikely(ushader == NULL)) {
+		ttm_mem_global_free(vmw_mem_glob(dev_priv),
+				    vmw_user_shader_size);
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	res = &ushader->shader.res;
+	ushader->base.shareable = false;
+	ushader->base.tfile = NULL;
+
+	/*
+	 * From here on, the destructor takes over resource freeing.
+	 */
+	ret = vmw_gb_shader_init(dev_priv, res, shader_size,
+				 offset, shader_type, buffer,
+				 vmw_user_shader_free);
+	if (unlikely(ret != 0))
+		goto out;
+
+	tmp = vmw_resource_reference(res);
+	ret = ttm_base_object_init(tfile, &ushader->base, false,
+				   VMW_RES_SHADER,
+				   &vmw_user_shader_base_release, NULL);
+
+	if (unlikely(ret != 0)) {
+		vmw_resource_unreference(&tmp);
+		goto out_err;
+	}
+
+	if (handle)
+		*handle = ushader->base.hash.key;
+out_err:
+	vmw_resource_unreference(&res);
+out:
+	return ret;
+}
+
 
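One subtlety in vmw_shader_alloc() above is the extra vmw_resource_reference() before ttm_base_object_init(): the base object, once initialized, owns a reference that its release action drops later, while the function's own creation reference is always dropped at out_err; on the init failure path the extra reference is handed back explicitly. The general shape of that reference handoff, with illustrative names:

    #include <linux/kref.h>

    struct thing { struct kref kref; };

    static void thing_release(struct kref *kref) { /* free 'thing' */ }
    static int registry_register(struct thing *t)               /* stand-in */
    { (void)t; return 0; }

    static int publish(struct thing *t)
    {
            int ret;

            kref_get(&t->kref);     /* reference the registry will own */
            ret = registry_register(t);
            if (ret)
                    kref_put(&t->kref, thing_release);  /* hand it back */
            /* The caller's own creation reference is dropped afterwards
             * in both cases; on success the registry's reference keeps
             * 't' alive. */
            return ret;
    }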
 int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
 			    struct drm_file *file_priv)
 {
 	struct vmw_private *dev_priv = vmw_priv(dev);
-	struct vmw_user_shader *ushader;
-	struct vmw_resource *res;
-	struct vmw_resource *tmp;
 	struct drm_vmw_shader_create_arg *arg =
 		(struct drm_vmw_shader_create_arg *)data;
 	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
@@ -373,69 +487,324 @@ int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
 		goto out_bad_arg;
 	}
 
-	/*
-	 * Approximate idr memory usage with 128 bytes. It will be limited
-	 * by maximum number of shaders anyway.
-	 */
-	if (unlikely(vmw_user_shader_size == 0))
-		vmw_user_shader_size = ttm_round_pot(sizeof(*ushader))
-			+ 128;
-
 	ret = ttm_read_lock(&vmaster->lock, true);
 	if (unlikely(ret != 0))
-		return ret;
+		goto out_bad_arg;
 
-	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
-				   vmw_user_shader_size,
-				   false, true);
-	if (unlikely(ret != 0)) {
-		if (ret != -ERESTARTSYS)
-			DRM_ERROR("Out of graphics memory for shader"
-				  " creation.\n");
-		goto out_unlock;
-	}
-
-	ushader = kzalloc(sizeof(*ushader), GFP_KERNEL);
-	if (unlikely(ushader == NULL)) {
-		ttm_mem_global_free(vmw_mem_glob(dev_priv),
-				    vmw_user_shader_size);
-		ret = -ENOMEM;
-		goto out_unlock;
-	}
-
-	res = &ushader->shader.res;
-	ushader->base.shareable = false;
-	ushader->base.tfile = NULL;
-
-	/*
-	 * From here on, the destructor takes over resource freeing.
-	 */
-	ret = vmw_gb_shader_init(dev_priv, res, arg->size,
-				 arg->offset, shader_type, buffer,
-				 vmw_user_shader_free);
-	if (unlikely(ret != 0))
-		goto out_unlock;
-
-	tmp = vmw_resource_reference(res);
-	ret = ttm_base_object_init(tfile, &ushader->base, false,
-				   VMW_RES_SHADER,
-				   &vmw_user_shader_base_release, NULL);
-
-	if (unlikely(ret != 0)) {
-		vmw_resource_unreference(&tmp);
-		goto out_err;
-	}
-
-	arg->shader_handle = ushader->base.hash.key;
-out_err:
-	vmw_resource_unreference(&res);
-out_unlock:
+	ret = vmw_shader_alloc(dev_priv, buffer, arg->size, arg->offset,
+			       shader_type, tfile, &arg->shader_handle);
+
 	ttm_read_unlock(&vmaster->lock);
 out_bad_arg:
 	vmw_dmabuf_unreference(&buffer);
-
 	return ret;
 }
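Besides factoring the body out into vmw_shader_alloc() so the compat path can reuse it, the refactor fixes an error-path leak: when ttm_read_lock() failed, the old code returned directly and never dropped the reference taken on the backing buffer, while the new code funnels through out_bad_arg. The cleanup-label discipline in miniature (all names are illustrative stand-ins):

    #include <linux/errno.h>

    struct buf;
    static struct buf *buf_get(void) { return (struct buf *)1; } /* stand-in */
    static void buf_put(struct buf *b) { (void)b; }
    static int lock_take(void) { return 0; }
    static void lock_release(void) { }
    static int do_work(void) { return 0; }

    static int do_op(void)
    {
            int ret;
            struct buf *b = buf_get();      /* reference acquired */

            if (b == NULL)
                    return -EINVAL;

            ret = lock_take();
            if (ret)
                    goto out_put;   /* the old 'return ret' leaked 'b' */

            ret = do_work();
            lock_release();
    out_put:
            buf_put(b);             /* balanced on every path */
            return ret;
    }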
+
+/**
+ * vmw_compat_shader_lookup - Look up a compat shader
+ *
+ * @man: Pointer to the compat shader manager.
+ * @shader_type: The shader type, that combined with the user_key identifies
+ * the shader.
+ * @user_key: On entry, this should be a pointer to the user_key.
+ * On successful exit, it will contain the guest-backed shader's TTM handle.
+ *
+ * Returns 0 on success. Non-zero on failure, in which case the value pointed
+ * to by @user_key is unmodified.
+ */
+int vmw_compat_shader_lookup(struct vmw_compat_shader_manager *man,
+			     SVGA3dShaderType shader_type,
+			     u32 *user_key)
+{
+	struct drm_hash_item *hash;
+	int ret;
+	unsigned long key = *user_key | (shader_type << 24);
+
+	ret = drm_ht_find_item(&man->shaders, key, &hash);
+	if (unlikely(ret != 0))
+		return ret;
+
+	*user_key = drm_hash_entry(hash, struct vmw_compat_shader,
+				   hash)->handle;
+
+	return 0;
+}
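Legacy user-space identifies a shader by a small per-type id, while the hash table needs a single key, so the manager packs the 24-bit user key and the shader type into one value: key = user_key | (shader_type << 24). Consistently, vmw_compat_shader_add() below rejects user_key values of 2^24 and above. A tiny self-check of the packing:

    #include <assert.h>
    #include <stdint.h>

    /* Pack as done for the compat shader hash key. */
    static inline uint32_t pack_key(uint32_t user_key, uint32_t shader_type)
    {
            return user_key | (shader_type << 24);  /* user_key < 1 << 24 */
    }

    int main(void)
    {
            uint32_t key = pack_key(0x00abcdef, 2);

            assert((key & 0x00ffffff) == 0x00abcdef);  /* user key bits  */
            assert((key >> 24) == 2);                  /* shader type    */
            return 0;
    }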
+
+/**
+ * vmw_compat_shader_free - Free a compat shader.
+ *
+ * @man: Pointer to the compat shader manager.
+ * @entry: Pointer to a struct vmw_compat_shader.
+ *
+ * Frees a struct vmw_compat_shader entry and drops its reference to the
+ * guest backed shader.
+ */
+static void vmw_compat_shader_free(struct vmw_compat_shader_manager *man,
+				   struct vmw_compat_shader *entry)
+{
+	list_del(&entry->head);
+	WARN_ON(drm_ht_remove_item(&man->shaders, &entry->hash));
+	WARN_ON(ttm_ref_object_base_unref(entry->tfile, entry->handle,
+					  TTM_REF_USAGE));
+	kfree(entry);
+}
+
+/**
+ * vmw_compat_shaders_commit - Commit a list of compat shader actions.
+ *
+ * @man: Pointer to the compat shader manager.
+ * @list: Caller's list of compat shader actions.
+ *
+ * This function commits a list of compat shader additions or removals.
+ * It is typically called when the execbuf ioctl call triggering these
+ * actions has committed the fifo contents to the device.
+ */
+void vmw_compat_shaders_commit(struct vmw_compat_shader_manager *man,
+			       struct list_head *list)
+{
+	struct vmw_compat_shader *entry, *next;
+
+	list_for_each_entry_safe(entry, next, list, head) {
+		list_del(&entry->head);
+		switch (entry->state) {
+		case VMW_COMPAT_ADD:
+			entry->state = VMW_COMPAT_COMMITED;
+			list_add_tail(&entry->head, &man->list);
+			break;
+		case VMW_COMPAT_DEL:
+			ttm_ref_object_base_unref(entry->tfile, entry->handle,
+						  TTM_REF_USAGE);
+			kfree(entry);
+			break;
+		default:
+			BUG();
+			break;
+		}
+	}
+}
+
+/**
+ * vmw_compat_shaders_revert - Revert a list of compat shader actions
+ *
+ * @man: Pointer to the compat shader manager.
+ * @list: Caller's list of compat shader actions.
+ *
+ * This function reverts a list of compat shader additions or removals.
+ * It is typically called when the execbuf ioctl call triggering these
+ * actions failed for some reason, and the command stream was never
+ * submitted.
+ */
+void vmw_compat_shaders_revert(struct vmw_compat_shader_manager *man,
+			       struct list_head *list)
+{
+	struct vmw_compat_shader *entry, *next;
+	int ret;
+
+	list_for_each_entry_safe(entry, next, list, head) {
+		switch (entry->state) {
+		case VMW_COMPAT_ADD:
+			vmw_compat_shader_free(man, entry);
+			break;
+		case VMW_COMPAT_DEL:
+			ret = drm_ht_insert_item(&man->shaders, &entry->hash);
+			list_del(&entry->head);
+			list_add_tail(&entry->head, &man->list);
+			entry->state = VMW_COMPAT_COMMITED;
+			break;
+		default:
+			BUG();
+			break;
+		}
+	}
+}
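Commit and revert make the staging list transactional around command submission: execbuf stages additions and removals on a private list, submits the fifo contents, then either commits (making the staged state visible in the manager) or reverts (restoring the pre-submission state). This is also the "Don't commit staged bindings if execbuf fails" theme of the series. A minimal caller sketch under that assumption, with a hypothetical submit_commands():

    #include <linux/list.h>

    static int submit_commands(void) { return 0; }      /* stand-in */

    static int execbuf_like(struct vmw_compat_shader_manager *man)
    {
            struct list_head staged;
            int ret;

            INIT_LIST_HEAD(&staged);

            /* ... vmw_compat_shader_add()/_remove() calls put entries on
             * 'staged' while the command stream is being validated ... */

            ret = submit_commands();
            if (ret)
                    vmw_compat_shaders_revert(man, &staged);
            else
                    vmw_compat_shaders_commit(man, &staged);
            return ret;
    }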
+
+/**
+ * vmw_compat_shader_remove - Stage a compat shader for removal.
+ *
+ * @man: Pointer to the compat shader manager
+ * @user_key: The key that is used to identify the shader. The key is
+ * unique to the shader type.
+ * @shader_type: Shader type.
+ * @list: Caller's list of staged shader actions.
+ *
+ * This function stages a compat shader for removal and removes the key from
+ * the shader manager's hash table. If the shader was previously only staged
+ * for addition it is completely removed (But the execbuf code may keep a
+ * reference if it was bound to a context between addition and removal). If
+ * it was previously committed to the manager, it is staged for removal.
+ */
+int vmw_compat_shader_remove(struct vmw_compat_shader_manager *man,
+			     u32 user_key, SVGA3dShaderType shader_type,
+			     struct list_head *list)
+{
+	struct vmw_compat_shader *entry;
+	struct drm_hash_item *hash;
+	int ret;
+
+	ret = drm_ht_find_item(&man->shaders, user_key | (shader_type << 24),
+			       &hash);
+	if (likely(ret != 0))
+		return -EINVAL;
+
+	entry = drm_hash_entry(hash, struct vmw_compat_shader, hash);
+
+	switch (entry->state) {
+	case VMW_COMPAT_ADD:
+		vmw_compat_shader_free(man, entry);
+		break;
+	case VMW_COMPAT_COMMITED:
+		(void) drm_ht_remove_item(&man->shaders, &entry->hash);
+		list_del(&entry->head);
+		entry->state = VMW_COMPAT_DEL;
+		list_add_tail(&entry->head, list);
+		break;
+	default:
+		BUG();
+		break;
+	}
+
+	return 0;
+}
+
+/**
+ * vmw_compat_shader_add - Create a compat shader and add the
+ * key to the manager
+ *
+ * @man: Pointer to the compat shader manager
+ * @user_key: The key that is used to identify the shader. The key is
+ * unique to the shader type.
+ * @bytecode: Pointer to the bytecode of the shader.
+ * @shader_type: Shader type.
+ * @tfile: Pointer to a struct ttm_object_file that the guest-backed shader is
+ * to be created with.
+ * @list: Caller's list of staged shader actions.
+ *
+ * Note that only the key is added to the shader manager's hash table.
+ * The shader is not yet added to the shader manager's list of shaders.
+ */
+int vmw_compat_shader_add(struct vmw_compat_shader_manager *man,
+			  u32 user_key, const void *bytecode,
+			  SVGA3dShaderType shader_type,
+			  size_t size,
+			  struct ttm_object_file *tfile,
+			  struct list_head *list)
+{
+	struct vmw_dma_buffer *buf;
+	struct ttm_bo_kmap_obj map;
+	bool is_iomem;
+	struct vmw_compat_shader *compat;
+	u32 handle;
+	int ret;
+
+	if (user_key > ((1 << 24) - 1) || (unsigned) shader_type > 16)
+		return -EINVAL;
+
+	/* Allocate and pin a DMA buffer */
+	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
+	if (unlikely(buf == NULL))
+		return -ENOMEM;
+
+	ret = vmw_dmabuf_init(man->dev_priv, buf, size, &vmw_sys_ne_placement,
+			      true, vmw_dmabuf_bo_free);
+	if (unlikely(ret != 0))
+		goto out;
+
+	ret = ttm_bo_reserve(&buf->base, false, true, false, NULL);
+	if (unlikely(ret != 0))
+		goto no_reserve;
+
+	/* Map and copy shader bytecode. */
+	ret = ttm_bo_kmap(&buf->base, 0, PAGE_ALIGN(size) >> PAGE_SHIFT,
+			  &map);
+	if (unlikely(ret != 0)) {
+		ttm_bo_unreserve(&buf->base);
+		goto no_reserve;
+	}
+
+	memcpy(ttm_kmap_obj_virtual(&map, &is_iomem), bytecode, size);
+	WARN_ON(is_iomem);
+
+	ttm_bo_kunmap(&map);
+	ret = ttm_bo_validate(&buf->base, &vmw_sys_placement, false, true);
+	WARN_ON(ret != 0);
+	ttm_bo_unreserve(&buf->base);
+
+	/* Create a guest-backed shader container backed by the dma buffer */
+	ret = vmw_shader_alloc(man->dev_priv, buf, size, 0, shader_type,
+			       tfile, &handle);
+	vmw_dmabuf_unreference(&buf);
+	if (unlikely(ret != 0))
+		goto no_reserve;
+	/*
+	 * Create a compat shader structure and stage it for insertion
+	 * in the manager
+	 */
+	compat = kzalloc(sizeof(*compat), GFP_KERNEL);
+	if (compat == NULL)
+		goto no_compat;
+
+	compat->hash.key = user_key | (shader_type << 24);
+	ret = drm_ht_insert_item(&man->shaders, &compat->hash);
+	if (unlikely(ret != 0))
+		goto out_invalid_key;
+
+	compat->state = VMW_COMPAT_ADD;
+	compat->handle = handle;
+	compat->tfile = tfile;
+	list_add_tail(&compat->head, list);
+
+	return 0;
+
+out_invalid_key:
+	kfree(compat);
+no_compat:
+	ttm_ref_object_base_unref(tfile, handle, TTM_REF_USAGE);
+no_reserve:
+out:
+	return ret;
+}
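vmw_compat_shader_add() is where a legacy SVGA3dCmdDefineShader payload becomes a guest-backed shader: pin a system-memory buffer, kmap it and copy the bytecode in, unpin via re-validation, then wrap the buffer in a GB shader resource with vmw_shader_alloc() and stage the handle under the packed key. The kmap/copy step in isolation, as a generic TTM pattern mirroring the code above (buffer setup abridged):

    /* Sketch: copy bytecode into a reserved, pinned TTM buffer object. */
    static int copy_bytecode(struct ttm_buffer_object *bo,
                             const void *bytecode, size_t size)
    {
            struct ttm_bo_kmap_obj map;
            bool is_iomem;
            int ret;

            ret = ttm_bo_kmap(bo, 0, PAGE_ALIGN(size) >> PAGE_SHIFT, &map);
            if (ret)
                    return ret;

            /* System placement was requested, so the mapping should not
             * be io memory; WARN if that assumption breaks. */
            memcpy(ttm_kmap_obj_virtual(&map, &is_iomem), bytecode, size);
            WARN_ON(is_iomem);
            ttm_bo_kunmap(&map);
            return 0;
    }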
+
+/**
+ * vmw_compat_shader_man_create - Create a compat shader manager
+ *
+ * @dev_priv: Pointer to a device private structure.
+ *
+ * Typically done at file open time. If successful returns a pointer to a
+ * compat shader manager. Otherwise returns an error pointer.
+ */
+struct vmw_compat_shader_manager *
+vmw_compat_shader_man_create(struct vmw_private *dev_priv)
+{
+	struct vmw_compat_shader_manager *man;
+	int ret;
+
+	man = kzalloc(sizeof(*man), GFP_KERNEL);
+
+	man->dev_priv = dev_priv;
+	INIT_LIST_HEAD(&man->list);
+	ret = drm_ht_create(&man->shaders, VMW_COMPAT_SHADER_HT_ORDER);
+	if (ret == 0)
+		return man;
+
+	kfree(man);
+	return ERR_PTR(ret);
+}
+
+/**
+ * vmw_compat_shader_man_destroy - Destroy a compat shader manager
+ *
+ * @man: Pointer to the shader manager to destroy.
+ *
+ * Typically done at file close time.
+ */
+void vmw_compat_shader_man_destroy(struct vmw_compat_shader_manager *man)
+{
+	struct vmw_compat_shader *entry, *next;
+
+	mutex_lock(&man->dev_priv->cmdbuf_mutex);
+	list_for_each_entry_safe(entry, next, &man->list, head)
+		vmw_compat_shader_free(man, entry);
+
+	mutex_unlock(&man->dev_priv->cmdbuf_mutex);
+	kfree(man);
+}
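The manager is per open file, so each legacy client gets its own hash of emulated shaders; per the doc comments it is created at file open and destroyed at file close, wiring that lives in the driver's open/postclose paths outside this section. A hedged sketch of that wiring, with an illustrative fpriv field name:

    /* Sketch only: the actual hook-up is in vmwgfx_drv.c, outside this
     * diff, and the fpriv field name here is illustrative. */
    static int open_hook(struct vmw_private *dev_priv,
                         struct vmw_fpriv *vmw_fp)
    {
            vmw_fp->shman = vmw_compat_shader_man_create(dev_priv);
            return IS_ERR(vmw_fp->shman) ? PTR_ERR(vmw_fp->shman) : 0;
    }

    static void close_hook(struct vmw_fpriv *vmw_fp)
    {
            vmw_compat_shader_man_destroy(vmw_fp->shman);
    }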
@@ -908,8 +908,8 @@ int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
 	    rep->size_addr;
 
 	if (user_sizes)
-		ret = copy_to_user(user_sizes, srf->sizes,
-				   srf->num_sizes * sizeof(*srf->sizes));
+		ret = copy_to_user(user_sizes, &srf->base_size,
+				   sizeof(srf->base_size));
 	if (unlikely(ret != 0)) {
 		DRM_ERROR("copy_to_user failed %p %u\n",
 			  user_sizes, srf->num_sizes);
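This is the "Fix legacy surface reference size copyback" patch: the reply now copies the surface's single base_size instead of num_sizes entries of the srf->sizes array, which presumably is not reliably populated for surfaces created through the guest-backed path; the fixed sizeof() also bounds the write to the one entry the reply buffer is expected to hold. The defensive shape of the fix, in a standalone sketch with a stand-in type:

    #include <string.h>
    #include <stdint.h>

    struct size3 { uint32_t w, h, d; };     /* stand-in for drm_vmw_size */

    /* Sketch: the destination holds exactly one entry, so the copy
     * length must come from the destination type, not from a count the
     * source happens to carry. */
    static void copy_back(struct size3 *dst_one, const struct size3 *sizes,
                          unsigned num_sizes)
    {
            (void)num_sizes;                /* not a valid bound for dst */
            memcpy(dst_one, &sizes[0], sizeof(*dst_one));
    }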
@@ -1111,7 +1111,7 @@ static int vmw_gb_surface_destroy(struct vmw_resource *res)
 		return 0;
 
 	mutex_lock(&dev_priv->binding_mutex);
-	vmw_context_binding_res_list_kill(&res->binding_head);
+	vmw_context_binding_res_list_scrub(&res->binding_head);
 
 	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
 	if (unlikely(cmd == NULL)) {