drm fixes for 4.9 rc2 - vmware, fsl-dcu, armada and etnaviv
-----BEGIN PGP SIGNATURE-----

iQIcBAABAgAGBQJYCY2mAAoJEAx081l5xIa+IlsP/RkAGNaQ2++hLXdH49Oo2tEu
P4nHSUzUuIVJhKt5nz/WU3heD7JT1AA5BSiXSh18t/pbtpPBrufaDooyjKE9aj5l
wZ0CXjTSVbg2ETgRyFlNcichln7g0bO3IrEGebFPA5bXYYAXkjtCw2W5cHtSJKCU
fC2Vfa81C3sCwMweBMuXDUOsG/ztANkxAyZ9kXT07aRKaXergWcLvZJQyZE7ZbuH
Nr90XMNoPmbQ/wP+O+QdKMOTV1jDVaIBCID+MM0iel0otVO4zH+F8zOeJZMqs+YG
n4V17V9gXV8b9muOEPssuN10Pw83mH/vWQZYIxzhbE7SAo9q9fdZ1GKBDN6DeR5e
qLCr6k8qmXVaGodWIz4STrh7o4EVrmylX/bbzeRlAkp3dCPoSB4R2QPJx4runzDG
TSdnctqp2kEYXt1V2JZfXGJ2krfU9Qdy4jU7zDIc1p0nLc8OXkttLQ59Ozr5YMjc
OjZhNFk6TJzMcYnsc7gjI/EnVkeoKcL2KUHYuU/wneScgNo65fJhqqUqVEXH1ftI
M24UoSEyPfGBF669rgux1uNv027BKDpDgHNLuyRbBfirfT2Ag9uG8hB9AYPiK4nD
JfWfENXZx47F7JRV3oz5CgmyTqv65hucrdkkxHrR/8GRnakSyRBJayw0mdGlehPq
X/xn0luswrt5jcCAj9lr
=Jsu/
-----END PGP SIGNATURE-----

Merge tag 'drm-fixes-for-v4.9-rc2-part2' of git://people.freedesktop.org/~airlied/linux

Pull more drm fixes from Dave Airlie:
 "Mainly some vmwgfx fixes, but also some fixes for armada, etnaviv and
  fsl-dcu"

* tag 'drm-fixes-for-v4.9-rc2-part2' of git://people.freedesktop.org/~airlied/linux:
  drm/fsl-dcu: enable pixel clock when enabling CRTC
  drm/fsl-dcu: do not transfer registers in mode_set_nofb
  drm/fsl-dcu: do not transfer registers on plane init
  drm/fsl-dcu: enable TCON bypass mode by default
  drm/vmwgfx: Adjust checks for null pointers in 13 functions
  drm/vmwgfx: Use memdup_user() rather than duplicating its implementation
  drm/vmwgfx: Use kmalloc_array() in vmw_surface_define_ioctl()
  drm/vmwgfx: Avoid validating views on view destruction
  drm/vmwgfx: Limit the user-space command buffer size
  drm/vmwgfx: Remove a leftover debug printout
  drm/vmwgfx: Allow resource relocations on byte boundaries
  drm/vmwgfx: Enable SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER command
  drm/vmwgfx: Remove call to reservation_object_test_signaled_rcu before wait
  drm/vmwgfx: Replace numeric parameter like 0444 with macro
  drm/etnaviv: block 64K of address space behind each cmdstream
  drm/etnaviv: ensure write caches are flushed at end of user cmdstream
  drm/armada: fix clock counts
commit ec366cd195
drivers/gpu/drm/armada/armada_crtc.c

@@ -332,17 +332,19 @@ static void armada_drm_crtc_dpms(struct drm_crtc *crtc, int dpms)
 {
 	struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
 
-	if (dcrtc->dpms != dpms) {
-		dcrtc->dpms = dpms;
-		if (!IS_ERR(dcrtc->clk) && !dpms_blanked(dpms))
-			WARN_ON(clk_prepare_enable(dcrtc->clk));
-		armada_drm_crtc_update(dcrtc);
-		if (!IS_ERR(dcrtc->clk) && dpms_blanked(dpms))
-			clk_disable_unprepare(dcrtc->clk);
+	if (dpms_blanked(dcrtc->dpms) != dpms_blanked(dpms)) {
 		if (dpms_blanked(dpms))
 			armada_drm_vblank_off(dcrtc);
-		else
+		else if (!IS_ERR(dcrtc->clk))
+			WARN_ON(clk_prepare_enable(dcrtc->clk));
+		dcrtc->dpms = dpms;
+		armada_drm_crtc_update(dcrtc);
+		if (!dpms_blanked(dpms))
 			drm_crtc_vblank_on(&dcrtc->crtc);
+		else if (!IS_ERR(dcrtc->clk))
+			clk_disable_unprepare(dcrtc->clk);
+	} else if (dcrtc->dpms != dpms) {
+		dcrtc->dpms = dpms;
 	}
 }
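A minimal userspace model of the invariant this armada fix restores (illustrative only, not driver code; it assumes any nonzero dpms value counts as blanked): clk_prepare_enable()/clk_disable_unprepare() stay balanced because the clock is only touched when the blanked state actually flips, not on every dpms change.

#include <stdbool.h>
#include <stdio.h>

static int clk_enable_count;	/* models the clock's prepare/enable count */

static bool dpms_blanked(int dpms)
{
	return dpms != 0;	/* assumption: 0 = DPMS_ON, anything else blanks */
}

static void set_dpms(int *cur, int next)
{
	/* Only touch the clock when blanked() actually changes. */
	if (dpms_blanked(*cur) != dpms_blanked(next)) {
		if (!dpms_blanked(next))
			clk_enable_count++;	/* clk_prepare_enable() */
		else
			clk_enable_count--;	/* clk_disable_unprepare() */
	}
	*cur = next;
}

int main(void)
{
	int dpms = 3;		/* start blanked */

	set_dpms(&dpms, 0);	/* unblank: count 0 -> 1 */
	set_dpms(&dpms, 1);	/* blank: count 1 -> 0 */
	set_dpms(&dpms, 3);	/* blanked -> blanked: count stays 0; the old
				 * "if (dcrtc->dpms != dpms)" logic disabled
				 * the clock again here, underflowing it */
	printf("enable count: %d\n", clk_enable_count);	/* prints 0 */
	return 0;
}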
drivers/gpu/drm/etnaviv/etnaviv_buffer.c

@@ -329,20 +329,34 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, unsigned int event,
 	/*
 	 * Append a LINK to the submitted command buffer to return to
 	 * the ring buffer. return_target is the ring target address.
-	 * We need three dwords: event, wait, link.
+	 * We need at most 7 dwords in the return target: 2 cache flush +
+	 * 2 semaphore stall + 1 event + 1 wait + 1 link.
 	 */
-	return_dwords = 3;
+	return_dwords = 7;
 	return_target = etnaviv_buffer_reserve(gpu, buffer, return_dwords);
 	CMD_LINK(cmdbuf, return_dwords, return_target);
 
 	/*
-	 * Append event, wait and link pointing back to the wait
-	 * command to the ring buffer.
+	 * Append a cache flush, stall, event, wait and link pointing back to
+	 * the wait command to the ring buffer.
 	 */
+	if (gpu->exec_state == ETNA_PIPE_2D) {
+		CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE,
+			       VIVS_GL_FLUSH_CACHE_PE2D);
+	} else {
+		CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE,
+			       VIVS_GL_FLUSH_CACHE_DEPTH |
+			       VIVS_GL_FLUSH_CACHE_COLOR);
+		CMD_LOAD_STATE(buffer, VIVS_TS_FLUSH_CACHE,
+			       VIVS_TS_FLUSH_CACHE_FLUSH);
+	}
+	CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
+	CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
 	CMD_LOAD_STATE(buffer, VIVS_GL_EVENT, VIVS_GL_EVENT_EVENT_ID(event) |
 		       VIVS_GL_EVENT_FROM_PE);
 	CMD_WAIT(buffer);
-	CMD_LINK(buffer, 2, return_target + 8);
+	CMD_LINK(buffer, 2, etnaviv_iommu_get_cmdbuf_va(gpu, buffer) +
+			    buffer->user_size - 4);
 
 	if (drm_debug & DRM_UT_DRIVER)
 		pr_info("stream link to 0x%08x @ 0x%08x %p\n",
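The dword arithmetic above is easy to get wrong, so here is a small standalone model of the reservation rule the patch enforces (an assumed simplification of the ring: a plain array with an assert instead of wraparound handling). Whatever tail sequence is appended must fit in the dwords reserved up front, and growing the tail from event + wait + link (3) to flush + flush + sem + stall + event + wait + link (7) means the reservation grows with it.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define RING_DWORDS 64

static uint32_t ring[RING_DWORDS];
static unsigned int ring_fill;

static unsigned int buffer_reserve(unsigned int dwords)
{
	/* etnaviv_buffer_reserve() also handles wraparound; elided here */
	assert(ring_fill + dwords <= RING_DWORDS);
	return ring_fill;
}

static void emit(uint32_t op)
{
	ring[ring_fill++] = op;
}

int main(void)
{
	unsigned int return_dwords = 7;	/* was 3 before the fix */
	unsigned int target = buffer_reserve(return_dwords);

	emit(0x01); emit(0x02);	/* 2 dwords: cache-flush state loads */
	emit(0x03); emit(0x04);	/* 2 dwords: semaphore + stall */
	emit(0x05);		/* 1 dword: event */
	emit(0x06);		/* 1 dword: wait */
	emit(0x07);		/* 1 dword: link back to the wait */
	printf("used %u of %u reserved dwords\n", ring_fill - target,
	       return_dwords);
	return 0;
}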
drivers/gpu/drm/etnaviv/etnaviv_mmu.c

@@ -330,7 +330,8 @@ u32 etnaviv_iommu_get_cmdbuf_va(struct etnaviv_gpu *gpu,
 		return (u32)buf->vram_node.start;
 
 	mutex_lock(&mmu->lock);
-	ret = etnaviv_iommu_find_iova(mmu, &buf->vram_node, buf->size);
+	ret = etnaviv_iommu_find_iova(mmu, &buf->vram_node,
+				      buf->size + SZ_64K);
 	if (ret < 0) {
 		mutex_unlock(&mmu->lock);
 		return 0;
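The SZ_64K pad implements the "block 64K of address space behind each cmdstream" fix: the GPU front-end can prefetch ahead of the stream, so each buffer's IOVA range is oversized and the prefetch lands in dead space owned by the same mapping rather than in whatever got mapped next. A toy bump-allocator sketch of the effect (assumed, not the real range allocator):

#include <stddef.h>
#include <stdio.h>

#define SZ_64K (64 * 1024)

static size_t next_free;	/* trivial bump allocator over an IOVA space */

static size_t find_iova(size_t size)
{
	size_t start = next_free;

	next_free += size;
	return start;
}

int main(void)
{
	size_t buf_size = 4096;
	size_t a = find_iova(buf_size + SZ_64K);	/* cmdbuf + guard */
	size_t b = find_iova(buf_size + SZ_64K);

	/* Prefetch past a + buf_size stays inside a's own 64K window. */
	printf("a=0x%zx, a's window ends at 0x%zx, b starts at 0x%zx\n",
	       a, a + buf_size + SZ_64K, b);
	return 0;
}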
drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c

@@ -51,6 +51,7 @@ static void fsl_dcu_drm_disable_crtc(struct drm_crtc *crtc)
 			   DCU_MODE_DCU_MODE(DCU_MODE_OFF));
 	regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE,
 		     DCU_UPDATE_MODE_READREG);
+	clk_disable_unprepare(fsl_dev->pix_clk);
 }
 
 static void fsl_dcu_drm_crtc_enable(struct drm_crtc *crtc)
@@ -58,6 +59,7 @@ static void fsl_dcu_drm_crtc_enable(struct drm_crtc *crtc)
 	struct drm_device *dev = crtc->dev;
 	struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
 
+	clk_prepare_enable(fsl_dev->pix_clk);
 	regmap_update_bits(fsl_dev->regmap, DCU_DCU_MODE,
 			   DCU_MODE_DCU_MODE_MASK,
 			   DCU_MODE_DCU_MODE(DCU_MODE_NORMAL));
@@ -116,8 +118,6 @@ static void fsl_dcu_drm_crtc_mode_set_nofb(struct drm_crtc *crtc)
 		     DCU_THRESHOLD_LS_BF_VS(BF_VS_VAL) |
 		     DCU_THRESHOLD_OUT_BUF_HIGH(BUF_MAX_VAL) |
 		     DCU_THRESHOLD_OUT_BUF_LOW(BUF_MIN_VAL));
-	regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE,
-		     DCU_UPDATE_MODE_READREG);
 	return;
 }
drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c

@@ -267,12 +267,8 @@ static int fsl_dcu_drm_pm_resume(struct device *dev)
 		return ret;
 	}
 
-	ret = clk_prepare_enable(fsl_dev->pix_clk);
-	if (ret < 0) {
-		dev_err(dev, "failed to enable pix clk\n");
-		goto disable_dcu_clk;
-	}
-
+	if (fsl_dev->tcon)
+		fsl_tcon_bypass_enable(fsl_dev->tcon);
 	fsl_dcu_drm_init_planes(fsl_dev->drm);
 	drm_atomic_helper_resume(fsl_dev->drm, fsl_dev->state);
@@ -284,10 +280,6 @@ static int fsl_dcu_drm_pm_resume(struct device *dev)
 	enable_irq(fsl_dev->irq);
 
 	return 0;
-
-disable_dcu_clk:
-	clk_disable_unprepare(fsl_dev->clk);
-	return ret;
 }
 #endif
@@ -401,18 +393,12 @@ static int fsl_dcu_drm_probe(struct platform_device *pdev)
 		goto disable_clk;
 	}
 
-	ret = clk_prepare_enable(fsl_dev->pix_clk);
-	if (ret < 0) {
-		dev_err(dev, "failed to enable pix clk\n");
-		goto unregister_pix_clk;
-	}
-
 	fsl_dev->tcon = fsl_tcon_init(dev);
 
 	drm = drm_dev_alloc(driver, dev);
 	if (IS_ERR(drm)) {
 		ret = PTR_ERR(drm);
-		goto disable_pix_clk;
+		goto unregister_pix_clk;
 	}
 
 	fsl_dev->dev = dev;
@@ -433,8 +419,6 @@ static int fsl_dcu_drm_probe(struct platform_device *pdev)
 
 unref:
 	drm_dev_unref(drm);
-disable_pix_clk:
-	clk_disable_unprepare(fsl_dev->pix_clk);
 unregister_pix_clk:
 	clk_unregister(fsl_dev->pix_clk);
 disable_clk:
@@ -447,7 +431,6 @@ static int fsl_dcu_drm_remove(struct platform_device *pdev)
 	struct fsl_dcu_drm_device *fsl_dev = platform_get_drvdata(pdev);
 
 	clk_disable_unprepare(fsl_dev->clk);
-	clk_disable_unprepare(fsl_dev->pix_clk);
 	clk_unregister(fsl_dev->pix_clk);
 	drm_put_dev(fsl_dev->drm);
drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c

@@ -211,11 +211,6 @@ void fsl_dcu_drm_init_planes(struct drm_device *dev)
 		for (j = 1; j <= fsl_dev->soc->layer_regs; j++)
 			regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(i, j), 0);
 	}
-	regmap_update_bits(fsl_dev->regmap, DCU_DCU_MODE,
-			   DCU_MODE_DCU_MODE_MASK,
-			   DCU_MODE_DCU_MODE(DCU_MODE_OFF));
-	regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE,
-		     DCU_UPDATE_MODE_READREG);
 }
 
 struct drm_plane *fsl_dcu_drm_primary_create_plane(struct drm_device *dev)
drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c

@@ -20,38 +20,6 @@
 #include "fsl_dcu_drm_drv.h"
 #include "fsl_tcon.h"
 
-static int
-fsl_dcu_drm_encoder_atomic_check(struct drm_encoder *encoder,
-				 struct drm_crtc_state *crtc_state,
-				 struct drm_connector_state *conn_state)
-{
-	return 0;
-}
-
-static void fsl_dcu_drm_encoder_disable(struct drm_encoder *encoder)
-{
-	struct drm_device *dev = encoder->dev;
-	struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
-
-	if (fsl_dev->tcon)
-		fsl_tcon_bypass_disable(fsl_dev->tcon);
-}
-
-static void fsl_dcu_drm_encoder_enable(struct drm_encoder *encoder)
-{
-	struct drm_device *dev = encoder->dev;
-	struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
-
-	if (fsl_dev->tcon)
-		fsl_tcon_bypass_enable(fsl_dev->tcon);
-}
-
-static const struct drm_encoder_helper_funcs encoder_helper_funcs = {
-	.atomic_check = fsl_dcu_drm_encoder_atomic_check,
-	.disable = fsl_dcu_drm_encoder_disable,
-	.enable = fsl_dcu_drm_encoder_enable,
-};
-
 static void fsl_dcu_drm_encoder_destroy(struct drm_encoder *encoder)
 {
 	drm_encoder_cleanup(encoder);
@@ -68,13 +36,16 @@ int fsl_dcu_drm_encoder_create(struct fsl_dcu_drm_device *fsl_dev,
 	int ret;
 
 	encoder->possible_crtcs = 1;
+
+	/* Use bypass mode for parallel RGB/LVDS encoder */
+	if (fsl_dev->tcon)
+		fsl_tcon_bypass_enable(fsl_dev->tcon);
+
 	ret = drm_encoder_init(fsl_dev->drm, encoder, &encoder_funcs,
 			       DRM_MODE_ENCODER_LVDS, NULL);
 	if (ret < 0)
 		return ret;
 
-	drm_encoder_helper_add(encoder, &encoder_helper_funcs);
-
 	return 0;
 }
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c

@@ -241,15 +241,15 @@ static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
 			      void *ptr);
 
 MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev");
-module_param_named(enable_fbdev, enable_fbdev, int, 0600);
+module_param_named(enable_fbdev, enable_fbdev, int, S_IRUSR | S_IWUSR);
 MODULE_PARM_DESC(force_dma_api, "Force using the DMA API for TTM pages");
-module_param_named(force_dma_api, vmw_force_iommu, int, 0600);
+module_param_named(force_dma_api, vmw_force_iommu, int, S_IRUSR | S_IWUSR);
 MODULE_PARM_DESC(restrict_iommu, "Try to limit IOMMU usage for TTM pages");
-module_param_named(restrict_iommu, vmw_restrict_iommu, int, 0600);
+module_param_named(restrict_iommu, vmw_restrict_iommu, int, S_IRUSR | S_IWUSR);
 MODULE_PARM_DESC(force_coherent, "Force coherent TTM pages");
-module_param_named(force_coherent, vmw_force_coherent, int, 0600);
+module_param_named(force_coherent, vmw_force_coherent, int, S_IRUSR | S_IWUSR);
 MODULE_PARM_DESC(restrict_dma_mask, "Restrict DMA mask to 44 bits with IOMMU");
-module_param_named(restrict_dma_mask, vmw_restrict_dma_mask, int, 0600);
+module_param_named(restrict_dma_mask, vmw_restrict_dma_mask, int, S_IRUSR | S_IWUSR);
 MODULE_PARM_DESC(assume_16bpp, "Assume 16-bpp when filtering modes");
 module_param_named(assume_16bpp, vmw_assume_16bpp, int, 0600);
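For reference, the macro spelling and the octal literal it replaces denote the same mode bits; a two-line check (ordinary userspace C, nothing vmwgfx-specific):

#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
	printf("S_IRUSR | S_IWUSR = %04o\n", S_IRUSR | S_IWUSR); /* 0600 */
	return 0;
}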
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h

@@ -43,7 +43,7 @@
 
 #define VMWGFX_DRIVER_DATE "20160210"
 #define VMWGFX_DRIVER_MAJOR 2
-#define VMWGFX_DRIVER_MINOR 10
+#define VMWGFX_DRIVER_MINOR 11
 #define VMWGFX_DRIVER_PATCHLEVEL 0
 #define VMWGFX_FILE_PAGE_OFFSET 0x00100000
 #define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c

@@ -34,18 +34,38 @@
 
 #define VMW_RES_HT_ORDER 12
 
+/**
+ * enum vmw_resource_relocation_type - Relocation type for resources
+ *
+ * @vmw_res_rel_normal: Traditional relocation. The resource id in the
+ * command stream is replaced with the actual id after validation.
+ * @vmw_res_rel_nop: NOP relocation. The command is unconditionally replaced
+ * with a NOP.
+ * @vmw_res_rel_cond_nop: Conditional NOP relocation. If the resource id
+ * after validation is -1, the command is replaced with a NOP. Otherwise no
+ * action.
+ */
+enum vmw_resource_relocation_type {
+	vmw_res_rel_normal,
+	vmw_res_rel_nop,
+	vmw_res_rel_cond_nop,
+	vmw_res_rel_max
+};
+
 /**
  * struct vmw_resource_relocation - Relocation info for resources
  *
  * @head: List head for the software context's relocation list.
  * @res: Non-ref-counted pointer to the resource.
- * @offset: Offset of 4 byte entries into the command buffer where the
+ * @offset: Offset of single byte entries into the command buffer where the
  * id that needs fixup is located.
+ * @rel_type: Type of relocation.
  */
 struct vmw_resource_relocation {
 	struct list_head head;
 	const struct vmw_resource *res;
-	unsigned long offset;
+	u32 offset:29;
+	enum vmw_resource_relocation_type rel_type:3;
 };
 
 /**
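Packing the byte offset into a 29-bit field plus a 3-bit type keeps offset and relocation type together in a single 32-bit word instead of growing the struct by a field, and the limits are enforced at compile time by the BUILD_BUG_ON()s added further down. A standalone C11 sketch of the same packing, with illustrative constants standing in for the SVGA ones:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define CB_MAX_SIZE (512 * 1024)	/* stand-in for SVGA_CB_MAX_SIZE */

enum rel_type { rel_normal, rel_nop, rel_cond_nop, rel_max };

struct relocation {
	uint32_t offset:29;	/* byte offset into the command buffer */
	uint32_t type:3;	/* holds an enum rel_type value */
};

/* Mirrors BUILD_BUG_ON(SVGA_CB_MAX_SIZE >= (1 << 29)) and friends. */
static_assert(CB_MAX_SIZE < (1u << 29), "offset bitfield too narrow");
static_assert(rel_max < (1 << 3), "type bitfield too narrow");

int main(void)
{
	struct relocation rel = { .offset = 12345, .type = rel_cond_nop };

	printf("sizeof = %zu, offset = %u, type = %u\n",
	       sizeof(rel), rel.offset, (unsigned int)rel.type);
	return 0;
}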
@@ -109,7 +129,18 @@ static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
 				   struct vmw_dma_buffer *vbo,
 				   bool validate_as_mob,
 				   uint32_t *p_val_node);
 
+/**
+ * vmw_ptr_diff - Compute the offset from a to b in bytes
+ *
+ * @a: A starting pointer.
+ * @b: A pointer offset in the same address space.
+ *
+ * Returns: The offset in bytes between the two pointers.
+ */
+static size_t vmw_ptr_diff(void *a, void *b)
+{
+	return (unsigned long) b - (unsigned long) a;
+}
+
 /**
  * vmw_resources_unreserve - unreserve resources previously reserved for
@@ -409,11 +440,14 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
 * @list: Pointer to head of relocation list.
 * @res: The resource.
 * @offset: Offset into the command buffer currently being parsed where the
- * id that needs fixup is located. Granularity is 4 bytes.
+ * id that needs fixup is located. Granularity is one byte.
+ * @rel_type: Relocation type.
 */
 static int vmw_resource_relocation_add(struct list_head *list,
 				       const struct vmw_resource *res,
-				       unsigned long offset)
+				       unsigned long offset,
+				       enum vmw_resource_relocation_type
+				       rel_type)
 {
 	struct vmw_resource_relocation *rel;
 
@@ -425,6 +459,7 @@ static int vmw_resource_relocation_add(struct list_head *list,
 
 	rel->res = res;
 	rel->offset = offset;
+	rel->rel_type = rel_type;
 	list_add_tail(&rel->head, list);
 
 	return 0;
@@ -459,11 +494,24 @@ static void vmw_resource_relocations_apply(uint32_t *cb,
 {
 	struct vmw_resource_relocation *rel;
 
+	/* Validate the struct vmw_resource_relocation member size */
+	BUILD_BUG_ON(SVGA_CB_MAX_SIZE >= (1 << 29));
+	BUILD_BUG_ON(vmw_res_rel_max >= (1 << 3));
+
 	list_for_each_entry(rel, list, head) {
-		if (likely(rel->res != NULL))
-			cb[rel->offset] = rel->res->id;
-		else
-			cb[rel->offset] = SVGA_3D_CMD_NOP;
+		u32 *addr = (u32 *)((unsigned long) cb + rel->offset);
+		switch (rel->rel_type) {
+		case vmw_res_rel_normal:
+			*addr = rel->res->id;
+			break;
+		case vmw_res_rel_nop:
+			*addr = SVGA_3D_CMD_NOP;
+			break;
+		default:
+			if (rel->res->id == -1)
+				*addr = SVGA_3D_CMD_NOP;
+			break;
+		}
 	}
 }
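What the switch above changes, in isolation: the old code indexed a u32 array (cb[rel->offset]), so a relocation could only land on a 4-byte multiple of the buffer start; the new code adds a byte offset to the base pointer. A userspace model of that addressing change (memcpy stands in for the store so the model is alignment-safe; the offsets are hypothetical):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void apply_reloc(void *cb, size_t byte_offset, uint32_t id)
{
	/* Kernel version: u32 *addr = (u32 *)((unsigned long)cb + offset); */
	uint8_t *addr = (uint8_t *)cb + byte_offset;

	memcpy(addr, &id, sizeof(id));	/* patch the id in place */
}

int main(void)
{
	uint8_t buf[16] = {0};

	apply_reloc(buf, 6, 0xdeadbeef);	/* 6 is not a multiple of 4 */
	printf("%02x %02x %02x %02x\n", buf[6], buf[7], buf[8], buf[9]);
	return 0;
}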
@@ -655,7 +703,9 @@ static int vmw_cmd_res_reloc_add(struct vmw_private *dev_priv,
 	*p_val = NULL;
 	ret = vmw_resource_relocation_add(&sw_context->res_relocations,
 					  res,
-					  id_loc - sw_context->buf_start);
+					  vmw_ptr_diff(sw_context->buf_start,
+						       id_loc),
+					  vmw_res_rel_normal);
 	if (unlikely(ret != 0))
 		return ret;
 
@@ -721,7 +771,8 @@ vmw_cmd_res_check(struct vmw_private *dev_priv,
 
 		return vmw_resource_relocation_add
 			(&sw_context->res_relocations, res,
-			 id_loc - sw_context->buf_start);
+			 vmw_ptr_diff(sw_context->buf_start, id_loc),
+			 vmw_res_rel_normal);
 	}
 
 	ret = vmw_user_resource_lookup_handle(dev_priv,
@@ -2143,10 +2194,10 @@ static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
 		return ret;
 
 	return vmw_resource_relocation_add(&sw_context->res_relocations,
-					   NULL, &cmd->header.id -
-					   sw_context->buf_start);
-
-	return 0;
+					   NULL,
+					   vmw_ptr_diff(sw_context->buf_start,
+							&cmd->header.id),
+					   vmw_res_rel_nop);
 }
 
 /**
@@ -2188,10 +2239,10 @@ static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
 		return ret;
 
 	return vmw_resource_relocation_add(&sw_context->res_relocations,
-					   NULL, &cmd->header.id -
-					   sw_context->buf_start);
-
-	return 0;
+					   NULL,
+					   vmw_ptr_diff(sw_context->buf_start,
+							&cmd->header.id),
+					   vmw_res_rel_nop);
 }
 
 /**
@@ -2848,8 +2899,7 @@ static int vmw_cmd_dx_cid_check(struct vmw_private *dev_priv,
 * @header: Pointer to the command header in the command stream.
 *
 * Check that the view exists, and if it was not created using this
- * command batch, make sure it's validated (present in the device) so that
- * the remove command will not confuse the device.
+ * command batch, conditionally make this command a NOP.
 */
 static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv,
 				  struct vmw_sw_context *sw_context,
@@ -2877,10 +2927,16 @@ static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv,
 		return ret;
 
 	/*
-	 * Add view to the validate list iff it was not created using this
-	 * command batch.
+	 * If the view wasn't created during this command batch, it might
+	 * have been removed due to a context swapout, so add a
+	 * relocation to conditionally make this command a NOP to avoid
+	 * device errors.
 	 */
-	return vmw_view_res_val_add(sw_context, view);
+	return vmw_resource_relocation_add(&sw_context->res_relocations,
+					   view,
+					   vmw_ptr_diff(sw_context->buf_start,
+							&cmd->header.id),
+					   vmw_res_rel_cond_nop);
 }
 
 /**
@@ -3029,6 +3085,35 @@ static int vmw_cmd_dx_genmips(struct vmw_private *dev_priv,
 				 cmd->body.shaderResourceViewId);
 }
 
+/**
+ * vmw_cmd_dx_transfer_from_buffer -
+ * Validate an SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_dx_transfer_from_buffer(struct vmw_private *dev_priv,
+					   struct vmw_sw_context *sw_context,
+					   SVGA3dCmdHeader *header)
+{
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXTransferFromBuffer body;
+	} *cmd = container_of(header, typeof(*cmd), header);
+	int ret;
+
+	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+				user_surface_converter,
+				&cmd->body.srcSid, NULL);
+	if (ret != 0)
+		return ret;
+
+	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+				 user_surface_converter,
+				 &cmd->body.destSid, NULL);
+}
+
 static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
 				struct vmw_sw_context *sw_context,
 				void *buf, uint32_t *size)
@@ -3379,6 +3464,9 @@ static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
 			    &vmw_cmd_buffer_copy_check, true, false, true),
 	VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY_REGION,
 		    &vmw_cmd_pred_copy_check, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER,
+		    &vmw_cmd_dx_transfer_from_buffer,
+		    true, false, true),
 };
 
 static int vmw_cmd_check(struct vmw_private *dev_priv,
@@ -3848,14 +3936,14 @@ static void *vmw_execbuf_cmdbuf(struct vmw_private *dev_priv,
 	int ret;
 
 	*header = NULL;
-	if (!dev_priv->cman || kernel_commands)
-		return kernel_commands;
-
 	if (command_size > SVGA_CB_MAX_SIZE) {
 		DRM_ERROR("Command buffer is too large.\n");
 		return ERR_PTR(-EINVAL);
 	}
 
+	if (!dev_priv->cman || kernel_commands)
+		return kernel_commands;
+
 	/* If possible, add a little space for fencing. */
 	cmdbuf_size = command_size + 512;
 	cmdbuf_size = min_t(size_t, cmdbuf_size, SVGA_CB_MAX_SIZE);
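The point of the reorder is subtle enough to deserve a reduced model (simplified signature and a stand-in limit): the size check must run before any early return, otherwise the path without a command buffer manager never sees the limit.

#include <errno.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define CB_MAX_SIZE (512 * 1024)	/* stand-in for SVGA_CB_MAX_SIZE */

static int cmdbuf_check(bool have_cman, size_t command_size)
{
	if (command_size > CB_MAX_SIZE)
		return -EINVAL;		/* now applies on every path */

	if (!have_cman)
		return 0;		/* the old code returned here first */

	/* ... would go on to allocate a command_size bounce buffer ... */
	return 0;
}

int main(void)
{
	/* An oversized buffer is rejected even without a cman. */
	printf("%d\n", cmdbuf_check(false, (size_t)CB_MAX_SIZE + 1));
	return 0;
}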
@@ -4232,9 +4320,6 @@ void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
 	ttm_bo_unref(&query_val.bo);
 	ttm_bo_unref(&pinned_val.bo);
 	vmw_dmabuf_unreference(&dev_priv->pinned_bo);
-	DRM_INFO("Dummy query bo pin count: %d\n",
-		 dev_priv->dummy_query_bo->pin_count);
 
 out_unlock:
 	return;
drivers/gpu/drm/vmwgfx/vmwgfx_resource.c

@@ -574,10 +574,8 @@ static int vmw_user_dmabuf_synccpu_grab(struct vmw_user_dma_buffer *user_bo,
 		bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
 		long lret;
 
-		if (nonblock)
-			return reservation_object_test_signaled_rcu(bo->resv, true) ? 0 : -EBUSY;
-
-		lret = reservation_object_wait_timeout_rcu(bo->resv, true, true, MAX_SCHEDULE_TIMEOUT);
+		lret = reservation_object_wait_timeout_rcu(bo->resv, true, true,
+					nonblock ? 0 : MAX_SCHEDULE_TIMEOUT);
 		if (!lret)
 			return -EBUSY;
 		else if (lret < 0)
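The pattern here: a nonblocking caller no longer needs a separate test-then-wait; passing a zero timeout turns the wait into a poll. A userspace stand-in for the fence wait (the return convention is assumed to match the kernel's: >0 signaled, 0 timed out, <0 error):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

#define MAX_SCHEDULE_TIMEOUT 1000000L

/* Stand-in for reservation_object_wait_timeout_rcu(). */
static long wait_timeout(bool signaled, long timeout)
{
	if (signaled)
		return timeout > 0 ? timeout : 1;
	return 0;			/* timeout 0 degenerates to a poll */
}

static int synccpu_grab(bool signaled, bool nonblock)
{
	long lret = wait_timeout(signaled,
				 nonblock ? 0 : MAX_SCHEDULE_TIMEOUT);

	if (!lret)
		return -EBUSY;		/* not idle, caller won't wait */
	if (lret < 0)
		return (int)lret;
	return 0;
}

int main(void)
{
	printf("busy: %d, idle: %d\n",
	       synccpu_grab(false, true), synccpu_grab(true, true));
	return 0;
}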
drivers/gpu/drm/vmwgfx/vmwgfx_surface.c

@@ -324,7 +324,7 @@ static void vmw_hw_surface_destroy(struct vmw_resource *res)
 	if (res->id != -1) {
 
 		cmd = vmw_fifo_reserve(dev_priv, vmw_surface_destroy_size());
-		if (unlikely(cmd == NULL)) {
+		if (unlikely(!cmd)) {
 			DRM_ERROR("Failed reserving FIFO space for surface "
 				  "destruction.\n");
 			return;
@@ -397,7 +397,7 @@ static int vmw_legacy_srf_create(struct vmw_resource *res)
 
 	submit_size = vmw_surface_define_size(srf);
 	cmd = vmw_fifo_reserve(dev_priv, submit_size);
-	if (unlikely(cmd == NULL)) {
+	if (unlikely(!cmd)) {
 		DRM_ERROR("Failed reserving FIFO space for surface "
 			  "creation.\n");
 		ret = -ENOMEM;
@@ -446,11 +446,10 @@ static int vmw_legacy_srf_dma(struct vmw_resource *res,
 	uint8_t *cmd;
 	struct vmw_private *dev_priv = res->dev_priv;
 
-	BUG_ON(val_buf->bo == NULL);
-
+	BUG_ON(!val_buf->bo);
 	submit_size = vmw_surface_dma_size(srf);
 	cmd = vmw_fifo_reserve(dev_priv, submit_size);
-	if (unlikely(cmd == NULL)) {
+	if (unlikely(!cmd)) {
 		DRM_ERROR("Failed reserving FIFO space for surface "
 			  "DMA.\n");
 		return -ENOMEM;
@@ -538,7 +537,7 @@ static int vmw_legacy_srf_destroy(struct vmw_resource *res)
 
 	submit_size = vmw_surface_destroy_size();
 	cmd = vmw_fifo_reserve(dev_priv, submit_size);
-	if (unlikely(cmd == NULL)) {
+	if (unlikely(!cmd)) {
 		DRM_ERROR("Failed reserving FIFO space for surface "
 			  "eviction.\n");
 		return -ENOMEM;
@@ -578,7 +577,7 @@ static int vmw_surface_init(struct vmw_private *dev_priv,
 	int ret;
 	struct vmw_resource *res = &srf->res;
 
-	BUG_ON(res_free == NULL);
+	BUG_ON(!res_free);
 	if (!dev_priv->has_mob)
 		vmw_fifo_resource_inc(dev_priv);
 	ret = vmw_resource_init(dev_priv, res, true, res_free,
@@ -700,7 +699,6 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
 	struct drm_vmw_surface_create_req *req = &arg->req;
 	struct drm_vmw_surface_arg *rep = &arg->rep;
 	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
-	struct drm_vmw_size __user *user_sizes;
 	int ret;
 	int i, j;
 	uint32_t cur_bo_offset;
@@ -748,7 +746,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
 	}
 
 	user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
-	if (unlikely(user_srf == NULL)) {
+	if (unlikely(!user_srf)) {
 		ret = -ENOMEM;
 		goto out_no_user_srf;
 	}
@@ -763,29 +761,21 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
 	memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels));
 	srf->num_sizes = num_sizes;
 	user_srf->size = size;
-
-	srf->sizes = kmalloc(srf->num_sizes * sizeof(*srf->sizes), GFP_KERNEL);
-	if (unlikely(srf->sizes == NULL)) {
-		ret = -ENOMEM;
+	srf->sizes = memdup_user((struct drm_vmw_size __user *)(unsigned long)
+				 req->size_addr,
+				 sizeof(*srf->sizes) * srf->num_sizes);
+	if (IS_ERR(srf->sizes)) {
+		ret = PTR_ERR(srf->sizes);
 		goto out_no_sizes;
 	}
-	srf->offsets = kmalloc(srf->num_sizes * sizeof(*srf->offsets),
-			       GFP_KERNEL);
-	if (unlikely(srf->offsets == NULL)) {
+	srf->offsets = kmalloc_array(srf->num_sizes,
+				     sizeof(*srf->offsets),
+				     GFP_KERNEL);
+	if (unlikely(!srf->offsets)) {
 		ret = -ENOMEM;
 		goto out_no_offsets;
 	}
 
-	user_sizes = (struct drm_vmw_size __user *)(unsigned long)
-		req->size_addr;
-
-	ret = copy_from_user(srf->sizes, user_sizes,
-			     srf->num_sizes * sizeof(*srf->sizes));
-	if (unlikely(ret != 0)) {
-		ret = -EFAULT;
-		goto out_no_copy;
-	}
-
 	srf->base_size = *srf->sizes;
 	srf->autogen_filter = SVGA3D_TEX_FILTER_NONE;
 	srf->multisample_count = 0;
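memdup_user() folds the kmalloc + copy_from_user + error-unwind triple into one call that returns either the new buffer or an ERR_PTR. A userspace sketch of the same shape (malloc/memcpy stand in for the kernel allocation and user copy; the error-pointer convention is simplified to an out-parameter):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void *memdup(const void *src, size_t len, int *err)
{
	void *p = malloc(len);

	if (!p) {
		*err = -ENOMEM;
		return NULL;
	}
	memcpy(p, src, len);	/* copy_from_user() in the kernel version */
	*err = 0;
	return p;
}

int main(void)
{
	const char sizes[] = "one allocation, one failure path";
	int err;
	char *dup = memdup(sizes, sizeof(sizes), &err);

	if (!dup)
		return 1;
	printf("%s (err=%d)\n", dup, err);
	free(dup);
	return 0;
}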
@@ -923,7 +913,7 @@ vmw_surface_handle_reference(struct vmw_private *dev_priv,
 
 	ret = -EINVAL;
 	base = ttm_base_object_lookup_for_ref(dev_priv->tdev, handle);
-	if (unlikely(base == NULL)) {
+	if (unlikely(!base)) {
 		DRM_ERROR("Could not find surface to reference.\n");
 		goto out_no_lookup;
 	}
@@ -1069,7 +1059,7 @@ static int vmw_gb_surface_create(struct vmw_resource *res)
 
 	cmd = vmw_fifo_reserve(dev_priv, submit_len);
 	cmd2 = (typeof(cmd2))cmd;
-	if (unlikely(cmd == NULL)) {
+	if (unlikely(!cmd)) {
 		DRM_ERROR("Failed reserving FIFO space for surface "
 			  "creation.\n");
 		ret = -ENOMEM;
@@ -1135,7 +1125,7 @@ static int vmw_gb_surface_bind(struct vmw_resource *res,
 	submit_size = sizeof(*cmd1) + (res->backup_dirty ? sizeof(*cmd2) : 0);
 
 	cmd1 = vmw_fifo_reserve(dev_priv, submit_size);
-	if (unlikely(cmd1 == NULL)) {
+	if (unlikely(!cmd1)) {
 		DRM_ERROR("Failed reserving FIFO space for surface "
 			  "binding.\n");
 		return -ENOMEM;
@@ -1185,7 +1175,7 @@ static int vmw_gb_surface_unbind(struct vmw_resource *res,
 
 	submit_size = sizeof(*cmd3) + (readback ? sizeof(*cmd1) : sizeof(*cmd2));
 	cmd = vmw_fifo_reserve(dev_priv, submit_size);
-	if (unlikely(cmd == NULL)) {
+	if (unlikely(!cmd)) {
 		DRM_ERROR("Failed reserving FIFO space for surface "
 			  "unbinding.\n");
 		return -ENOMEM;
@@ -1244,7 +1234,7 @@ static int vmw_gb_surface_destroy(struct vmw_resource *res)
 	vmw_binding_res_list_scrub(&res->binding_head);
 
 	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
-	if (unlikely(cmd == NULL)) {
+	if (unlikely(!cmd)) {
 		DRM_ERROR("Failed reserving FIFO space for surface "
 			  "destruction.\n");
 		mutex_unlock(&dev_priv->binding_mutex);
@@ -1410,7 +1400,7 @@ int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data,
 
 	user_srf = container_of(base, struct vmw_user_surface, prime.base);
 	srf = &user_srf->srf;
-	if (srf->res.backup == NULL) {
+	if (!srf->res.backup) {
 		DRM_ERROR("Shared GB surface is missing a backup buffer.\n");
 		goto out_bad_resource;
 	}
@@ -1524,7 +1514,7 @@ int vmw_surface_gb_priv_define(struct drm_device *dev,
 	}
 
 	user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
-	if (unlikely(user_srf == NULL)) {
+	if (unlikely(!user_srf)) {
 		ret = -ENOMEM;
 		goto out_no_user_srf;
 	}