Merge branch 'drm-vmwgfx-fixes' of git://people.freedesktop.org/~syeh/repos_linux into drm-fixes
misc vmwgfx fixes.

* 'drm-vmwgfx-fixes' of git://people.freedesktop.org/~syeh/repos_linux:
  drm/vmwgfx: constify pci_device_id.
  drm/vmwgfx: Fix gcc-7.1.1 warning
  drm/vmwgfx: Fix cursor hotspot issue with Wayland on Fedora
  drm/vmwgfx: Limit max desktop dimensions to 8Kx8K
  drm/vmwgfx: dma-buf: Constify ttm_place structures.
  drm/vmwgfx: fix comment mistake for vmw_cmd_dx_set_index_buffer()
  drm/vmwgfx: Use dma_pool_zalloc
  drm/vmwgfx: Fix handling of errors returned by 'vmw_cotable_alloc()'
  drm/vmwgfx: Fix NULL pointer comparison
commit 739b000994
@@ -30,49 +30,49 @@
 #include <drm/ttm/ttm_placement.h>
 #include <drm/ttm/ttm_page_alloc.h>

-static struct ttm_place vram_placement_flags = {
+static const struct ttm_place vram_placement_flags = {
 	.fpfn = 0,
 	.lpfn = 0,
 	.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
 };

-static struct ttm_place vram_ne_placement_flags = {
+static const struct ttm_place vram_ne_placement_flags = {
 	.fpfn = 0,
 	.lpfn = 0,
 	.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
 };

-static struct ttm_place sys_placement_flags = {
+static const struct ttm_place sys_placement_flags = {
 	.fpfn = 0,
 	.lpfn = 0,
 	.flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED
 };

-static struct ttm_place sys_ne_placement_flags = {
+static const struct ttm_place sys_ne_placement_flags = {
 	.fpfn = 0,
 	.lpfn = 0,
 	.flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
 };

-static struct ttm_place gmr_placement_flags = {
+static const struct ttm_place gmr_placement_flags = {
 	.fpfn = 0,
 	.lpfn = 0,
 	.flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
 };

-static struct ttm_place gmr_ne_placement_flags = {
+static const struct ttm_place gmr_ne_placement_flags = {
 	.fpfn = 0,
 	.lpfn = 0,
 	.flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
 };

-static struct ttm_place mob_placement_flags = {
+static const struct ttm_place mob_placement_flags = {
 	.fpfn = 0,
 	.lpfn = 0,
 	.flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED
 };

-static struct ttm_place mob_ne_placement_flags = {
+static const struct ttm_place mob_ne_placement_flags = {
 	.fpfn = 0,
 	.lpfn = 0,
 	.flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
@@ -85,7 +85,7 @@ struct ttm_placement vmw_vram_placement = {
 	.busy_placement = &vram_placement_flags
 };

-static struct ttm_place vram_gmr_placement_flags[] = {
+static const struct ttm_place vram_gmr_placement_flags[] = {
 	{
 		.fpfn = 0,
 		.lpfn = 0,
@@ -97,7 +97,7 @@ static struct ttm_place vram_gmr_placement_flags[] = {
 	}
 };

-static struct ttm_place gmr_vram_placement_flags[] = {
+static const struct ttm_place gmr_vram_placement_flags[] = {
 	{
 		.fpfn = 0,
 		.lpfn = 0,
@@ -116,7 +116,7 @@ struct ttm_placement vmw_vram_gmr_placement = {
 	.busy_placement = &gmr_placement_flags
 };

-static struct ttm_place vram_gmr_ne_placement_flags[] = {
+static const struct ttm_place vram_gmr_ne_placement_flags[] = {
 	{
 		.fpfn = 0,
 		.lpfn = 0,
@@ -165,7 +165,7 @@ struct ttm_placement vmw_sys_ne_placement = {
 	.busy_placement = &sys_ne_placement_flags
 };

-static struct ttm_place evictable_placement_flags[] = {
+static const struct ttm_place evictable_placement_flags[] = {
 	{
 		.fpfn = 0,
 		.lpfn = 0,
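
Note on the constification above: these placement tables are only ever read, and TTM's struct ttm_placement refers to its entries through const pointers, so marking the tables const moves them into read-only data with no caller changes. A minimal sketch of the pattern; the "example_*" names are illustrative, not from the driver:

/* Hedged sketch: a const placement table wired into a ttm_placement. */
static const struct ttm_place example_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED
};

struct ttm_placement example_placement = {
	.num_placement = 1,
	.placement = &example_placement_flags,	/* const pointer member */
	.num_busy_placement = 1,
	.busy_placement = &example_placement_flags
};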
@@ -779,8 +779,8 @@ static int vmw_cmdbuf_space_pool(struct vmw_cmdbuf_man *man,
 	if (ret)
 		return ret;

-	header->cb_header = dma_pool_alloc(man->headers, GFP_KERNEL,
+	header->cb_header = dma_pool_zalloc(man->headers, GFP_KERNEL,
 					   &header->handle);
 	if (!header->cb_header) {
 		ret = -ENOMEM;
 		goto out_no_cb_header;
@@ -790,7 +790,6 @@
 	cb_hdr = header->cb_header;
 	offset = header->node.start << PAGE_SHIFT;
 	header->cmd = man->map + offset;
-	memset(cb_hdr, 0, sizeof(*cb_hdr));
 	if (man->using_mob) {
 		cb_hdr->flags = SVGA_CB_FLAG_MOB;
 		cb_hdr->ptr.mob.mobid = man->cmd_space->mem.start;
@@ -827,8 +826,8 @@ static int vmw_cmdbuf_space_inline(struct vmw_cmdbuf_man *man,
 	if (WARN_ON_ONCE(size > VMW_CMDBUF_INLINE_SIZE))
 		return -ENOMEM;

-	dheader = dma_pool_alloc(man->dheaders, GFP_KERNEL,
+	dheader = dma_pool_zalloc(man->dheaders, GFP_KERNEL,
 				 &header->handle);
 	if (!dheader)
 		return -ENOMEM;

@@ -837,7 +836,6 @@
 	cb_hdr = &dheader->cb_header;
 	header->cb_header = cb_hdr;
 	header->cmd = dheader->cmd;
-	memset(dheader, 0, sizeof(*dheader));
 	cb_hdr->status = SVGA_CB_STATUS_NONE;
 	cb_hdr->flags = SVGA_CB_FLAG_NONE;
 	cb_hdr->ptr.pa = (u64)header->handle +
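
Both pairs of hunks above are the dma_pool_zalloc conversion: the memset() deleted a few lines below each call becomes redundant once the allocation itself returns zeroed memory. A hedged sketch of the equivalence; "my_pool" and "struct my_hdr" are illustrative stand-ins, not the driver's cmdbuf types:

#include <linux/dmapool.h>

struct my_hdr { u32 flags; u64 pa; };	/* stand-in for a cb header */

static struct my_hdr *my_hdr_alloc(struct dma_pool *my_pool,
				   dma_addr_t *handle)
{
	/* Before: hdr = dma_pool_alloc(my_pool, GFP_KERNEL, handle);
	 *         ... memset(hdr, 0, sizeof(*hdr));
	 * After: one call, and the memory arrives already cleared. */
	return dma_pool_zalloc(my_pool, GFP_KERNEL, handle);
}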
@@ -205,7 +205,7 @@ int vmw_cmdbuf_res_add(struct vmw_cmdbuf_res_manager *man,
 	int ret;

 	cres = kzalloc(sizeof(*cres), GFP_KERNEL);
-	if (unlikely(cres == NULL))
+	if (unlikely(!cres))
 		return -ENOMEM;

 	cres->hash.key = user_key | (res_type << 24);
@@ -291,7 +291,7 @@ vmw_cmdbuf_res_man_create(struct vmw_private *dev_priv)
 	int ret;

 	man = kzalloc(sizeof(*man), GFP_KERNEL);
-	if (man == NULL)
+	if (!man)
 		return ERR_PTR(-ENOMEM);

 	man->dev_priv = dev_priv;
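
The two hunks above, and the many similar ones below, are the NULL-pointer-comparison cleanup: the kernel style (the form checkpatch.pl suggests) prefers `if (!ptr)` over `if (ptr == NULL)`, with no behavioural change. A sketch, with "obj" as a placeholder:

/* Style-only change; both forms compile to the same code. */
obj = kzalloc(sizeof(*obj), GFP_KERNEL);
if (!obj)		/* preferred over: if (obj == NULL) */
	return -ENOMEM;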
@@ -210,8 +210,8 @@ static int vmw_gb_context_init(struct vmw_private *dev_priv,
 		for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
 			uctx->cotables[i] = vmw_cotable_alloc(dev_priv,
 							      &uctx->res, i);
-			if (unlikely(uctx->cotables[i] == NULL)) {
-				ret = -ENOMEM;
+			if (unlikely(IS_ERR(uctx->cotables[i]))) {
+				ret = PTR_ERR(uctx->cotables[i]);
 				goto out_cotables;
 			}
 		}
@@ -777,7 +777,7 @@ static int vmw_context_define(struct drm_device *dev, void *data,
 	}

 	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
-	if (unlikely(ctx == NULL)) {
+	if (unlikely(!ctx)) {
 		ttm_mem_global_free(vmw_mem_glob(dev_priv),
 				    vmw_user_context_size);
 		ret = -ENOMEM;
@@ -584,7 +584,7 @@ struct vmw_resource *vmw_cotable_alloc(struct vmw_private *dev_priv,
 		return ERR_PTR(ret);

 	vcotbl = kzalloc(sizeof(*vcotbl), GFP_KERNEL);
-	if (unlikely(vcotbl == NULL)) {
+	if (unlikely(!vcotbl)) {
 		ret = -ENOMEM;
 		goto out_no_alloc;
 	}
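
The error-handling fix makes the caller match this function's contract: as the `return ERR_PTR(ret);` context above shows, vmw_cotable_alloc() encodes failures with ERR_PTR() rather than returning NULL, so the old NULL check in vmw_gb_context_init() could never fire and the specific errno was flattened to -ENOMEM. A hedged sketch of the ERR_PTR/IS_ERR/PTR_ERR pattern, with made-up names:

#include <linux/err.h>
#include <linux/slab.h>

struct thing { int id; };	/* illustrative */

/* Same convention as vmw_cotable_alloc(): ERR_PTR on failure. */
static struct thing *make_thing(void)
{
	struct thing *t = kzalloc(sizeof(*t), GFP_KERNEL);

	if (!t)
		return ERR_PTR(-ENOMEM);
	return t;
}

static int use_thing(void)
{
	struct thing *t = make_thing();

	if (IS_ERR(t))			/* not: if (!t) */
		return PTR_ERR(t);	/* keep the real errno */
	kfree(t);
	return 0;
}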
@@ -227,7 +227,7 @@ static const struct drm_ioctl_desc vmw_ioctls[] = {
 		      DRM_AUTH | DRM_RENDER_ALLOW),
 };

-static struct pci_device_id vmw_pci_id_list[] = {
+static const struct pci_device_id vmw_pci_id_list[] = {
 	{0x15ad, 0x0405, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VMWGFX_CHIP_SVGAII},
 	{0, 0, 0}
 };
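
As with the ttm_place tables, the PCI ID list is read-only data: struct pci_driver carries its id_table as a const pointer, so the table can be constified without touching any consumer. A sketch with the same VMware SVGA II IDs; "example_id_list" is an illustrative name:

static const struct pci_device_id example_id_list[] = {
	{ PCI_DEVICE(0x15ad, 0x0405) },	/* VMware SVGA II, as above */
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, example_id_list);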
@@ -630,7 +630,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 	char host_log[100] = {0};

 	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
-	if (unlikely(dev_priv == NULL)) {
+	if (unlikely(!dev_priv)) {
 		DRM_ERROR("Failed allocating a device private struct.\n");
 		return -ENOMEM;
 	}
@@ -1035,7 +1035,7 @@ static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
 	int ret = -ENOMEM;

 	vmw_fp = kzalloc(sizeof(*vmw_fp), GFP_KERNEL);
-	if (unlikely(vmw_fp == NULL))
+	if (unlikely(!vmw_fp))
 		return ret;

 	vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev, 10);
@@ -1196,7 +1196,7 @@ static int vmw_master_create(struct drm_device *dev,
 	struct vmw_master *vmaster;

 	vmaster = kzalloc(sizeof(*vmaster), GFP_KERNEL);
-	if (unlikely(vmaster == NULL))
+	if (unlikely(!vmaster))
 		return -ENOMEM;

 	vmw_master_init(vmaster);
@@ -264,7 +264,7 @@ static int vmw_resource_val_add(struct vmw_sw_context *sw_context,
 	}

 	node = kzalloc(sizeof(*node), GFP_KERNEL);
-	if (unlikely(node == NULL)) {
+	if (unlikely(!node)) {
 		DRM_ERROR("Failed to allocate a resource validation "
 			  "entry.\n");
 		return -ENOMEM;
@@ -452,7 +452,7 @@ static int vmw_resource_relocation_add(struct list_head *list,
 	struct vmw_resource_relocation *rel;

 	rel = kmalloc(sizeof(*rel), GFP_KERNEL);
-	if (unlikely(rel == NULL)) {
+	if (unlikely(!rel)) {
 		DRM_ERROR("Failed to allocate a resource relocation.\n");
 		return -ENOMEM;
 	}
@@ -519,7 +519,7 @@ static int vmw_cmd_invalid(struct vmw_private *dev_priv,
 			   struct vmw_sw_context *sw_context,
 			   SVGA3dCmdHeader *header)
 {
-	return capable(CAP_SYS_ADMIN) ? : -EINVAL;
+	return -EINVAL;
 }

 static int vmw_cmd_ok(struct vmw_private *dev_priv,
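
This is the gcc-7.1.1 warning fix. The deleted line used the GNU `?:` extension with an omitted middle operand; because capable() returns bool, gcc 7.1.1 warns that the omitted operand will always be true, and returning a nonzero "success" value from a validator that otherwise speaks 0-or-negative-errno looks like a leftover rather than intent, so an invalid command is now rejected unconditionally. A sketch of the pitfall (illustrative function, not driver code):

static int check_cmd(bool privileged)
{
	/* return privileged ? : -EINVAL;  <- with a bool condition this
	 * can only yield 1 or -EINVAL, which gcc 7 flags as suspicious. */
	return privileged ? 0 : -EINVAL;	/* say what you mean */
}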
@@ -2584,7 +2584,7 @@ static int vmw_cmd_dx_set_vertex_buffers(struct vmw_private *dev_priv,

 /**
  * vmw_cmd_dx_ia_set_vertex_buffers - Validate an
- * SVGA_3D_CMD_DX_IA_SET_VERTEX_BUFFERS command.
+ * SVGA_3D_CMD_DX_IA_SET_INDEX_BUFFER command.
  *
  * @dev_priv: Pointer to a device private struct.
  * @sw_context: The software context being used for this batch.
@@ -284,7 +284,7 @@ struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv)
 {
 	struct vmw_fence_manager *fman = kzalloc(sizeof(*fman), GFP_KERNEL);

-	if (unlikely(fman == NULL))
+	if (unlikely(!fman))
 		return NULL;

 	fman->dev_priv = dev_priv;
@@ -541,7 +541,7 @@ int vmw_fence_create(struct vmw_fence_manager *fman,
 	int ret;

 	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
-	if (unlikely(fence == NULL))
+	if (unlikely(!fence))
 		return -ENOMEM;

 	ret = vmw_fence_obj_init(fman, fence, seqno,
@@ -606,7 +606,7 @@ int vmw_user_fence_create(struct drm_file *file_priv,
 		return ret;

 	ufence = kzalloc(sizeof(*ufence), GFP_KERNEL);
-	if (unlikely(ufence == NULL)) {
+	if (unlikely(!ufence)) {
 		ret = -ENOMEM;
 		goto out_no_object;
 	}
@@ -966,7 +966,7 @@ int vmw_event_fence_action_queue(struct drm_file *file_priv,
 	struct vmw_fence_manager *fman = fman_from_fence(fence);

 	eaction = kzalloc(sizeof(*eaction), GFP_KERNEL);
-	if (unlikely(eaction == NULL))
+	if (unlikely(!eaction))
 		return -ENOMEM;

 	eaction->event = event;
@@ -1002,7 +1002,7 @@ static int vmw_event_fence_action_create(struct drm_file *file_priv,
 	int ret;

 	event = kzalloc(sizeof(*event), GFP_KERNEL);
-	if (unlikely(event == NULL)) {
+	if (unlikely(!event)) {
 		DRM_ERROR("Failed to allocate an event.\n");
 		ret = -ENOMEM;
 		goto out_no_space;
@@ -121,7 +121,7 @@ static int vmw_gmrid_man_init(struct ttm_mem_type_manager *man,
 	struct vmwgfx_gmrid_man *gman =
 		kzalloc(sizeof(*gman), GFP_KERNEL);

-	if (unlikely(gman == NULL))
+	if (unlikely(!gman))
 		return -ENOMEM;

 	spin_lock_init(&gman->lock);
@@ -384,6 +384,12 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,

 	hotspot_x = du->hotspot_x;
 	hotspot_y = du->hotspot_y;
+
+	if (plane->fb) {
+		hotspot_x += plane->fb->hot_x;
+		hotspot_y += plane->fb->hot_y;
+	}
+
 	du->cursor_surface = vps->surf;
 	du->cursor_dmabuf = vps->dmabuf;

@@ -411,6 +417,9 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
 		vmw_cursor_update_position(dev_priv, true,
 					   du->cursor_x + hotspot_x,
 					   du->cursor_y + hotspot_y);
+
+		du->core_hotspot_x = hotspot_x - du->hotspot_x;
+		du->core_hotspot_y = hotspot_y - du->hotspot_y;
 	} else {
 		DRM_ERROR("Failed to update cursor image\n");
 	}
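
This is the Wayland cursor-hotspot fix. The added lines fold the framebuffer hotspot (hot_x/hot_y, supplied by userspace through the CURSOR2 ioctl path that Wayland compositors rely on) into the legacy per-display hotspot, and record the userspace part in core_hotspot_x/y so later bare position updates neither lose nor double-count it. An annotated restatement of the arithmetic, with the structs abbreviated:

/* Effective hotspot = device/legacy hotspot + fb (userspace) hotspot. */
hotspot_x = du->hotspot_x;		/* device/legacy part */
hotspot_y = du->hotspot_y;
if (plane->fb) {
	hotspot_x += plane->fb->hot_x;	/* CURSOR2-provided part */
	hotspot_y += plane->fb->hot_y;
}
/* ... after the position update, remember only the userspace part: */
du->core_hotspot_x = hotspot_x - du->hotspot_x;
du->core_hotspot_y = hotspot_y - du->hotspot_y;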
@@ -320,14 +320,14 @@ int vmw_otables_setup(struct vmw_private *dev_priv)

 	if (dev_priv->has_dx) {
 		*otables = kmemdup(dx_tables, sizeof(dx_tables), GFP_KERNEL);
-		if (*otables == NULL)
+		if (!(*otables))
 			return -ENOMEM;

 		dev_priv->otable_batch.num_otables = ARRAY_SIZE(dx_tables);
 	} else {
 		*otables = kmemdup(pre_dx_tables, sizeof(pre_dx_tables),
 				   GFP_KERNEL);
-		if (*otables == NULL)
+		if (!(*otables))
 			return -ENOMEM;

 		dev_priv->otable_batch.num_otables = ARRAY_SIZE(pre_dx_tables);
@@ -407,7 +407,7 @@ struct vmw_mob *vmw_mob_create(unsigned long data_pages)
 {
 	struct vmw_mob *mob = kzalloc(sizeof(*mob), GFP_KERNEL);

-	if (unlikely(mob == NULL))
+	if (unlikely(!mob))
 		return NULL;

 	mob->num_pages = vmw_mob_calculate_pt_pages(data_pages);
@@ -244,7 +244,7 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg,

 	reply_len = ebx;
 	reply = kzalloc(reply_len + 1, GFP_KERNEL);
-	if (reply == NULL) {
+	if (!reply) {
 		DRM_ERROR("Cannot allocate memory for reply\n");
 		return -ENOMEM;
 	}
@@ -340,7 +340,7 @@ int vmw_host_get_guestinfo(const char *guest_info_param,

 	msg_len = strlen(guest_info_param) + strlen("info-get ") + 1;
 	msg = kzalloc(msg_len, GFP_KERNEL);
-	if (msg == NULL) {
+	if (!msg) {
 		DRM_ERROR("Cannot allocate memory to get %s", guest_info_param);
 		return -ENOMEM;
 	}
@@ -400,7 +400,7 @@ int vmw_host_log(const char *log)

 	msg_len = strlen(log) + strlen("log ") + 1;
 	msg = kzalloc(msg_len, GFP_KERNEL);
-	if (msg == NULL) {
+	if (!msg) {
 		DRM_ERROR("Cannot allocate memory for log message\n");
 		return -ENOMEM;
 	}
@@ -446,7 +446,7 @@ int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
 	int ret;

 	user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
-	if (unlikely(user_bo == NULL)) {
+	if (unlikely(!user_bo)) {
 		DRM_ERROR("Failed to allocate a buffer.\n");
 		return -ENOMEM;
 	}
@@ -836,7 +836,7 @@ static int vmw_resource_buf_alloc(struct vmw_resource *res,
 	}

 	backup = kzalloc(sizeof(*backup), GFP_KERNEL);
-	if (unlikely(backup == NULL))
+	if (unlikely(!backup))
 		return -ENOMEM;

 	ret = vmw_dmabuf_init(res->dev_priv, backup, res->backup_size,
@@ -751,7 +751,7 @@ static int vmw_user_shader_alloc(struct vmw_private *dev_priv,
 	}

 	ushader = kzalloc(sizeof(*ushader), GFP_KERNEL);
-	if (unlikely(ushader == NULL)) {
+	if (unlikely(!ushader)) {
 		ttm_mem_global_free(vmw_mem_glob(dev_priv),
 				    vmw_user_shader_size);
 		ret = -ENOMEM;
@@ -821,7 +821,7 @@ static struct vmw_resource *vmw_shader_alloc(struct vmw_private *dev_priv,
 	}

 	shader = kzalloc(sizeof(*shader), GFP_KERNEL);
-	if (unlikely(shader == NULL)) {
+	if (unlikely(!shader)) {
 		ttm_mem_global_free(vmw_mem_glob(dev_priv),
 				    vmw_shader_size);
 		ret = -ENOMEM;
@@ -981,7 +981,7 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv,

 	/* Allocate and pin a DMA buffer */
 	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
-	if (unlikely(buf == NULL))
+	if (unlikely(!buf))
 		return -ENOMEM;

 	ret = vmw_dmabuf_init(dev_priv, buf, size, &vmw_sys_ne_placement,
@@ -1640,8 +1640,8 @@ int vmw_kms_stdu_init_display(struct vmw_private *dev_priv)
 		 * something arbitrarily large and we will reject any layout
 		 * that doesn't fit prim_bb_mem later
 		 */
-		dev->mode_config.max_width = 16384;
-		dev->mode_config.max_height = 16384;
+		dev->mode_config.max_width = 8192;
+		dev->mode_config.max_height = 8192;
 	}

 	vmw_kms_create_implicit_placement_property(dev_priv, false);
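
Halving mode_config.max_width/max_height from 16384 to 8192 makes the DRM core reject anything beyond 8Kx8K up front, instead of leaning only on the later prim_bb_mem check the comment mentions: these fields bound every mode and framebuffer size the core will accept for the device. A minimal sketch of where the cap lives in an init path; the function name is illustrative:

static void example_clamp_modes(struct drm_device *dev)
{
	dev->mode_config.max_width = 8192;	/* 8K cap, as above */
	dev->mode_config.max_height = 8192;
}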