mirror of https://gitee.com/openkylin/linux.git
drm/vmwgfx: Avoid cmdbuf alloc sleeping if !TASK_RUNNING

If the command buffer pool is out of space, the code waits until space is
available. However, since the wait-event condition tries to allocate a
range-manager node while !TASK_RUNNING, we get a kernel warning. Avoid
this by pre-allocating the mm node. This will also probably be more
efficient.

Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Reviewed-by: Sinclair Yeh <syeh@vmware.com>
This commit is contained in:
parent 6bf6bf03b3
commit 9b590783b3
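For context, a minimal sketch of the pattern this commit fixes; all names below (demo_*) are hypothetical, not the driver's own. wait_event() sets the task state to TASK_UNINTERRUPTIBLE before each evaluation of its condition, so a GFP_KERNEL allocation inside the condition may sleep while !TASK_RUNNING and trip the kernel's blocking-ops warning. The cure is to allocate while still TASK_RUNNING and keep the condition non-blocking:

#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

static DECLARE_WAIT_QUEUE_HEAD(demo_queue);
static DEFINE_SPINLOCK(demo_lock);
static bool demo_space_free;

struct demo_info {
	void *buf;
	bool done;
};

/* BAD: wait_event() evaluates this while !TASK_RUNNING, and
 * kzalloc(GFP_KERNEL) may sleep there -> kernel warning. */
static bool demo_cond_bad(struct demo_info *info)
{
	info->buf = kzalloc(64, GFP_KERNEL);
	return info->buf && demo_space_free;
}

/* GOOD: the condition only does non-blocking work under a spinlock. */
static bool demo_cond_good(struct demo_info *info)
{
	spin_lock_bh(&demo_lock);
	info->done = demo_space_free;
	spin_unlock_bh(&demo_lock);
	return info->done;
}

static int demo_alloc(struct demo_info *info)
{
	/* Pre-allocate while still TASK_RUNNING; sleeping is fine here. */
	info->buf = kzalloc(64, GFP_KERNEL);
	if (!info->buf)
		return -ENOMEM;
	wait_event(demo_queue, demo_cond_good(info));
	return 0;
}

The commit applies exactly this split: vmw_cmdbuf_try_alloc() is reduced to a memset plus drm_mm_insert_node_generic() under a spinlock, and the node itself is supplied by the caller.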
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
@@ -33,7 +33,8 @@
  * multiple of the DMA pool allocation size.
  */
 #define VMW_CMDBUF_INLINE_ALIGN 64
-#define VMW_CMDBUF_INLINE_SIZE (1024 - VMW_CMDBUF_INLINE_ALIGN)
+#define VMW_CMDBUF_INLINE_SIZE \
+	(1024 - ALIGN(sizeof(SVGACBHeader), VMW_CMDBUF_INLINE_ALIGN))
 
 /**
  * struct vmw_cmdbuf_context - Command buffer context queues
@@ -145,7 +146,7 @@ struct vmw_cmdbuf_header {
 	SVGACBHeader *cb_header;
 	SVGACBContext cb_context;
 	struct list_head list;
-	struct drm_mm_node *node;
+	struct drm_mm_node node;
 	dma_addr_t handle;
 	u8 *cmd;
 	size_t size;
@@ -169,13 +170,13 @@ struct vmw_cmdbuf_dheader {
  * struct vmw_cmdbuf_alloc_info - Command buffer space allocation metadata
  *
  * @page_size: Size of requested command buffer space in pages.
- * @node: The range manager node if allocation succeeded.
- * @ret: Error code if failure. Otherwise 0.
+ * @node: Pointer to the range manager node.
+ * @done: True if this allocation has succeeded.
  */
 struct vmw_cmdbuf_alloc_info {
 	size_t page_size;
 	struct drm_mm_node *node;
-	int ret;
+	bool done;
 };
 
 /* Loop over each context in the command buffer manager. */
@@ -253,9 +254,7 @@ static void __vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header)
 		return;
 	}
 
-	drm_mm_remove_node(header->node);
-	kfree(header->node);
-	header->node = NULL;
+	drm_mm_remove_node(&header->node);
 	wake_up_all(&man->alloc_queue);
 	if (header->cb_header)
 		dma_pool_free(man->headers, header->cb_header,
@@ -669,32 +668,26 @@ static bool vmw_cmdbuf_try_alloc(struct vmw_cmdbuf_man *man,
 {
 	int ret;
 
-	if (info->node)
+	if (info->done)
 		return true;
 
-	info->node = kzalloc(sizeof(*info->node), GFP_KERNEL);
-	if (!info->node) {
-		info->ret = -ENOMEM;
-		return true;
-	}
-
+	memset(info->node, 0, sizeof(*info->node));
 	spin_lock_bh(&man->lock);
-	ret = drm_mm_insert_node_generic(&man->mm, info->node, info->page_size, 0, 0,
+	ret = drm_mm_insert_node_generic(&man->mm, info->node, info->page_size,
+					 0, 0,
 					 DRM_MM_SEARCH_DEFAULT,
 					 DRM_MM_CREATE_DEFAULT);
 	spin_unlock_bh(&man->lock);
-	if (ret) {
-		kfree(info->node);
-		info->node = NULL;
-	}
+	info->done = !ret;
 
-	return !!info->node;
+	return info->done;
 }
 
 /**
  * vmw_cmdbuf_alloc_space - Allocate buffer space from the main pool.
  *
  * @man: The command buffer manager.
+ * @node: Pointer to pre-allocated range-manager node.
  * @size: The size of the allocation.
  * @interruptible: Whether to sleep interruptible while waiting for space.
 *
@@ -702,15 +695,16 @@ static bool vmw_cmdbuf_try_alloc(struct vmw_cmdbuf_man *man,
  * no space available ATM, it turns on IRQ handling and sleeps waiting for it to
  * become available.
  */
-static struct drm_mm_node *vmw_cmdbuf_alloc_space(struct vmw_cmdbuf_man *man,
-						  size_t size,
-						  bool interruptible)
+static int vmw_cmdbuf_alloc_space(struct vmw_cmdbuf_man *man,
+				  struct drm_mm_node *node,
+				  size_t size,
+				  bool interruptible)
 {
 	struct vmw_cmdbuf_alloc_info info;
 
 	info.page_size = PAGE_ALIGN(size) >> PAGE_SHIFT;
-	info.node = NULL;
-	info.ret = 0;
+	info.node = node;
+	info.done = false;
 
 	/*
 	 * To prevent starvation of large requests, only one allocating call
@@ -718,22 +712,14 @@ static struct drm_mm_node *vmw_cmdbuf_alloc_space(struct vmw_cmdbuf_man *man,
 	 */
 	if (interruptible) {
 		if (mutex_lock_interruptible(&man->space_mutex))
-			return ERR_PTR(-ERESTARTSYS);
+			return -ERESTARTSYS;
 	} else {
 		mutex_lock(&man->space_mutex);
 	}
 
 	/* Try to allocate space without waiting. */
-	(void) vmw_cmdbuf_try_alloc(man, &info);
-	if (info.ret && !info.node) {
-		mutex_unlock(&man->space_mutex);
-		return ERR_PTR(info.ret);
-	}
-
-	if (info.node) {
-		mutex_unlock(&man->space_mutex);
-		return info.node;
-	}
+	if (vmw_cmdbuf_try_alloc(man, &info))
+		goto out_unlock;
 
 	vmw_generic_waiter_add(man->dev_priv,
 			       SVGA_IRQFLAG_COMMAND_BUFFER,
@@ -749,7 +735,7 @@ static struct drm_mm_node *vmw_cmdbuf_alloc_space(struct vmw_cmdbuf_man *man,
 				(man->dev_priv, SVGA_IRQFLAG_COMMAND_BUFFER,
 				 &man->dev_priv->cmdbuf_waiters);
 			mutex_unlock(&man->space_mutex);
-			return ERR_PTR(ret);
+			return ret;
 		}
 	} else {
 		wait_event(man->alloc_queue, vmw_cmdbuf_try_alloc(man, &info));
@@ -757,11 +743,11 @@ static struct drm_mm_node *vmw_cmdbuf_alloc_space(struct vmw_cmdbuf_man *man,
 	vmw_generic_waiter_remove(man->dev_priv,
 				  SVGA_IRQFLAG_COMMAND_BUFFER,
 				  &man->dev_priv->cmdbuf_waiters);
-	mutex_unlock(&man->space_mutex);
-	if (info.ret && !info.node)
-		return ERR_PTR(info.ret);
 
-	return info.node;
+out_unlock:
+	mutex_unlock(&man->space_mutex);
+
+	return 0;
 }
 
 /**
@@ -785,10 +771,10 @@ static int vmw_cmdbuf_space_pool(struct vmw_cmdbuf_man *man,
 	if (!man->has_pool)
 		return -ENOMEM;
 
-	header->node = vmw_cmdbuf_alloc_space(man, size, interruptible);
+	ret = vmw_cmdbuf_alloc_space(man, &header->node, size, interruptible);
 
-	if (IS_ERR(header->node))
-		return PTR_ERR(header->node);
+	if (ret)
+		return ret;
 
 	header->cb_header = dma_pool_alloc(man->headers, GFP_KERNEL,
 					   &header->handle);
@@ -797,9 +783,9 @@ static int vmw_cmdbuf_space_pool(struct vmw_cmdbuf_man *man,
 		goto out_no_cb_header;
 	}
 
-	header->size = header->node->size << PAGE_SHIFT;
+	header->size = header->node.size << PAGE_SHIFT;
 	cb_hdr = header->cb_header;
-	offset = header->node->start << PAGE_SHIFT;
+	offset = header->node.start << PAGE_SHIFT;
 	header->cmd = man->map + offset;
 	memset(cb_hdr, 0, sizeof(*cb_hdr));
 	if (man->using_mob) {
@@ -814,9 +800,8 @@ static int vmw_cmdbuf_space_pool(struct vmw_cmdbuf_man *man,
 
 out_no_cb_header:
 	spin_lock_bh(&man->lock);
-	drm_mm_remove_node(header->node);
+	drm_mm_remove_node(&header->node);
 	spin_unlock_bh(&man->lock);
-	kfree(header->node);
 
 	return ret;
 }
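A note on the design choice visible in the last hunks: rather than keeping a separately kmalloc'ed drm_mm_node and merely pre-allocating it, the commit embeds the node directly in struct vmw_cmdbuf_header. The node's lifetime is then tied to the header, nothing is allocated on the wait path, and the free/error paths lose their kfree() calls. A minimal sketch of the pattern (hypothetical struct, not the driver's full definition):

#include <drm/drm_mm.h>

struct demo_header {
	/* Embedded range-manager node: no separate kzalloc()/kfree(),
	 * and nothing left to allocate while waiting for space. */
	struct drm_mm_node node;	/* was: struct drm_mm_node *node */
};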