drm/vmwgfx: Stabilize the command buffer submission code
This commit addresses some stability problems with the recently introduced
command buffer submission code:

1) Make the vmw_cmdbuf_man_process() function handle reruns internally to
   avoid losing interrupts if the caller forgets to rerun on -EAGAIN.
2) Handle default command buffer allocations using inline command buffers.
   This avoids rare allocation deadlocks.
3) In case of command buffer errors we might lose fence submissions, so
   send a new fence after each command buffer error. This will help avoid
   lengthy fence waits.

Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Reviewed-by: Sinclair Yeh <syeh@vmware.com>
commit 09dc1387c9
parent ed7d78b2da
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
@@ -415,16 +415,16 @@ static void vmw_cmdbuf_ctx_process(struct vmw_cmdbuf_man *man,
  *
  * Calls vmw_cmdbuf_ctx_process() on all contexts. If any context has
  * command buffers left that are not submitted to hardware, Make sure
- * IRQ handling is turned on. Otherwise, make sure it's turned off. This
- * function may return -EAGAIN to indicate it should be rerun due to
- * possibly missed IRQs if IRQs has just been turned on.
+ * IRQ handling is turned on. Otherwise, make sure it's turned off.
  */
-static int vmw_cmdbuf_man_process(struct vmw_cmdbuf_man *man)
+static void vmw_cmdbuf_man_process(struct vmw_cmdbuf_man *man)
 {
-	int notempty = 0;
+	int notempty;
 	struct vmw_cmdbuf_context *ctx;
 	int i;
 
+retry:
+	notempty = 0;
 	for_each_cmdbuf_ctx(man, i, ctx)
 		vmw_cmdbuf_ctx_process(man, ctx, &notempty);
 
@@ -440,10 +440,8 @@ static int vmw_cmdbuf_man_process(struct vmw_cmdbuf_man *man)
 		man->irq_on = true;
 
 		/* Rerun in case we just missed an irq. */
-		return -EAGAIN;
+		goto retry;
 	}
-
-	return 0;
 }
 
 /**
@@ -468,8 +466,7 @@ static void vmw_cmdbuf_ctx_add(struct vmw_cmdbuf_man *man,
 	header->cb_context = cb_context;
 	list_add_tail(&header->list, &man->ctx[cb_context].submitted);
 
-	if (vmw_cmdbuf_man_process(man) == -EAGAIN)
-		vmw_cmdbuf_man_process(man);
+	vmw_cmdbuf_man_process(man);
 }
 
 /**
@@ -488,8 +485,7 @@ static void vmw_cmdbuf_man_tasklet(unsigned long data)
 	struct vmw_cmdbuf_man *man = (struct vmw_cmdbuf_man *) data;
 
 	spin_lock(&man->lock);
-	if (vmw_cmdbuf_man_process(man) == -EAGAIN)
-		(void) vmw_cmdbuf_man_process(man);
+	vmw_cmdbuf_man_process(man);
 	spin_unlock(&man->lock);
 }
 
@@ -507,6 +503,7 @@ static void vmw_cmdbuf_work_func(struct work_struct *work)
 	struct vmw_cmdbuf_man *man =
 		container_of(work, struct vmw_cmdbuf_man, work);
 	struct vmw_cmdbuf_header *entry, *next;
+	uint32_t dummy;
 	bool restart = false;
 
 	spin_lock_bh(&man->lock);
@@ -523,6 +520,8 @@ static void vmw_cmdbuf_work_func(struct work_struct *work)
 	if (restart && vmw_cmdbuf_startstop(man, true))
 		DRM_ERROR("Failed restarting command buffer context 0.\n");
 
+	/* Send a new fence in case one was removed */
+	vmw_fifo_send_fence(man->dev_priv, &dummy);
 }
 
 /**
@@ -682,7 +681,7 @@ static bool vmw_cmdbuf_try_alloc(struct vmw_cmdbuf_man *man,
 					 DRM_MM_SEARCH_DEFAULT,
 					 DRM_MM_CREATE_DEFAULT);
 	if (ret) {
-		(void) vmw_cmdbuf_man_process(man);
+		vmw_cmdbuf_man_process(man);
 		ret = drm_mm_insert_node_generic(&man->mm, info->node,
 						 info->page_size, 0, 0,
 						 DRM_MM_SEARCH_DEFAULT,
@@ -1168,7 +1167,14 @@ int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man,
 	drm_mm_init(&man->mm, 0, size >> PAGE_SHIFT);
 
 	man->has_pool = true;
-	man->default_size = default_size;
+
+	/*
+	 * For now, set the default size to VMW_CMDBUF_INLINE_SIZE to
+	 * prevent deadlocks from happening when vmw_cmdbuf_space_pool()
+	 * needs to wait for space and we block on further command
+	 * submissions to be able to free up space.
+	 */
+	man->default_size = VMW_CMDBUF_INLINE_SIZE;
 	DRM_INFO("Using command buffers with %s pool.\n",
 		 (man->using_mob) ? "MOB" : "DMA");
 
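For reference, the heart of fix (1) above is that vmw_cmdbuf_man_process() now
loops back on itself after turning IRQs on, instead of returning -EAGAIN and
relying on every caller to remember to rerun it. Below is a minimal,
self-contained sketch of that retry pattern; the fake_* names and the
simplified bookkeeping are illustrative stand-ins, not the vmwgfx driver's
real structures or logic.

/*
 * Sketch of the "rerun after enabling IRQs" pattern adopted in this commit.
 * All names here are hypothetical; only the control flow mirrors the patch.
 */
#include <stdbool.h>
#include <stdio.h>

struct fake_man {
	bool irq_on;
	int pending;	/* command buffers not yet submitted to hardware */
};

/* Pretend to drain contexts; returns the number of buffers still pending. */
static int fake_ctx_process(struct fake_man *man)
{
	if (man->pending > 0)
		man->pending--;
	return man->pending;
}

/*
 * Like the patched vmw_cmdbuf_man_process(): returns void and jumps back to
 * the start right after enabling IRQs, so an interrupt that fired in the
 * window before enabling cannot be lost because a caller skipped the rerun.
 */
static void fake_man_process(struct fake_man *man)
{
	int notempty;

retry:
	notempty = fake_ctx_process(man);

	if (notempty && !man->irq_on) {
		man->irq_on = true;
		/* Rerun in case we just missed an irq. */
		goto retry;
	}
	if (!notempty && man->irq_on)
		man->irq_on = false;
}

int main(void)
{
	struct fake_man man = { .irq_on = false, .pending = 3 };

	fake_man_process(&man);
	printf("pending=%d irq_on=%d\n", man.pending, man.irq_on);
	return 0;
}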