drm/amdgpu: use a spinlock instead of a mutex for the rq

A spinlock is more appropriate here and fixes some nasty lockdep warnings.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
Author:    Christian König
Committer: Alex Deucher
Date:      2015-08-18 14:41:25 +02:00
Commit:    2b184d8dbc
Parent:    a3348bb801

2 files changed, 17 insertions(+), 32 deletions(-)
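Why the mutex was a problem: a mutex is a sleeping lock, and lockdep (together with CONFIG_DEBUG_ATOMIC_SLEEP) flags any attempt to take one from a context that must not sleep. A minimal sketch of the offending pattern, with hypothetical lock names rather than code from this patch:

#include <linux/mutex.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(outer_lock);	/* hypothetical */
static DEFINE_MUTEX(inner_lock);	/* hypothetical */

static void bad_nesting(void)
{
	spin_lock(&outer_lock);		/* enters atomic context */
	mutex_lock(&inner_lock);	/* splat: mutex_lock() may sleep,
					 * "BUG: sleeping function called
					 * from invalid context" */
	mutex_unlock(&inner_lock);
	spin_unlock(&outer_lock);
}

A spinlock never sleeps, so it may be taken from such paths; the run-queue critical sections below are short, bounded list walks, which is what makes the spinlock the more appropriate primitive.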

diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c

@@ -30,27 +30,27 @@
 /* Initialize a given run queue struct */
 static void amd_sched_rq_init(struct amd_sched_rq *rq)
 {
+	spin_lock_init(&rq->lock);
 	INIT_LIST_HEAD(&rq->entities);
-	mutex_init(&rq->lock);
 	rq->current_entity = NULL;
 }
 
 static void amd_sched_rq_add_entity(struct amd_sched_rq *rq,
 				    struct amd_sched_entity *entity)
 {
-	mutex_lock(&rq->lock);
+	spin_lock(&rq->lock);
 	list_add_tail(&entity->list, &rq->entities);
-	mutex_unlock(&rq->lock);
+	spin_unlock(&rq->lock);
 }
 
 static void amd_sched_rq_remove_entity(struct amd_sched_rq *rq,
 				       struct amd_sched_entity *entity)
 {
-	mutex_lock(&rq->lock);
+	spin_lock(&rq->lock);
 	list_del_init(&entity->list);
 	if (rq->current_entity == entity)
 		rq->current_entity = NULL;
-	mutex_unlock(&rq->lock);
+	spin_unlock(&rq->lock);
 }
 
 /**
@@ -61,12 +61,16 @@ static void amd_sched_rq_remove_entity(struct amd_sched_rq *rq,
 static struct amd_sched_entity *
 amd_sched_rq_select_entity(struct amd_sched_rq *rq)
 {
-	struct amd_sched_entity *entity = rq->current_entity;
+	struct amd_sched_entity *entity;
 
+	spin_lock(&rq->lock);
+
+	entity = rq->current_entity;
 	if (entity) {
 		list_for_each_entry_continue(entity, &rq->entities, list) {
 			if (!kfifo_is_empty(&entity->job_queue)) {
 				rq->current_entity = entity;
+				spin_unlock(&rq->lock);
 				return rq->current_entity;
 			}
 		}
@@ -76,6 +80,7 @@ amd_sched_rq_select_entity(struct amd_sched_rq *rq)
 
 		if (!kfifo_is_empty(&entity->job_queue)) {
 			rq->current_entity = entity;
+			spin_unlock(&rq->lock);
 			return rq->current_entity;
 		}
 
@@ -83,6 +88,8 @@ amd_sched_rq_select_entity(struct amd_sched_rq *rq)
 			break;
 	}
 
+	spin_unlock(&rq->lock);
+
 	return NULL;
 }
 
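For readability, this is amd_sched_rq_select_entity() as it reads after the patch, stitched together from the three hunks above; the second loop's head and the break condition are pre-existing context filled in from the unchanged parts of the function:

static struct amd_sched_entity *
amd_sched_rq_select_entity(struct amd_sched_rq *rq)
{
	struct amd_sched_entity *entity;

	spin_lock(&rq->lock);

	entity = rq->current_entity;
	if (entity) {
		/* continue the round-robin scan after the last pick */
		list_for_each_entry_continue(entity, &rq->entities, list) {
			if (!kfifo_is_empty(&entity->job_queue)) {
				rq->current_entity = entity;
				spin_unlock(&rq->lock);
				return rq->current_entity;
			}
		}
	}

	/* wrap around: scan from the head up to the last pick */
	list_for_each_entry(entity, &rq->entities, list) {
		if (!kfifo_is_empty(&entity->job_queue)) {
			rq->current_entity = entity;
			spin_unlock(&rq->lock);
			return rq->current_entity;
		}

		if (entity == rq->current_entity)
			break;
	}

	spin_unlock(&rq->lock);

	return NULL;
}

Note that every return path drops rq->lock before returning; with the locking moved inside this function, callers no longer need to know about the lock at all, which is what allows the two caller-side simplifications below.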
@@ -108,22 +115,6 @@ static bool is_scheduler_ready(struct amd_gpu_scheduler *sched)
 	return full;
 }
 
-/**
- * Select next entity from the kernel run queue, if not available, return null.
-*/
-static struct amd_sched_entity *
-kernel_rq_select_context(struct amd_gpu_scheduler *sched)
-{
-	struct amd_sched_entity *sched_entity;
-	struct amd_sched_rq *rq = &sched->kernel_rq;
-
-	mutex_lock(&rq->lock);
-	sched_entity = amd_sched_rq_select_entity(rq);
-	mutex_unlock(&rq->lock);
-
-	return sched_entity;
-}
-
 /**
  * Select next entity containing real IB submissions
 */
@@ -132,21 +123,15 @@ select_context(struct amd_gpu_scheduler *sched)
 {
 	struct amd_sched_entity *wake_entity = NULL;
 	struct amd_sched_entity *tmp;
-	struct amd_sched_rq *rq;
 
 	if (!is_scheduler_ready(sched))
 		return NULL;
 
 	/* Kernel run queue has higher priority than normal run queue*/
-	tmp = kernel_rq_select_context(sched);
-	if (tmp != NULL)
-		goto exit;
-	rq = &sched->sched_rq;
-	mutex_lock(&rq->lock);
-	tmp = amd_sched_rq_select_entity(rq);
-	mutex_unlock(&rq->lock);
-exit:
+	tmp = amd_sched_rq_select_entity(&sched->kernel_rq);
+	if (tmp == NULL)
+		tmp = amd_sched_rq_select_entity(&sched->sched_rq);
 
 	if (sched->current_entity && (sched->current_entity != tmp))
 		wake_entity = sched->current_entity;
 	sched->current_entity = tmp;
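Put together, the caller ends up lock-free and linear; a sketch of select_context() after the patch, merging the hunk above into its final form (the tail that handles wake_entity and returns is unchanged by this patch and elided here):

static struct amd_sched_entity *
select_context(struct amd_gpu_scheduler *sched)
{
	struct amd_sched_entity *wake_entity = NULL;
	struct amd_sched_entity *tmp;

	if (!is_scheduler_ready(sched))
		return NULL;

	/* Kernel run queue has higher priority than normal run queue */
	tmp = amd_sched_rq_select_entity(&sched->kernel_rq);
	if (tmp == NULL)
		tmp = amd_sched_rq_select_entity(&sched->sched_rq);

	if (sched->current_entity && (sched->current_entity != tmp))
		wake_entity = sched->current_entity;
	sched->current_entity = tmp;

	/* ... unchanged tail: wake wake_entity and return tmp ... */
}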

diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h

@@ -63,7 +63,7 @@ struct amd_sched_entity {
  * the next entity to emit commands from.
 */
 struct amd_sched_rq {
-	struct mutex		lock;
+	spinlock_t		lock;
 	struct list_head	entities;
 	struct amd_sched_entity	*current_entity;
 };
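One general locking point, not something this patch itself addresses: plain spin_lock() is only safe while the lock is never taken from interrupt context (hardirq or softirq), which presumably holds for these run-queue helpers since they are called from process and kthread context. If an interrupt-side user of rq->lock ever appeared, the irqsave variant would be needed instead, along the lines of:

	unsigned long flags;

	spin_lock_irqsave(&rq->lock, flags);	/* also masks local interrupts */
	list_add_tail(&entity->list, &rq->entities);
	spin_unlock_irqrestore(&rq->lock, flags);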