drm/amdgpu: cleanup and fix scheduler fence handling v2

v2: rebased

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com> (v1)
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
parent 91404fb208
commit 6f0e54a964
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
@@ -43,16 +43,9 @@ static int amdgpu_sched_prepare_job(struct amd_gpu_scheduler *sched,
 	return r;
 }
 
-static void amdgpu_fence_sched_cb(struct fence *f, struct fence_cb *cb)
-{
-	struct amd_sched_job *sched_job =
-		container_of(cb, struct amd_sched_job, cb);
-	amd_sched_process_job(sched_job);
-}
-
-static void amdgpu_sched_run_job(struct amd_gpu_scheduler *sched,
-				 struct amd_sched_entity *entity,
-				 struct amd_sched_job *job)
+static struct fence *amdgpu_sched_run_job(struct amd_gpu_scheduler *sched,
+					  struct amd_sched_entity *entity,
+					  struct amd_sched_job *job)
 {
 	int r = 0;
 	struct amdgpu_cs_parser *sched_job;
@@ -60,7 +53,7 @@ static void amdgpu_sched_run_job(struct amd_gpu_scheduler *sched,
 
 	if (!job || !job->job) {
 		DRM_ERROR("job is null\n");
-		return;
+		return NULL;
 	}
 	sched_job = (struct amdgpu_cs_parser *)job->job;
 	mutex_lock(&sched_job->job_lock);
@@ -70,12 +63,7 @@ static void amdgpu_sched_run_job(struct amd_gpu_scheduler *sched,
 					       sched_job->filp);
 	if (r)
 		goto err;
-	fence = sched_job->ibs[sched_job->num_ibs - 1].fence;
-	if (fence_add_callback(&fence->base,
-			       &job->cb, amdgpu_fence_sched_cb)) {
-		DRM_ERROR("fence add callback failed\n");
-		goto err;
-	}
+	fence = amdgpu_fence_ref(sched_job->ibs[sched_job->num_ibs - 1].fence);
 
 	if (sched_job->run_job) {
 		r = sched_job->run_job(sched_job);
@@ -86,11 +74,13 @@ static void amdgpu_sched_run_job(struct amd_gpu_scheduler *sched,
 	amd_sched_emit(entity, sched_job->ibs[sched_job->num_ibs - 1].sequence);
 
 	mutex_unlock(&sched_job->job_lock);
-	return;
+	return &fence->base;
+
 err:
 	DRM_ERROR("Run job error\n");
 	mutex_unlock(&sched_job->job_lock);
 	schedule_work(&sched_job->job_work);
+	return NULL;
 }
 
 static void amdgpu_sched_process_job(struct amd_gpu_scheduler *sched, void *job)
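The amdgpu side now hands a referenced fence back to the scheduler instead of installing its own callback. Roughly, the contract any backend's run_job has to follow after this patch looks like the sketch below; it borrows the types from the diff above, while submit_to_hw() and last_fence() are hypothetical helpers, not amdgpu functions:

/* Minimal sketch of the new run_job contract, under the assumptions
 * above: on success, return the hardware fence with an extra
 * reference held (the scheduler drops it with fence_put() once its
 * callback is installed); on failure, return NULL so the scheduler
 * skips the callback setup for this job. */
static struct fence *example_run_job(struct amd_gpu_scheduler *sched,
				     struct amd_sched_entity *entity,
				     struct amd_sched_job *job)
{
	struct amdgpu_fence *fence;

	if (submit_to_hw(job))		/* hypothetical submission helper */
		return NULL;

	fence = last_fence(job);	/* hypothetical: fence of the last IB */
	return &amdgpu_fence_ref(fence)->base;
}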
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -175,9 +175,9 @@ select_context(struct amd_gpu_scheduler *sched)
  * return 0 if succeed. negative error code on failure
  */
 int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
			  struct amd_sched_entity *entity,
			  struct amd_run_queue *rq,
			  uint32_t jobs)
 {
 	uint64_t seq_ring = 0;
 
@@ -353,6 +353,24 @@ int amd_sched_wait_emit(struct amd_sched_entity *c_entity,
 	return 0;
 }
 
+static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
+{
+	struct amd_sched_job *sched_job =
+		container_of(cb, struct amd_sched_job, cb);
+	struct amd_gpu_scheduler *sched;
+	unsigned long flags;
+
+	sched = sched_job->sched;
+	spin_lock_irqsave(&sched->queue_lock, flags);
+	list_del(&sched_job->list);
+	atomic64_dec(&sched->hw_rq_count);
+	spin_unlock_irqrestore(&sched->queue_lock, flags);
+
+	sched->ops->process_job(sched, sched_job->job);
+	kfree(sched_job);
+	wake_up_interruptible(&sched->wait_queue);
+}
+
 static int amd_sched_main(void *param)
 {
 	int r;
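The replacement callback in the scheduler core recovers the job from the embedded fence_cb via container_of(); since fence callbacks may fire from interrupt context, the queue lock is taken with spin_lock_irqsave(). The embedding pattern in isolation (a generic sketch; example_job, example_done and consume() are made-up names):

#include <linux/fence.h>
#include <linux/kernel.h>	/* container_of() */

struct example_job {
	struct fence_cb	cb;		/* handed to fence_add_callback() */
	void		*payload;
};

/* Matches fence_func_t: called once when the fence signals. */
static void example_done(struct fence *f, struct fence_cb *cb)
{
	/* cb points at example_job::cb, so container_of() maps it
	 * back to the job that registered the callback. */
	struct example_job *job = container_of(cb, struct example_job, cb);

	consume(job->payload);		/* hypothetical consumer */
}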
@@ -365,6 +383,8 @@ static int amd_sched_main(void *param)
 
 	while (!kthread_should_stop()) {
 		struct amd_sched_job *sched_job = NULL;
+		struct fence *fence;
+
 		wait_event_interruptible(sched->wait_queue,
 					 is_scheduler_ready(sched) &&
 					 (c_entity = select_context(sched)));
@@ -388,36 +408,21 @@ static int amd_sched_main(void *param)
 			spin_unlock_irqrestore(&sched->queue_lock, flags);
 		}
 		mutex_lock(&sched->sched_lock);
-		sched->ops->run_job(sched, c_entity, sched_job);
+		fence = sched->ops->run_job(sched, c_entity, sched_job);
+		if (fence) {
+			r = fence_add_callback(fence, &sched_job->cb,
+					       amd_sched_process_job);
+			if (r == -ENOENT)
+				amd_sched_process_job(fence, &sched_job->cb);
+			else if (r)
+				DRM_ERROR("fence add callback failed (%d)\n", r);
+			fence_put(fence);
+		}
 		mutex_unlock(&sched->sched_lock);
 	}
 	return 0;
 }
 
-/**
- * ISR to handle EOP inetrrupts
- *
- * @sched: gpu scheduler
- *
- */
-void amd_sched_process_job(struct amd_sched_job *sched_job)
-{
-	unsigned long flags;
-	struct amd_gpu_scheduler *sched;
-
-	if (!sched_job)
-		return;
-	sched = sched_job->sched;
-	spin_lock_irqsave(&sched->queue_lock, flags);
-	list_del(&sched_job->list);
-	atomic64_dec(&sched->hw_rq_count);
-	spin_unlock_irqrestore(&sched->queue_lock, flags);
-
-	sched->ops->process_job(sched, sched_job->job);
-	kfree(sched_job);
-	wake_up_interruptible(&sched->wait_queue);
-}
-
 /**
  * Create a gpu scheduler
  *
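The return-value handling in the hunk above follows the fence API semantics: fence_add_callback() returns 0 once the callback is armed, and -ENOENT when the fence has already signalled, in which case no callback will ever fire and the caller has to run it inline. Either way the scheduler drops the reference that run_job handed over, so the fence is not leaked. The same lines, condensed and annotated:

	fence = sched->ops->run_job(sched, c_entity, sched_job);
	if (fence) {
		r = fence_add_callback(fence, &sched_job->cb,
				       amd_sched_process_job);
		if (r == -ENOENT)
			/* already signalled: run the completion inline */
			amd_sched_process_job(fence, &sched_job->cb);
		else if (r)
			DRM_ERROR("fence add callback failed (%d)\n", r);
		fence_put(fence);	/* drop run_job's extra reference */
	}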
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
@@ -87,9 +87,9 @@ struct amd_sched_backend_ops {
 	int (*prepare_job)(struct amd_gpu_scheduler *sched,
 			   struct amd_sched_entity *c_entity,
 			   void *job);
-	void (*run_job)(struct amd_gpu_scheduler *sched,
-			struct amd_sched_entity *c_entity,
-			struct amd_sched_job *job);
+	struct fence *(*run_job)(struct amd_gpu_scheduler *sched,
+				 struct amd_sched_entity *c_entity,
+				 struct amd_sched_job *job);
 	void (*process_job)(struct amd_gpu_scheduler *sched, void *job);
 };
 
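With the run_job prototype changed in amd_sched_backend_ops, a backend's ops table stays wiring-compatible apart from the new return type. Roughly what the amdgpu table looks like after this patch (a sketch assembled from the function names in the hunks above, not a verbatim quote of the driver):

static struct amd_sched_backend_ops amdgpu_sched_ops = {
	.prepare_job = amdgpu_sched_prepare_job,
	.run_job = amdgpu_sched_run_job,	/* now returns struct fence * */
	.process_job = amdgpu_sched_process_job,
};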
@@ -132,7 +132,6 @@ int amd_sched_wait_emit(struct amd_sched_entity *c_entity,
 			bool intr,
 			long timeout);
 
-void amd_sched_process_job(struct amd_sched_job *sched_job);
 uint64_t amd_sched_get_handled_seq(struct amd_gpu_scheduler *sched);
 
 int amd_sched_entity_init(struct amd_gpu_scheduler *sched,