mirror of https://gitee.com/openkylin/linux.git
drm/amdgpu: fix coding style in the scheduler v2

v2: fix even more

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Monk.Liu <monk.liu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 3cc259112d
commit 16a7133f35
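The hunks below are purely cosmetic: over-long lines are wrapped so that continuation arguments line up under the opening parenthesis, the opening brace of the small inline helpers moves onto its own line, and the s_entity parameter in the fence code is shortened to entity so its uses fit on one line. A minimal sketch of the first two conventions, using a hypothetical frob_widget() that is not part of this patch:

/* Hypothetical sketch, not from this patch: the two style rules being applied. */
struct widget {
        unsigned long flags;
};

/* Previously written as one over-long line with the brace on the same line:
 * "static void frob_widget(struct widget *w, unsigned long flags) { ... }"
 */
static void frob_widget(struct widget *w,
                        unsigned long flags)
{
        w->flags = flags;
}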
@@ -320,7 +320,9 @@ static bool amd_sched_entity_in(struct amd_sched_job *sched_job)
 }
 
 static void amd_sched_free_job(struct fence *f, struct fence_cb *cb) {
-        struct amd_sched_job *job = container_of(cb, struct amd_sched_job, cb_free_job);
+        struct amd_sched_job *job = container_of(cb, struct amd_sched_job,
+                                                 cb_free_job);
+
         schedule_work(&job->work_free_job);
 }
 
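Aside: container_of(ptr, type, member), used in the reflowed line above, recovers the enclosing structure from a pointer to one of its embedded members; here it maps the fence_cb passed to the callback back to its amd_sched_job. A self-contained userspace sketch of the same pattern, with a hypothetical my_job type and a stand-in for the kernel macro:

#include <stddef.h>
#include <stdio.h>

/* Userspace stand-in for the kernel's container_of() macro. */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct my_cb {
        int pending;
};

struct my_job {
        int id;
        struct my_cb cb;        /* embedded member, like cb_free_job */
};

static void my_callback(struct my_cb *cb)
{
        /* Recover the enclosing job from the embedded callback member. */
        struct my_job *job = container_of(cb, struct my_job, cb);

        printf("callback for job %d\n", job->id);
}

int main(void)
{
        struct my_job job = { .id = 42, .cb = { .pending = 1 } };

        my_callback(&job.cb);   /* prints "callback for job 42" */
        return 0;
}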
@@ -341,7 +343,8 @@ void amd_sched_job_finish(struct amd_sched_job *s_job)
                                          struct amd_sched_job, node);
 
                 if (next) {
-                        INIT_DELAYED_WORK(&next->work_tdr, s_job->timeout_callback);
+                        INIT_DELAYED_WORK(&next->work_tdr,
+                                          s_job->timeout_callback);
                         amd_sched_job_get(next);
                         schedule_delayed_work(&next->work_tdr, sched->timeout);
                 }
@@ -353,7 +356,8 @@ void amd_sched_job_begin(struct amd_sched_job *s_job)
         struct amd_gpu_scheduler *sched = s_job->sched;
 
         if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
-                list_first_entry_or_null(&sched->ring_mirror_list, struct amd_sched_job, node) == s_job)
+            list_first_entry_or_null(&sched->ring_mirror_list,
+                                     struct amd_sched_job, node) == s_job)
         {
                 INIT_DELAYED_WORK(&s_job->work_tdr, s_job->timeout_callback);
                 amd_sched_job_get(s_job);
@@ -374,7 +378,7 @@ void amd_sched_entity_push_job(struct amd_sched_job *sched_job)
 
         sched_job->use_sched = 1;
         fence_add_callback(&sched_job->s_fence->base,
-                                &sched_job->cb_free_job, amd_sched_free_job);
+                           &sched_job->cb_free_job, amd_sched_free_job);
         trace_amd_sched_job(sched_job);
         wait_event(entity->sched->job_scheduled,
                    amd_sched_entity_in(sched_job));
@@ -382,11 +386,11 @@ void amd_sched_entity_push_job(struct amd_sched_job *sched_job)
 
 /* init a sched_job with basic field */
 int amd_sched_job_init(struct amd_sched_job *job,
-                                                struct amd_gpu_scheduler *sched,
-                                                struct amd_sched_entity *entity,
-                                                void (*timeout_cb)(struct work_struct *work),
-                                                void (*free_cb)(struct kref *refcount),
-                                                void *owner, struct fence **fence)
+                       struct amd_gpu_scheduler *sched,
+                       struct amd_sched_entity *entity,
+                       void (*timeout_cb)(struct work_struct *work),
+                       void (*free_cb)(struct kref *refcount),
+                       void *owner, struct fence **fence)
 {
         INIT_LIST_HEAD(&job->node);
         kref_init(&job->refcount);
@@ -504,7 +508,8 @@ static int amd_sched_main(void *param)
                         if (r == -ENOENT)
                                 amd_sched_process_job(fence, &s_fence->cb);
                         else if (r)
-                                DRM_ERROR("fence add callback failed (%d)\n", r);
+                                DRM_ERROR("fence add callback failed (%d)\n",
+                                          r);
                         fence_put(fence);
                 } else {
                         DRM_ERROR("Failed to run job!\n");
@@ -94,7 +94,8 @@ struct amd_sched_job {
 extern const struct fence_ops amd_sched_fence_ops;
 static inline struct amd_sched_fence *to_amd_sched_fence(struct fence *f)
 {
-        struct amd_sched_fence *__f = container_of(f, struct amd_sched_fence, base);
+        struct amd_sched_fence *__f = container_of(f, struct amd_sched_fence,
+                                                   base);
 
         if (__f->base.ops == &amd_sched_fence_ops)
                 return __f;
@@ -154,21 +155,23 @@ struct amd_sched_fence *amd_sched_fence_create(
 void amd_sched_fence_scheduled(struct amd_sched_fence *fence);
 void amd_sched_fence_signal(struct amd_sched_fence *fence);
 int amd_sched_job_init(struct amd_sched_job *job,
-                                        struct amd_gpu_scheduler *sched,
-                                        struct amd_sched_entity *entity,
-                                        void (*timeout_cb)(struct work_struct *work),
-                                        void (*free_cb)(struct kref* refcount),
-                                        void *owner, struct fence **fence);
+                       struct amd_gpu_scheduler *sched,
+                       struct amd_sched_entity *entity,
+                       void (*timeout_cb)(struct work_struct *work),
+                       void (*free_cb)(struct kref* refcount),
+                       void *owner, struct fence **fence);
 void amd_sched_job_pre_schedule(struct amd_gpu_scheduler *sched ,
-                                                                struct amd_sched_job *s_job);
+                                struct amd_sched_job *s_job);
 void amd_sched_job_finish(struct amd_sched_job *s_job);
 void amd_sched_job_begin(struct amd_sched_job *s_job);
-static inline void amd_sched_job_get(struct amd_sched_job *job) {
+static inline void amd_sched_job_get(struct amd_sched_job *job)
+{
         if (job)
                 kref_get(&job->refcount);
 }
 
-static inline void amd_sched_job_put(struct amd_sched_job *job) {
+static inline void amd_sched_job_put(struct amd_sched_job *job)
+{
         if (job)
                 kref_put(&job->refcount, job->free_callback);
 }
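Aside: the amd_sched_job_get()/amd_sched_job_put() helpers reformatted above wrap the standard kernel kref pattern: kref_init() starts the count at 1, kref_get() takes an extra reference, and kref_put() drops one and calls the release function once the count reaches zero. A minimal sketch of that pattern with a hypothetical my_job object (not the driver's code):

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>

/* Hypothetical refcounted object illustrating the kref pattern. */
struct my_job {
        struct kref refcount;
        int id;
};

static void my_job_release(struct kref *kref)
{
        struct my_job *job = container_of(kref, struct my_job, refcount);

        kfree(job);
}

static struct my_job *my_job_create(int id)
{
        struct my_job *job = kzalloc(sizeof(*job), GFP_KERNEL);

        if (!job)
                return NULL;

        kref_init(&job->refcount);      /* count starts at 1 */
        job->id = id;
        return job;
}

static void my_job_get(struct my_job *job)
{
        if (job)
                kref_get(&job->refcount);
}

static void my_job_put(struct my_job *job)
{
        if (job)
                kref_put(&job->refcount, my_job_release);       /* frees at 0 */
}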
@@ -27,7 +27,8 @@
 #include <drm/drmP.h>
 #include "gpu_scheduler.h"
 
-struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *s_entity, void *owner)
+struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *entity,
+                                               void *owner)
 {
         struct amd_sched_fence *fence = NULL;
         unsigned seq;
@@ -38,12 +39,12 @@ struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *s_entity
 
         INIT_LIST_HEAD(&fence->scheduled_cb);
         fence->owner = owner;
-        fence->sched = s_entity->sched;
+        fence->sched = entity->sched;
         spin_lock_init(&fence->lock);
 
-        seq = atomic_inc_return(&s_entity->fence_seq);
+        seq = atomic_inc_return(&entity->fence_seq);
         fence_init(&fence->base, &amd_sched_fence_ops, &fence->lock,
-                   s_entity->fence_context, seq);
+                   entity->fence_context, seq);
 
         return fence;
 }