drm/amdgpu: remove amd_sched_wait_emit v2
Not used any more.

v2: remove amd_sched_emit as well.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
parent 2983e5cef3
commit 05caae8515
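With amdgpu_sched_run_job() handing back a struct fence (the `return &fence->base;` kept in the first hunk below), callers can wait on the fence itself instead of on a per-entity sequence counter, which is what leaves amd_sched_wait_emit() with no users. A minimal sketch of such a caller, assuming the struct fence API of this kernel generation (fence_wait()/fence_wait_timeout() from <linux/fence.h>); the function name is hypothetical:

#include <linux/errno.h>
#include <linux/fence.h>
#include <linux/jiffies.h>

/* Hypothetical caller: wait on the fence returned by run_job. */
static int example_wait_for_job(struct fence *f, bool intr, long timeout_ms)
{
	long r;

	if (timeout_ms < 0)
		return fence_wait(f, intr);	/* no timeout: 0 or -errno */

	r = fence_wait_timeout(f, intr, msecs_to_jiffies(timeout_ms));
	if (r == 0)
		return -ETIMEDOUT;	/* fence did not signal in time */
	return r < 0 ? r : 0;		/* -ERESTARTSYS, or signaled */
}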
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -77,8 +77,6 @@ static struct fence *amdgpu_sched_run_job(struct amd_gpu_scheduler *sched,
 		goto err;
 	}
 
-	amd_sched_emit(entity, sched_job->ibs[sched_job->num_ibs - 1].sequence);
-
 	mutex_unlock(&sched_job->job_lock);
 	return &fence->base;
 
@@ -202,7 +202,6 @@ int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
 		return -EINVAL;
 
 	spin_lock_init(&entity->queue_lock);
-	atomic64_set(&entity->last_emitted_v_seq, seq_ring);
 	atomic64_set(&entity->last_queued_v_seq, seq_ring);
 	atomic64_set(&entity->last_signaled_v_seq, seq_ring);
 
@@ -329,53 +328,6 @@ int amd_sched_push_job(struct amd_gpu_scheduler *sched,
 	return 0;
 }
 
-/**
- * Wait for a virtual sequence number to be emitted.
- *
- * @c_entity	The pointer to a valid context entity
- * @seq		The virtual sequence number to wait
- * @intr	Interruptible or not
- * @timeout	Timeout in ms, wait infinitely if <0
- * @emit	wait for emit or signal
- *
- * return =0 signaled , <0 failed
- */
-int amd_sched_wait_emit(struct amd_sched_entity *c_entity,
-			uint64_t seq,
-			bool intr,
-			long timeout)
-{
-	atomic64_t *v_seq = &c_entity->last_emitted_v_seq;
-	wait_queue_head_t *wait_queue = &c_entity->wait_emit;
-
-	if (intr && (timeout < 0)) {
-		wait_event_interruptible(
-			*wait_queue,
-			seq <= atomic64_read(v_seq));
-		return 0;
-	} else if (intr && (timeout >= 0)) {
-		wait_event_interruptible_timeout(
-			*wait_queue,
-			seq <= atomic64_read(v_seq),
-			msecs_to_jiffies(timeout));
-		return (seq <= atomic64_read(v_seq)) ?
-			0 : -1;
-	} else if (!intr && (timeout < 0)) {
-		wait_event(
-			*wait_queue,
-			seq <= atomic64_read(v_seq));
-		return 0;
-	} else if (!intr && (timeout >= 0)) {
-		wait_event_timeout(
-			*wait_queue,
-			seq <= atomic64_read(v_seq),
-			msecs_to_jiffies(timeout));
-		return (seq <= atomic64_read(v_seq)) ?
-			0 : -1;
-	}
-	return 0;
-}
-
 static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
 {
 	struct amd_sched_job *sched_job =
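Worth noting as it goes: the removed helper's return convention was lossy. The interruptible, no-timeout branch discarded wait_event_interruptible()'s -ERESTARTSYS, and the timeout branches returned a bare -1 instead of an errno. For reference, a condensed sketch (illustration only, not part of this commit; it reuses the entity fields removed above) of the same logic with conventional return codes:

/*
 * wait_event_interruptible_timeout() already returns 0 on timeout, a
 * positive value on success, and -ERESTARTSYS on interruption, so the
 * four-way branch above collapses to two calls.
 */
static int example_wait_emitted(struct amd_sched_entity *e, uint64_t seq,
				bool intr, long timeout_ms)
{
	long t = timeout_ms < 0 ? MAX_SCHEDULE_TIMEOUT
				: msecs_to_jiffies(timeout_ms);
	long r;

	if (intr)
		r = wait_event_interruptible_timeout(e->wait_emit,
			seq <= atomic64_read(&e->last_emitted_v_seq), t);
	else
		r = wait_event_timeout(e->wait_emit,
			seq <= atomic64_read(&e->last_emitted_v_seq), t);

	if (r < 0)
		return r;		/* -ERESTARTSYS */
	return r ? 0 : -ETIMEDOUT;	/* 0 jiffies left => timed out */
}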
@@ -510,19 +462,6 @@ int amd_sched_destroy(struct amd_gpu_scheduler *sched)
 	return 0;
 }
 
-/**
- * Update emitted sequence and wake up the waiters, called by run_job
- * in driver side
- *
- * @entity	The context entity
- * @seq	The sequence number for the latest emitted job
- */
-void amd_sched_emit(struct amd_sched_entity *c_entity, uint64_t seq)
-{
-	atomic64_set(&c_entity->last_emitted_v_seq, seq);
-	wake_up_all(&c_entity->wait_emit);
-}
-
 /**
  * Get next queued sequence number
  *
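The helper removed in this hunk, amd_sched_emit(), was the standard publish-then-wake idiom on a waitqueue: store the new sequence number, then wake every sleeper so each re-evaluates its wait condition. A self-contained sketch of that pairing, with hypothetical names:

#include <linux/atomic.h>
#include <linux/types.h>
#include <linux/wait.h>

static atomic64_t example_seq = ATOMIC64_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(example_wq);

static void example_publish(u64 seq)
{
	atomic64_set(&example_seq, seq);	/* publish progress */
	wake_up_all(&example_wq);		/* re-check all waiters */
}

static void example_consume(u64 seq)
{
	/* Sleeps until example_publish() advances past seq. */
	wait_event(example_wq, seq <= atomic64_read(&example_seq));
}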
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
@@ -44,7 +44,6 @@ struct amd_sched_entity {
 	spinlock_t lock;
 	/* the virtual_seq is unique per context per ring */
 	atomic64_t last_queued_v_seq;
-	atomic64_t last_emitted_v_seq;
 	atomic64_t last_signaled_v_seq;
 	/* the job_queue maintains the jobs submitted by clients */
 	struct kfifo job_queue;
@@ -154,13 +153,6 @@ int amd_sched_push_job(struct amd_gpu_scheduler *sched,
 		       void *data,
 		       struct amd_sched_fence **fence);
 
-int amd_sched_wait_emit(struct amd_sched_entity *c_entity,
-			uint64_t seq,
-			bool intr,
-			long timeout);
-
-uint64_t amd_sched_get_handled_seq(struct amd_gpu_scheduler *sched);
-
 int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
 			  struct amd_sched_entity *entity,
 			  struct amd_run_queue *rq,
@@ -168,8 +160,6 @@ int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
 int amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
 			  struct amd_sched_entity *entity);
 
-void amd_sched_emit(struct amd_sched_entity *c_entity, uint64_t seq);
-
 uint64_t amd_sched_next_queued_seq(struct amd_sched_entity *c_entity);
 
 struct amd_sched_fence *amd_sched_fence_create(