mirror of https://gitee.com/openkylin/linux.git
drm/amdgpu: remove amdgpu_fence_wait
It was just a wrapper around fence_wait anyway.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
This commit is contained in:
parent
713293b825
commit
02bc0650bc
|
@ -439,7 +439,6 @@ int amdgpu_fence_wait_next(struct amdgpu_ring *ring);
|
|||
int amdgpu_fence_wait_empty(struct amdgpu_ring *ring);
|
||||
unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring);
|
||||
|
||||
int amdgpu_fence_wait(struct amdgpu_fence *fence, bool interruptible);
|
||||
signed long amdgpu_fence_wait_any(struct amdgpu_device *adev,
|
||||
struct amdgpu_fence **fences,
|
||||
bool intr, long t);
|
||||
|
|
|
@ -42,7 +42,7 @@ static int amdgpu_benchmark_do_move(struct amdgpu_device *adev, unsigned size,
|
|||
r = amdgpu_copy_buffer(ring, saddr, daddr, size, NULL, &fence);
|
||||
if (r)
|
||||
goto exit_do_move;
|
||||
r = amdgpu_fence_wait(fence, false);
|
||||
r = fence_wait(&fence->base, false);
|
||||
if (r)
|
||||
goto exit_do_move;
|
||||
amdgpu_fence_unref(&fence);
|
||||
|
|
|
@ -52,7 +52,7 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
|
|||
if (work->fence) {
|
||||
fence = to_amdgpu_fence(work->fence);
|
||||
if (fence) {
|
||||
r = amdgpu_fence_wait(fence, false);
|
||||
r = fence_wait(&fence->base, false);
|
||||
if (r == -EDEADLK) {
|
||||
up_read(&adev->exclusive_lock);
|
||||
r = amdgpu_gpu_reset(adev);
|
||||
|
|
|
@ -497,28 +497,6 @@ static long amdgpu_fence_ring_wait_seq_timeout(struct amdgpu_ring *ring, uint64_
|
|||
}
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* amdgpu_fence_wait - wait for a fence to signal
|
||||
*
|
||||
* @fence: amdgpu fence object
|
||||
* @intr: use interruptable sleep
|
||||
*
|
||||
* Wait for the requested fence to signal (all asics).
|
||||
* @intr selects whether to use interruptable (true) or non-interruptable
|
||||
* (false) sleep when waiting for the fence.
|
||||
* Returns 0 if the fence has passed, error for all other cases.
|
||||
*/
|
||||
int amdgpu_fence_wait(struct amdgpu_fence *fence, bool intr)
|
||||
{
|
||||
long r;
|
||||
|
||||
r = fence_wait_timeout(&fence->base, intr, MAX_SCHEDULE_TIMEOUT);
|
||||
if (r < 0)
|
||||
return r;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* amdgpu_fence_wait_next - wait for the next fence to signal
|
||||
*
|
||||
|
|
|
@ -180,7 +180,7 @@ int amdgpu_sync_rings(struct amdgpu_sync *sync,
|
|||
|
||||
if (amdgpu_enable_scheduler || (count >= AMDGPU_NUM_SYNCS)) {
|
||||
/* not enough room, wait manually */
|
||||
r = amdgpu_fence_wait(fence, false);
|
||||
r = fence_wait(&fence->base, false);
|
||||
if (r)
|
||||
return r;
|
||||
continue;
|
||||
|
@ -200,7 +200,7 @@ int amdgpu_sync_rings(struct amdgpu_sync *sync,
|
|||
if (!amdgpu_semaphore_emit_signal(other, semaphore)) {
|
||||
/* signaling wasn't successful wait manually */
|
||||
amdgpu_ring_undo(other);
|
||||
r = amdgpu_fence_wait(fence, false);
|
||||
r = fence_wait(&fence->base, false);
|
||||
if (r)
|
||||
return r;
|
||||
continue;
|
||||
|
@ -210,7 +210,7 @@ int amdgpu_sync_rings(struct amdgpu_sync *sync,
|
|||
if (!amdgpu_semaphore_emit_wait(ring, semaphore)) {
|
||||
/* waiting wasn't successful wait manually */
|
||||
amdgpu_ring_undo(other);
|
||||
r = amdgpu_fence_wait(fence, false);
|
||||
r = fence_wait(&fence->base, false);
|
||||
if (r)
|
||||
return r;
|
||||
continue;
|
||||
|
|
|
@ -116,7 +116,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
|
|||
goto out_lclean_unpin;
|
||||
}
|
||||
|
||||
r = amdgpu_fence_wait(fence, false);
|
||||
r = fence_wait(&fence->base, false);
|
||||
if (r) {
|
||||
DRM_ERROR("Failed to wait for GTT->VRAM fence %d\n", i);
|
||||
goto out_lclean_unpin;
|
||||
|
@ -161,7 +161,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
|
|||
goto out_lclean_unpin;
|
||||
}
|
||||
|
||||
r = amdgpu_fence_wait(fence, false);
|
||||
r = fence_wait(&fence->base, false);
|
||||
if (r) {
|
||||
DRM_ERROR("Failed to wait for VRAM->GTT fence %d\n", i);
|
||||
goto out_lclean_unpin;
|
||||
|
|
Loading…
Reference in New Issue