drm/amdgpu: add reference for **fence

The fence can sometimes be released while the pointer handed out
through **fence is still in use. Take an additional reference for it.

Signed-off-by: Chunming Zhou <david1.zhou@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Chunming Zhou, 2015-08-12 12:58:31 +08:00, committed by Alex Deucher
commit 281b422301, parent 1ffd265243
11 changed files with 15 additions and 3 deletions
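For context, a minimal standalone sketch of the refcounting contract this commit enforces: a producer that hands a fence back through a **fence out-parameter must take its own reference on it, and the consumer must drop that reference when done. fence_get()/fence_put() are the struct fence API of this kernel era; do_submit() and use_fence() are hypothetical stand-ins, not amdgpu functions.

/*
 * Sketch of the **fence contract (hypothetical helpers). Without the
 * fence_get(), the caller would merely borrow the submitter's
 * reference, which the job free path may drop at any time.
 */
static int submit_and_return_fence(struct fence **f)
{
	struct fence *job_fence = do_submit();	/* hypothetical; ref owned by the job */

	*f = fence_get(job_fence);	/* hand the caller its own reference */
	return 0;
}

static void consumer(void)
{
	struct fence *f;

	if (submit_and_return_fence(&f))
		return;
	use_fence(f);	/* hypothetical user of the fence */
	fence_put(f);	/* balance the fence_get() in the producer */
}

Every hunk below is one side of this contract: amdgpu_sched_ib_submit_kernel_helper() and amd_sched_push_job() take the reference, and the various callers add the matching fence_put().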

drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c

@@ -136,6 +136,7 @@ static void amdgpu_job_work_func(struct work_struct *work)
 		sched_job->free_job(sched_job);
 	mutex_unlock(&sched_job->job_lock);
 	/* after processing job, free memory */
+	fence_put(&sched_job->s_fence->base);
 	kfree(sched_job);
 }
 struct amdgpu_cs_parser *amdgpu_cs_parser_create(struct amdgpu_device *adev,

drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c

@@ -133,13 +133,13 @@ int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev,
 			return r;
 		}
 		ibs[num_ibs - 1].sequence = sched_job->s_fence->v_seq;
-		*f = &sched_job->s_fence->base;
+		*f = fence_get(&sched_job->s_fence->base);
 		mutex_unlock(&sched_job->job_lock);
 	} else {
 		r = amdgpu_ib_schedule(adev, num_ibs, ibs, owner);
 		if (r)
 			return r;
-		*f = &ibs[num_ibs - 1].fence->base;
+		*f = fence_get(&ibs[num_ibs - 1].fence->base);
 	}
 	return 0;
 }

drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c

@@ -877,7 +877,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring,
 	if (fence)
 		*fence = fence_get(f);
 	amdgpu_bo_unref(&bo);
-
+	fence_put(f);
 	if (amdgpu_enable_scheduler)
 		return 0;

drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c

@@ -415,6 +415,7 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
 		goto err;
 	if (fence)
 		*fence = fence_get(f);
+	fence_put(f);
 	if (amdgpu_enable_scheduler)
 		return 0;
 err:
@@ -481,6 +482,7 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
 		goto err;
 	if (fence)
 		*fence = fence_get(f);
+	fence_put(f);
 	if (amdgpu_enable_scheduler)
 		return 0;
 err:

drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c

@@ -366,6 +366,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
 						 &fence);
 	if (!r)
 		amdgpu_bo_fence(bo, fence, true);
+	fence_put(fence);
 	if (amdgpu_enable_scheduler) {
 		amdgpu_bo_unreserve(bo);
 		return 0;
@@ -495,6 +496,7 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
 		if (r)
 			goto error_free;
 		amdgpu_bo_fence(pd, fence, true);
+		fence_put(fence);
 	}
 	if (!amdgpu_enable_scheduler || ib->length_dw == 0) {
@@ -812,6 +814,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 		fence_put(*fence);
 		*fence = fence_get(f);
 	}
+	fence_put(f);
 	if (!amdgpu_enable_scheduler) {
 		amdgpu_ib_free(adev, ib);
 		kfree(ib);

drivers/gpu/drm/amd/amdgpu/cik_sdma.c

@@ -669,6 +669,7 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring)
 	}
 err1:
+	fence_put(f);
 	amdgpu_ib_free(adev, &ib);
 err0:
 	amdgpu_wb_free(adev, index);

drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c

@@ -2698,6 +2698,7 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring)
 	}
 err2:
+	fence_put(f);
 	amdgpu_ib_free(adev, &ib);
 err1:
 	amdgpu_gfx_scratch_free(adev, scratch);

drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c

@@ -659,6 +659,7 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring)
 		r = -EINVAL;
 	}
 err2:
+	fence_put(f);
 	amdgpu_ib_free(adev, &ib);
 err1:
 	amdgpu_gfx_scratch_free(adev, scratch);

drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c

@@ -733,6 +733,7 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring)
 	}
 err1:
+	fence_put(f);
 	amdgpu_ib_free(adev, &ib);
 err0:
 	amdgpu_wb_free(adev, index);

drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c

@@ -853,6 +853,7 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring)
 		r = -EINVAL;
 	}
 err1:
+	fence_put(f);
 	amdgpu_ib_free(adev, &ib);
 err0:
 	amdgpu_wb_free(adev, index);

drivers/gpu/drm/amd/scheduler/gpu_scheduler.c

@@ -313,6 +313,7 @@ int amd_sched_push_job(struct amd_gpu_scheduler *sched,
 		kfree(job);
 		return -EINVAL;
 	}
+	fence_get(&(*fence)->base);
 	job->s_fence = *fence;
 	while (kfifo_in_spinlocked(&c_entity->job_queue, &job, sizeof(void *),
 				   &c_entity->queue_lock) != sizeof(void *)) {