drm/amdgpu: remove ring parameter from amdgpu_job_submit
We know the ring through the entity anyway.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
Acked-by: Chunming Zhou <david1.zhou@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent eb3961a574
commit 0e28b10ff1
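For callers the change is purely mechanical: the explicit ring argument disappears, and the entity alone identifies where the job runs. A before/after sketch of one call site (taken verbatim from the amdgpu_ttm.c hunk below; error handling elided):

	/* before: the ring had to be passed explicitly */
	r = amdgpu_job_submit(job, ring, &adev->mman.entity,
			      AMDGPU_FENCE_OWNER_UNDEFINED, &fence);

	/* after: the ring is derived from the entity's scheduler */
	r = amdgpu_job_submit(job, &adev->mman.entity,
			      AMDGPU_FENCE_OWNER_UNDEFINED, &fence);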
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -117,21 +117,20 @@ void amdgpu_job_free(struct amdgpu_job *job)
 	kfree(job);
 }
 
-int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
-		      struct drm_sched_entity *entity, void *owner,
-		      struct dma_fence **f)
+int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
+		      void *owner, struct dma_fence **f)
 {
 	int r;
-	job->ring = ring;
 
 	if (!f)
 		return -EINVAL;
 
-	r = drm_sched_job_init(&job->base, &ring->sched, entity, owner);
+	r = drm_sched_job_init(&job->base, entity->sched, entity, owner);
 	if (r)
 		return r;
 
 	job->owner = owner;
+	job->ring = to_amdgpu_ring(entity->sched);
 	*f = dma_fence_get(&job->base.s_fence->finished);
 	amdgpu_job_free_resources(job);
 	amdgpu_ring_priority_get(job->ring, job->base.s_priority);
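Why the parameter is redundant: a drm_sched_entity carries a pointer to the drm_gpu_scheduler it is attached to, and in amdgpu every ring embeds its scheduler, so the ring is always recoverable from the entity. The two lines in the hunk above that do the work:

	r = drm_sched_job_init(&job->base, entity->sched, entity, owner);
	job->ring = to_amdgpu_ring(entity->sched);

This relies on an entity being attached to a single scheduler, which is the model in this kernel version.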
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
@@ -67,7 +67,6 @@ int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
 
 void amdgpu_job_free_resources(struct amdgpu_job *job);
 void amdgpu_job_free(struct amdgpu_job *job);
-int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
-		      struct drm_sched_entity *entity, void *owner,
-		      struct dma_fence **f);
+int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
+		      void *owner, struct dma_fence **f);
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -44,6 +44,8 @@
 #define AMDGPU_FENCE_FLAG_INT (1 << 1)
 #define AMDGPU_FENCE_FLAG_TC_WB_ONLY (1 << 2)
 
+#define to_amdgpu_ring(s) container_of((s), struct amdgpu_ring, sched)
+
 enum amdgpu_ring_type {
 	AMDGPU_RING_TYPE_GFX,
 	AMDGPU_RING_TYPE_COMPUTE,
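The new to_amdgpu_ring() helper is the standard container_of() pattern: given a pointer to a member embedded in a larger struct, subtract the member's offset to recover the containing struct. A self-contained userspace sketch of the same idiom (the struct and field names here are illustrative stand-ins, not the kernel's, and this simplified macro omits the kernel's type checking):

	#include <stdio.h>
	#include <stddef.h>

	/* Same idiom as the kernel's container_of(): subtract the member's
	 * offset from the member's address to get the enclosing struct. */
	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct scheduler {		/* stands in for struct drm_gpu_scheduler */
		int num_jobs;
	};

	struct ring {			/* stands in for struct amdgpu_ring */
		const char *name;
		struct scheduler sched;	/* scheduler embedded in the ring */
	};

	#define to_ring(s) container_of((s), struct ring, sched)

	int main(void)
	{
		struct ring r = { .name = "gfx", .sched = { .num_jobs = 0 } };
		struct scheduler *s = &r.sched;	/* all an entity would hold */

		/* Recover the ring from the embedded scheduler pointer. */
		printf("ring: %s\n", to_ring(s)->name);
		return 0;
	}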
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -2006,7 +2006,7 @@ static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
 	if (r)
 		goto error_free;
 
-	r = amdgpu_job_submit(job, ring, &adev->mman.entity,
+	r = amdgpu_job_submit(job, &adev->mman.entity,
 			      AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
 	if (r)
 		goto error_free;
@@ -2083,7 +2083,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
 			DRM_ERROR("Error scheduling IBs (%d)\n", r);
 		amdgpu_job_free(job);
 	} else {
-		r = amdgpu_job_submit(job, ring, &adev->mman.entity,
+		r = amdgpu_job_submit(job, &adev->mman.entity,
 				      AMDGPU_FENCE_OWNER_UNDEFINED, fence);
 		if (r)
 			goto error_free;
@@ -2175,7 +2175,7 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
 
 	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
 	WARN_ON(job->ibs[0].length_dw > num_dw);
-	r = amdgpu_job_submit(job, ring, &adev->mman.entity,
+	r = amdgpu_job_submit(job, &adev->mman.entity,
 			      AMDGPU_FENCE_OWNER_UNDEFINED, fence);
 	if (r)
 		goto error_free;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -1074,7 +1074,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
 	if (r)
 		goto err_free;
 
-	r = amdgpu_job_submit(job, ring, &adev->uvd.inst[ring->me].entity,
+	r = amdgpu_job_submit(job, &adev->uvd.inst[ring->me].entity,
 			      AMDGPU_FENCE_OWNER_UNDEFINED, &f);
 	if (r)
 		goto err_free;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -539,7 +539,7 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
 
 		amdgpu_job_free(job);
 	} else {
-		r = amdgpu_job_submit(job, ring, &ring->adev->vce.entity,
+		r = amdgpu_job_submit(job, &ring->adev->vce.entity,
 				      AMDGPU_FENCE_OWNER_UNDEFINED, &f);
 		if (r)
 			goto err;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -425,8 +425,8 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
 	if (r)
 		goto error_free;
 
-	r = amdgpu_job_submit(job, ring, &vm->entity,
-			      AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
+	r = amdgpu_job_submit(job, &vm->entity, AMDGPU_FENCE_OWNER_UNDEFINED,
+			      &fence);
 	if (r)
 		goto error_free;
 
@@ -1120,8 +1120,8 @@ int amdgpu_vm_update_directories(struct amdgpu_device *adev,
 	amdgpu_sync_resv(adev, &job->sync, root->tbo.resv,
 			 AMDGPU_FENCE_OWNER_VM, false);
 	WARN_ON(params.ib->length_dw > ndw);
-	r = amdgpu_job_submit(job, ring, &vm->entity,
-			      AMDGPU_FENCE_OWNER_VM, &fence);
+	r = amdgpu_job_submit(job, &vm->entity, AMDGPU_FENCE_OWNER_VM,
+			      &fence);
 	if (r)
 		goto error;
 
@@ -1485,8 +1485,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 
 	amdgpu_ring_pad_ib(ring, params.ib);
 	WARN_ON(params.ib->length_dw > ndw);
-	r = amdgpu_job_submit(job, ring, &vm->entity,
-			      AMDGPU_FENCE_OWNER_VM, &f);
+	r = amdgpu_job_submit(job, &vm->entity, AMDGPU_FENCE_OWNER_VM, &f);
 	if (r)
 		goto error_free;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -320,7 +320,7 @@ static int uvd_v6_0_enc_get_destroy_msg(struct amdgpu_ring *ring,
 
 		amdgpu_job_free(job);
 	} else {
-		r = amdgpu_job_submit(job, ring, &ring->adev->vce.entity,
+		r = amdgpu_job_submit(job, &ring->adev->vce.entity,
 				      AMDGPU_FENCE_OWNER_UNDEFINED, &f);
 		if (r)
 			goto err;
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
@@ -321,7 +321,7 @@ int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
 
 		amdgpu_job_free(job);
 	} else {
-		r = amdgpu_job_submit(job, ring, &ring->adev->vce.entity,
+		r = amdgpu_job_submit(job, &ring->adev->vce.entity,
 				      AMDGPU_FENCE_OWNER_UNDEFINED, &f);
 		if (r)
 			goto err;