drm/amdgpu: cleanup in kernel job submission

Add a job_alloc_with_ib helper and proper job submission.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Author: Christian König <christian.koenig@amd.com>
Date: 2016-02-01 12:20:25 +01:00
Committed-by: Alex Deucher <alexander.deucher@amd.com>
commit d71518b5aa (parent a0332b56f6)
7 changed files with 112 additions and 205 deletions
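In practice, every kernel-internal IB submission in the hunks below moves from the open-coded kzalloc/amdgpu_ib_get/amdgpu_sched_ib_submit_kernel_helper sequence (with a per-caller free_job callback) to the two new helpers. A minimal sketch of the resulting pattern, assembled from the hunks below; the 64-byte IB size and the surrounding variables (adev, ring, fence) stand in for whatever the caller already has:

	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct fence *fence = NULL;
	int r;

	/* One call now allocates the job together with its single IB. */
	r = amdgpu_job_alloc_with_ib(adev, 64, &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	/* ... fill ib->ptr[], set ib->length_dw, sync against a reservation if needed ... */

	/* Submission goes through the common job path; on success the
	 * scheduler owns the job and frees it once it has run. */
	r = amdgpu_job_submit(job, ring, AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
	if (r)
		goto error_free;
	return 0;

error_free:
	/* Only the error path frees the job by hand. */
	amdgpu_job_free(job);
	return r;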


@@ -797,14 +797,11 @@ extern struct amd_sched_backend_ops amdgpu_sched_ops;
int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
struct amdgpu_job **job);
int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
struct amdgpu_job **job);
void amdgpu_job_free(struct amdgpu_job *job);
int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev,
struct amdgpu_ring *ring,
struct amdgpu_ib *ibs,
unsigned num_ibs,
int (*free_job)(struct amdgpu_job *),
void *owner,
struct fence **fence);
int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
void *owner, struct fence **f);
struct amdgpu_ring {
struct amdgpu_device *adev;
@@ -987,7 +984,6 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
uint64_t addr);
void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va);
int amdgpu_vm_free_job(struct amdgpu_job *job);
/*
* context related structures
@@ -1244,7 +1240,6 @@ struct amdgpu_job {
uint32_t num_ibs;
void *owner;
struct amdgpu_user_fence uf;
int (*free_job)(struct amdgpu_job *job);
};
#define to_amdgpu_job(sched_job) \
container_of((sched_job), struct amdgpu_job, base)


@@ -752,12 +752,6 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
return 0;
}
static int amdgpu_cs_free_job(struct amdgpu_job *job)
{
amdgpu_job_free(job);
return 0;
}
static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
union drm_amdgpu_cs *cs)
{
@@ -771,12 +765,10 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
job->base.sched = &ring->sched;
job->base.s_entity = &p->ctx->rings[ring->idx].entity;
job->owner = p->filp;
job->free_job = amdgpu_cs_free_job;
fence = amd_sched_fence_create(job->base.s_entity, p->filp);
if (!fence) {
amdgpu_cs_free_job(job);
kfree(job);
amdgpu_job_free(job);
return -ENOMEM;
}


@@ -45,11 +45,26 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
(*job)->adev = adev;
(*job)->ibs = (void *)&(*job)[1];
(*job)->num_ibs = num_ibs;
(*job)->free_job = NULL;
return 0;
}
int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
struct amdgpu_job **job)
{
int r;
r = amdgpu_job_alloc(adev, 1, job);
if (r)
return r;
r = amdgpu_ib_get(adev, NULL, size, &(*job)->ibs[0]);
if (r)
kfree(*job);
return r;
}
void amdgpu_job_free(struct amdgpu_job *job)
{
unsigned i;
@@ -58,7 +73,27 @@ void amdgpu_job_free(struct amdgpu_job *job)
amdgpu_ib_free(job->adev, &job->ibs[i]);
amdgpu_bo_unref(&job->uf.bo);
/* TODO: Free the job structure here as well */
kfree(job);
}
int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
void *owner, struct fence **f)
{
struct amdgpu_device *adev = job->adev;
job->ring = ring;
job->base.sched = &ring->sched;
job->base.s_entity = &adev->kernel_ctx.rings[ring->idx].entity;
job->base.s_fence = amd_sched_fence_create(job->base.s_entity, owner);
if (!job->base.s_fence)
return -ENOMEM;
*f = fence_get(&job->base.s_fence->base);
job->owner = owner;
amd_sched_entity_push_job(&job->base);
return 0;
}
static struct fence *amdgpu_sched_dependency(struct amd_sched_job *sched_job)
@@ -106,10 +141,7 @@ static struct fence *amdgpu_sched_run_job(struct amd_sched_job *sched_job)
}
err:
if (job->free_job)
job->free_job(job);
kfree(job);
amdgpu_job_free(job);
return fence;
}
@@ -117,35 +149,3 @@ struct amd_sched_backend_ops amdgpu_sched_ops = {
.dependency = amdgpu_sched_dependency,
.run_job = amdgpu_sched_run_job,
};
int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev,
struct amdgpu_ring *ring,
struct amdgpu_ib *ibs,
unsigned num_ibs,
int (*free_job)(struct amdgpu_job *),
void *owner,
struct fence **f)
{
struct amdgpu_job *job =
kzalloc(sizeof(struct amdgpu_job), GFP_KERNEL);
if (!job)
return -ENOMEM;
job->base.sched = &ring->sched;
job->base.s_entity = &adev->kernel_ctx.rings[ring->idx].entity;
job->base.s_fence = amd_sched_fence_create(job->base.s_entity, owner);
if (!job->base.s_fence) {
kfree(job);
return -ENOMEM;
}
*f = fence_get(&job->base.s_fence->base);
job->adev = adev;
job->ring = ring;
job->ibs = ibs;
job->num_ibs = num_ibs;
job->owner = owner;
job->free_job = free_job;
amd_sched_entity_push_job(&job->base);
return 0;
}


@@ -1012,9 +1012,10 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring,
struct fence **fence)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_job *job;
uint32_t max_bytes;
unsigned num_loops, num_dw;
struct amdgpu_ib *ib;
unsigned i;
int r;
@@ -1026,20 +1027,12 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring,
while (num_dw & 0x7)
num_dw++;
ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL);
if (!ib)
return -ENOMEM;
r = amdgpu_ib_get(adev, NULL, num_dw * 4, ib);
if (r) {
kfree(ib);
r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, &job);
if (r)
return r;
}
ib->length_dw = 0;
if (resv) {
r = amdgpu_sync_resv(adev, &ib->sync, resv,
r = amdgpu_sync_resv(adev, &job->ibs[0].sync, resv,
AMDGPU_FENCE_OWNER_UNDEFINED);
if (r) {
DRM_ERROR("sync failed (%d).\n", r);
@@ -1050,27 +1043,24 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring,
for (i = 0; i < num_loops; i++) {
uint32_t cur_size_in_bytes = min(byte_count, max_bytes);
amdgpu_emit_copy_buffer(adev, ib, src_offset, dst_offset,
cur_size_in_bytes);
amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_offset,
dst_offset, cur_size_in_bytes);
src_offset += cur_size_in_bytes;
dst_offset += cur_size_in_bytes;
byte_count -= cur_size_in_bytes;
}
amdgpu_ring_pad_ib(ring, ib);
WARN_ON(ib->length_dw > num_dw);
r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
&amdgpu_vm_free_job,
AMDGPU_FENCE_OWNER_UNDEFINED,
fence);
amdgpu_ring_pad_ib(ring, &job->ibs[0]);
WARN_ON(job->ibs[0].length_dw > num_dw);
r = amdgpu_job_submit(job, ring, AMDGPU_FENCE_OWNER_UNDEFINED, fence);
if (r)
goto error_free;
return 0;
error_free:
amdgpu_ib_free(adev, ib);
kfree(ib);
amdgpu_job_free(job);
return r;
}


@@ -823,14 +823,6 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
return 0;
}
static int amdgpu_uvd_free_job(
struct amdgpu_job *job)
{
amdgpu_ib_free(job->adev, job->ibs);
kfree(job->ibs);
return 0;
}
static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring,
struct amdgpu_bo *bo,
struct fence **fence)
@@ -838,7 +830,8 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring,
struct ttm_validate_buffer tv;
struct ww_acquire_ctx ticket;
struct list_head head;
struct amdgpu_ib *ib = NULL;
struct amdgpu_job *job;
struct amdgpu_ib *ib;
struct fence *f = NULL;
struct amdgpu_device *adev = ring->adev;
uint64_t addr;
@@ -862,15 +855,12 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring,
r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
if (r)
goto err;
ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL);
if (!ib) {
r = -ENOMEM;
goto err;
}
r = amdgpu_ib_get(adev, NULL, 64, ib);
if (r)
goto err1;
r = amdgpu_job_alloc_with_ib(adev, 64, &job);
if (r)
goto err;
ib = &job->ibs[0];
addr = amdgpu_bo_gpu_offset(bo);
ib->ptr[0] = PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0);
ib->ptr[1] = addr;
@@ -882,12 +872,9 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring,
ib->ptr[i] = PACKET2(0);
ib->length_dw = 16;
r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
&amdgpu_uvd_free_job,
AMDGPU_FENCE_OWNER_UNDEFINED,
&f);
r = amdgpu_job_submit(job, ring, AMDGPU_FENCE_OWNER_UNDEFINED, &f);
if (r)
goto err2;
goto err_free;
ttm_eu_fence_buffer_objects(&ticket, &head, f);
@@ -897,10 +884,10 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring,
fence_put(f);
return 0;
err2:
amdgpu_ib_free(ring->adev, ib);
err1:
kfree(ib);
err_free:
amdgpu_job_free(job);
err:
ttm_eu_backoff_reservation(&ticket, &head);
return r;


@@ -346,14 +346,6 @@ void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
}
}
static int amdgpu_vce_free_job(
struct amdgpu_job *job)
{
amdgpu_ib_free(job->adev, job->ibs);
kfree(job->ibs);
return 0;
}
/**
* amdgpu_vce_get_create_msg - generate a VCE create msg
*
@@ -368,21 +360,17 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
struct fence **fence)
{
const unsigned ib_size_dw = 1024;
struct amdgpu_ib *ib = NULL;
struct amdgpu_job *job;
struct amdgpu_ib *ib;
struct fence *f = NULL;
struct amdgpu_device *adev = ring->adev;
uint64_t dummy;
int i, r;
ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL);
if (!ib)
return -ENOMEM;
r = amdgpu_ib_get(adev, NULL, ib_size_dw * 4, ib);
if (r) {
DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
kfree(ib);
r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
if (r)
return r;
}
ib = &job->ibs[0];
dummy = ib->gpu_addr + 1024;
@@ -423,19 +411,16 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
for (i = ib->length_dw; i < ib_size_dw; ++i)
ib->ptr[i] = 0x0;
r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
&amdgpu_vce_free_job,
AMDGPU_FENCE_OWNER_UNDEFINED,
&f);
r = amdgpu_job_submit(job, ring, AMDGPU_FENCE_OWNER_UNDEFINED, &f);
if (r)
goto err;
if (fence)
*fence = fence_get(f);
fence_put(f);
return 0;
err:
amdgpu_ib_free(adev, ib);
kfree(ib);
amdgpu_job_free(job);
return r;
}
@@ -453,23 +438,17 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
struct fence **fence)
{
const unsigned ib_size_dw = 1024;
struct amdgpu_ib *ib = NULL;
struct amdgpu_job *job;
struct amdgpu_ib *ib;
struct fence *f = NULL;
struct amdgpu_device *adev = ring->adev;
uint64_t dummy;
int i, r;
ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL);
if (!ib)
return -ENOMEM;
r = amdgpu_ib_get(adev, NULL, ib_size_dw * 4, ib);
if (r) {
kfree(ib);
DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
if (r)
return r;
}
ib = &job->ibs[0];
dummy = ib->gpu_addr + 1024;
/* stitch together an VCE destroy msg */
@@ -489,19 +468,16 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
for (i = ib->length_dw; i < ib_size_dw; ++i)
ib->ptr[i] = 0x0;
r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
&amdgpu_vce_free_job,
AMDGPU_FENCE_OWNER_UNDEFINED,
&f);
r = amdgpu_job_submit(job, ring, AMDGPU_FENCE_OWNER_UNDEFINED, &f);
if (r)
goto err;
if (fence)
*fence = fence_get(f);
fence_put(f);
return 0;
err:
amdgpu_ib_free(adev, ib);
kfree(ib);
amdgpu_job_free(job);
return r;
}


@@ -313,15 +313,6 @@ static void amdgpu_vm_update_pages(struct amdgpu_device *adev,
}
}
int amdgpu_vm_free_job(struct amdgpu_job *job)
{
int i;
for (i = 0; i < job->num_ibs; i++)
amdgpu_ib_free(job->adev, &job->ibs[i]);
kfree(job->ibs);
return 0;
}
/**
* amdgpu_vm_clear_bo - initially clear the page dir/table
*
@@ -335,7 +326,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
{
struct amdgpu_ring *ring = adev->vm_manager.vm_pte_funcs_ring;
struct fence *fence = NULL;
struct amdgpu_ib *ib;
struct amdgpu_job *job;
unsigned entries;
uint64_t addr;
int r;
@@ -351,32 +342,25 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
addr = amdgpu_bo_gpu_offset(bo);
entries = amdgpu_bo_size(bo) / 8;
ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL);
if (!ib)
r = amdgpu_job_alloc_with_ib(adev, 64, &job);
if (r)
goto error;
r = amdgpu_ib_get(adev, NULL, 64, ib);
amdgpu_vm_update_pages(adev, NULL, 0, &job->ibs[0], addr, 0, entries,
0, 0);
amdgpu_ring_pad_ib(ring, &job->ibs[0]);
WARN_ON(job->ibs[0].length_dw > 64);
r = amdgpu_job_submit(job, ring, AMDGPU_FENCE_OWNER_VM, &fence);
if (r)
goto error_free;
ib->length_dw = 0;
amdgpu_vm_update_pages(adev, NULL, 0, ib, addr, 0, entries, 0, 0);
amdgpu_ring_pad_ib(ring, ib);
WARN_ON(ib->length_dw > 64);
r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
&amdgpu_vm_free_job,
AMDGPU_FENCE_OWNER_VM,
&fence);
if (!r)
amdgpu_bo_fence(bo, fence, true);
amdgpu_bo_fence(bo, fence, true);
fence_put(fence);
return 0;
error_free:
amdgpu_ib_free(adev, ib);
kfree(ib);
amdgpu_job_free(job);
error:
return r;
@@ -433,6 +417,7 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
uint32_t incr = AMDGPU_VM_PTE_COUNT * 8;
uint64_t last_pde = ~0, last_pt = ~0;
unsigned count = 0, pt_idx, ndw;
struct amdgpu_job *job;
struct amdgpu_ib *ib;
struct fence *fence = NULL;
@@ -444,16 +429,11 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
/* assume the worst case */
ndw += vm->max_pde_used * 6;
ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL);
if (!ib)
return -ENOMEM;
r = amdgpu_ib_get(adev, NULL, ndw * 4, ib);
if (r) {
kfree(ib);
r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
if (r)
return r;
}
ib->length_dw = 0;
ib = &job->ibs[0];
/* walk over the address space and update the page directory */
for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) {
@@ -495,10 +475,7 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
amdgpu_ring_pad_ib(ring, ib);
amdgpu_sync_resv(adev, &ib->sync, pd->tbo.resv, AMDGPU_FENCE_OWNER_VM);
WARN_ON(ib->length_dw > ndw);
r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
&amdgpu_vm_free_job,
AMDGPU_FENCE_OWNER_VM,
&fence);
r = amdgpu_job_submit(job, ring, AMDGPU_FENCE_OWNER_VM, &fence);
if (r)
goto error_free;
@@ -506,18 +483,15 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
fence_put(vm->page_directory_fence);
vm->page_directory_fence = fence_get(fence);
fence_put(fence);
}
if (ib->length_dw == 0) {
amdgpu_ib_free(adev, ib);
kfree(ib);
} else {
amdgpu_job_free(job);
}
return 0;
error_free:
amdgpu_ib_free(adev, ib);
kfree(ib);
amdgpu_job_free(job);
return r;
}
@@ -695,6 +669,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
struct amdgpu_ring *ring = adev->vm_manager.vm_pte_funcs_ring;
void *owner = AMDGPU_FENCE_OWNER_VM;
unsigned nptes, ncmds, ndw;
struct amdgpu_job *job;
struct amdgpu_ib *ib;
struct fence *f = NULL;
int r;
@@ -733,15 +708,11 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
ndw += 2 * 10;
}
ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL);
if (!ib)
return -ENOMEM;
r = amdgpu_ib_get(adev, NULL, ndw * 4, ib);
if (r) {
kfree(ib);
r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
if (r)
return r;
}
ib = &job->ibs[0];
r = amdgpu_sync_resv(adev, &ib->sync, vm->page_directory->tbo.resv,
owner);
@@ -757,10 +728,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
amdgpu_ring_pad_ib(ring, ib);
WARN_ON(ib->length_dw > ndw);
r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
&amdgpu_vm_free_job,
AMDGPU_FENCE_OWNER_VM,
&f);
r = amdgpu_job_submit(job, ring, AMDGPU_FENCE_OWNER_VM, &f);
if (r)
goto error_free;
@@ -773,8 +741,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
return 0;
error_free:
amdgpu_ib_free(adev, ib);
kfree(ib);
amdgpu_job_free(job);
return r;
}