drm/amdgpu: move VM fields into job

The VM ID, page directory address and the GDS/GWS/OA parameters are the same for all IBs of a job, so track them once in the job instead of duplicating them in every IB.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Author: Christian König, 2016-05-06 17:50:03 +02:00 (committed by Alex Deucher)
Commit: d88bf583bd, parent: 92f250989b
14 changed files with 66 additions and 66 deletions
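
The heart of the change, as a minimal sketch (member lists are trimmed and unrelated fields omitted; these are not the full kernel definitions): the per-VM submission state moves out of struct amdgpu_ib and into struct amdgpu_job, since every IB of a job shares it.

    #include <stdint.h>

    /* Before: each IB carried its own copy of the VM state. */
    struct amdgpu_ib_before {
        uint64_t gpu_addr;
        uint32_t *ptr;
        unsigned vm_id;          /* duplicated in every IB */
        uint64_t vm_pd_addr;     /* duplicated in every IB */
        uint32_t gds_base, gds_size;
        uint32_t gws_base, gws_size;
        uint32_t oa_base, oa_size;
        uint32_t flags;
    };

    /* After: the IB keeps only what is really per-IB ... */
    struct amdgpu_ib_after {
        uint64_t gpu_addr;
        uint32_t *ptr;
        uint32_t flags;
    };

    /* ... and the shared VM state lives once in the job. */
    struct amdgpu_job_after {
        uint32_t num_ibs;
        unsigned vm_id;
        uint64_t vm_pd_addr;
        uint32_t gds_base, gds_size;
        uint32_t gws_base, gws_size;
        uint32_t oa_base, oa_size;
    };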

drivers/gpu/drm/amd/amdgpu/amdgpu.h

@@ -283,7 +283,8 @@ struct amdgpu_ring_funcs {
int (*parse_cs)(struct amdgpu_cs_parser *p, uint32_t ib_idx);
/* command emit functions */
void (*emit_ib)(struct amdgpu_ring *ring,
struct amdgpu_ib *ib, bool ctx_switch);
struct amdgpu_ib *ib,
unsigned vm_id, bool ctx_switch);
void (*emit_fence)(struct amdgpu_ring *ring, uint64_t addr,
uint64_t seq, unsigned flags);
void (*emit_pipeline_sync)(struct amdgpu_ring *ring);
@@ -741,11 +742,6 @@ struct amdgpu_ib {
uint64_t gpu_addr;
uint32_t *ptr;
struct amdgpu_user_fence *user;
unsigned vm_id;
uint64_t vm_pd_addr;
uint32_t gds_base, gds_size;
uint32_t gws_base, gws_size;
uint32_t oa_base, oa_size;
uint32_t flags;
/* resulting sequence number */
uint64_t sequence;
@@ -1262,6 +1258,11 @@ struct amdgpu_job {
uint32_t num_ibs;
void *owner;
uint64_t ctx;
unsigned vm_id;
uint64_t vm_pd_addr;
uint32_t gds_base, gds_size;
uint32_t gws_base, gws_size;
uint32_t oa_base, oa_size;
struct amdgpu_user_fence uf;
};
#define to_amdgpu_job(sched_job) \
@@ -2221,7 +2222,7 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
#define amdgpu_ring_get_rptr(r) (r)->funcs->get_rptr((r))
#define amdgpu_ring_get_wptr(r) (r)->funcs->get_wptr((r))
#define amdgpu_ring_set_wptr(r) (r)->funcs->set_wptr((r))
#define amdgpu_ring_emit_ib(r, ib, c) (r)->funcs->emit_ib((r), (ib), (c))
#define amdgpu_ring_emit_ib(r, ib, vm_id, c) (r)->funcs->emit_ib((r), (ib), (vm_id), (c))
#define amdgpu_ring_emit_pipeline_sync(r) (r)->funcs->emit_pipeline_sync((r))
#define amdgpu_ring_emit_vm_flush(r, vmid, addr) (r)->funcs->emit_vm_flush((r), (vmid), (addr))
#define amdgpu_ring_emit_fence(r, addr, seq, flags) (r)->funcs->emit_fence((r), (addr), (seq), (flags))
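
A hedged usage sketch of the reworked callback: emit_ib and the amdgpu_ring_emit_ib() wrapper now take the VM ID from the caller instead of reading ib->vm_id. The stub types below are illustrative stand-ins, not the kernel structures.

    #include <stdbool.h>
    #include <stdint.h>

    struct amdgpu_ring;

    struct amdgpu_ib_stub {
        uint64_t gpu_addr;
    };

    /* New callback shape: vm_id is an explicit parameter. */
    struct ring_funcs_stub {
        void (*emit_ib)(struct amdgpu_ring *ring, struct amdgpu_ib_stub *ib,
                        unsigned vm_id, bool ctx_switch);
    };

    struct amdgpu_ring {
        const struct ring_funcs_stub *funcs;
    };

    /* The wrapper macro simply forwards the extra argument. */
    #define ring_emit_ib(r, ib, vm_id, c) \
        (r)->funcs->emit_ib((r), (ib), (vm_id), (c))

    /* Illustrative caller: the ID comes from the job (0 if there is none). */
    static void emit_one(struct amdgpu_ring *ring, struct amdgpu_ib_stub *ib,
                         unsigned job_vm_id)
    {
        ring_emit_ib(ring, ib, job_vm_id, true);
    }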

drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c

@@ -473,6 +473,9 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
goto error_validate;
if (p->bo_list) {
struct amdgpu_bo *gds = p->bo_list->gds_obj;
struct amdgpu_bo *gws = p->bo_list->gws_obj;
struct amdgpu_bo *oa = p->bo_list->oa_obj;
struct amdgpu_vm *vm = &fpriv->vm;
unsigned i;
@@ -481,6 +484,19 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
p->bo_list->array[i].bo_va = amdgpu_vm_bo_find(vm, bo);
}
if (gds) {
p->job->gds_base = amdgpu_bo_gpu_offset(gds);
p->job->gds_size = amdgpu_bo_size(gds);
}
if (gws) {
p->job->gws_base = amdgpu_bo_gpu_offset(gws);
p->job->gws_size = amdgpu_bo_size(gws);
}
if (oa) {
p->job->oa_base = amdgpu_bo_gpu_offset(oa);
p->job->oa_size = amdgpu_bo_size(oa);
}
}
error_validate:
@@ -744,26 +760,6 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
j++;
}
/* add GDS resources to first IB */
if (parser->bo_list) {
struct amdgpu_bo *gds = parser->bo_list->gds_obj;
struct amdgpu_bo *gws = parser->bo_list->gws_obj;
struct amdgpu_bo *oa = parser->bo_list->oa_obj;
struct amdgpu_ib *ib = &parser->job->ibs[0];
if (gds) {
ib->gds_base = amdgpu_bo_gpu_offset(gds);
ib->gds_size = amdgpu_bo_size(gds);
}
if (gws) {
ib->gws_base = amdgpu_bo_gpu_offset(gws);
ib->gws_size = amdgpu_bo_size(gws);
}
if (oa) {
ib->oa_base = amdgpu_bo_gpu_offset(oa);
ib->oa_size = amdgpu_bo_size(oa);
}
}
/* wrap the last IB with user fence */
if (parser->job->uf.bo) {
struct amdgpu_ib *ib = &parser->job->ibs[parser->job->num_ibs - 1];
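
The GDS/GWS/OA setup that amdgpu_cs_ib_fill() used to apply to the first IB now happens in amdgpu_cs_parser_bos() against the job, as the two hunks above show. A minimal sketch of that placement, with hypothetical reduced types standing in for the kernel ones:

    #include <stdint.h>

    /* Hypothetical, heavily reduced stand-ins for the kernel types. */
    struct special_bo { uint64_t gpu_offset; uint64_t size; };
    struct cs_job {
        uint32_t gds_base, gds_size;
        uint32_t gws_base, gws_size;
        uint32_t oa_base, oa_size;
    };

    /* For each special buffer in the BO list, record offset and size in the
     * job; before this patch the same values were written into ibs[0]. */
    static void record_special_bos(struct cs_job *job,
                                   const struct special_bo *gds,
                                   const struct special_bo *gws,
                                   const struct special_bo *oa)
    {
        if (gds) {
            job->gds_base = (uint32_t)gds->gpu_offset;
            job->gds_size = (uint32_t)gds->size;
        }
        if (gws) {
            job->gws_base = (uint32_t)gws->gpu_offset;
            job->gws_size = (uint32_t)gws->size;
        }
        if (oa) {
            job->oa_base = (uint32_t)oa->gpu_offset;
            job->oa_size = (uint32_t)oa->size;
        }
    }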

drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c

@@ -74,8 +74,6 @@ int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);
}
ib->vm_id = 0;
return 0;
}
@@ -147,7 +145,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
return -EINVAL;
}
if (vm && !ibs->vm_id) {
if (vm && !job->vm_id) {
dev_err(adev->dev, "VM IB without ID\n");
return -EINVAL;
}
@@ -162,10 +160,10 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
patch_offset = amdgpu_ring_init_cond_exec(ring);
if (vm) {
r = amdgpu_vm_flush(ring, ib->vm_id, ib->vm_pd_addr,
ib->gds_base, ib->gds_size,
ib->gws_base, ib->gws_size,
ib->oa_base, ib->oa_size);
r = amdgpu_vm_flush(ring, job->vm_id, job->vm_pd_addr,
job->gds_base, job->gds_size,
job->gws_base, job->gws_size,
job->oa_base, job->oa_size);
if (r) {
amdgpu_ring_undo(ring);
return r;
@@ -187,7 +185,8 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) && skip_preamble)
continue;
amdgpu_ring_emit_ib(ring, ib, need_ctx_switch);
amdgpu_ring_emit_ib(ring, ib, job ? job->vm_id : 0,
need_ctx_switch);
need_ctx_switch = false;
}
@@ -197,8 +196,8 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
r = amdgpu_fence_emit(ring, &hwf);
if (r) {
dev_err(adev->dev, "failed to emit fence (%d)\n", r);
if (ib->vm_id)
amdgpu_vm_reset_id(adev, ib->vm_id);
if (job && job->vm_id)
amdgpu_vm_reset_id(adev, job->vm_id);
amdgpu_ring_undo(ring);
return r;
}
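
amdgpu_ib_schedule() now takes all VM parameters from the job instead of the first IB, and forwards job->vm_id (or 0 when scheduling without a job) to every emitted IB. A simplified control-flow sketch under those assumptions; the helpers are trivial stand-ins, not the kernel functions:

    #include <stdbool.h>
    #include <stdint.h>

    struct sched_ib  { uint32_t flags; };
    struct sched_job {
        unsigned vm_id;
        uint64_t vm_pd_addr;
        uint32_t gds_base, gds_size;
        uint32_t gws_base, gws_size;
        uint32_t oa_base, oa_size;
    };

    /* Trivial stand-ins for amdgpu_vm_flush() and the ring emit call. */
    static int vm_flush(unsigned vm_id, uint64_t pd_addr)
    {
        (void)vm_id; (void)pd_addr;
        return 0;
    }

    static void emit_ib(struct sched_ib *ib, unsigned vm_id, bool ctx_switch)
    {
        (void)ib; (void)vm_id; (void)ctx_switch;
    }

    /* Sketch of the post-patch scheduling path: the VM state is read once
     * from the job, and each IB is emitted with the job's VM ID. */
    static int schedule_ibs(struct sched_job *job, struct sched_ib *ibs,
                            unsigned num_ibs, bool uses_vm)
    {
        bool need_ctx_switch = true;   /* simplified; the driver compares contexts */
        unsigned i;
        int r;

        if (uses_vm && (!job || !job->vm_id))
            return -22;                /* -EINVAL: "VM IB without ID" */

        if (uses_vm) {
            r = vm_flush(job->vm_id, job->vm_pd_addr);
            if (r)
                return r;
        }

        for (i = 0; i < num_ibs; ++i) {
            emit_ib(&ibs[i], job ? job->vm_id : 0, need_ctx_switch);
            need_ctx_switch = false;
        }
        return 0;
    }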

drivers/gpu/drm/amd/amdgpu/amdgpu_job.c

@@ -142,23 +142,15 @@ static struct fence *amdgpu_job_dependency(struct amd_sched_job *sched_job)
struct fence *fence = amdgpu_sync_get_fence(&job->sync);
if (fence == NULL && vm && !job->ibs->vm_id) {
if (fence == NULL && vm && !job->vm_id) {
struct amdgpu_ring *ring = job->ring;
unsigned i, vm_id;
uint64_t vm_pd_addr;
int r;
r = amdgpu_vm_grab_id(vm, ring, &job->sync,
&job->base.s_fence->base,
&vm_id, &vm_pd_addr);
&job->vm_id, &job->vm_pd_addr);
if (r)
DRM_ERROR("Error getting VM ID (%d)\n", r);
else {
for (i = 0; i < job->num_ibs; ++i) {
job->ibs[i].vm_id = vm_id;
job->ibs[i].vm_pd_addr = vm_pd_addr;
}
}
fence = amdgpu_sync_get_fence(&job->sync);
}
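
Because the ID and page-directory address are now job fields, the dependency callback can pass their addresses straight to the grab-ID helper and drop the loop that copied the values into every IB. A hedged sketch with a dummy grab function standing in for amdgpu_vm_grab_id():

    #include <stdint.h>
    #include <stdio.h>

    struct dep_job { unsigned vm_id; uint64_t vm_pd_addr; };

    /* Dummy stand-in for amdgpu_vm_grab_id(): hands out a hardware VM ID
     * and the page-directory address to flush with. */
    static int grab_id(unsigned *vm_id, uint64_t *vm_pd_addr)
    {
        *vm_id = 1;
        *vm_pd_addr = 0x100000;
        return 0;
    }

    static void job_dependency(struct dep_job *job)
    {
        if (!job->vm_id) {
            /* Post-patch: write directly into the job; no per-IB copy loop. */
            int r = grab_id(&job->vm_id, &job->vm_pd_addr);
            if (r)
                fprintf(stderr, "Error getting VM ID (%d)\n", r);
        }
    }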

drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c

@@ -762,7 +762,8 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
* @ib: the IB to execute
*
*/
void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib, bool ctx_switch)
void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib,
unsigned vm_id, bool ctx_switch)
{
amdgpu_ring_write(ring, VCE_CMD_IB);
amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));

drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h

@@ -34,7 +34,8 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
bool direct, struct fence **fence);
void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp);
int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx);
void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib, bool ctx_switch);
void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib,
unsigned vm_id, bool ctx_switch);
void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
unsigned flags);
int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring);

drivers/gpu/drm/amd/amdgpu/cik_sdma.c

@@ -210,9 +210,10 @@ static void cik_sdma_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
* Schedule an IB in the DMA ring (CIK).
*/
static void cik_sdma_ring_emit_ib(struct amdgpu_ring *ring,
struct amdgpu_ib *ib, bool ctx_switch)
struct amdgpu_ib *ib,
unsigned vm_id, bool ctx_switch)
{
u32 extra_bits = ib->vm_id & 0xf;
u32 extra_bits = vm_id & 0xf;
u32 next_rptr = ring->wptr + 5;
while ((next_rptr & 7) != 4)
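
For the ring backends the conversion is mechanical: wherever ib->vm_id was read, the new vm_id parameter is used instead. On the SDMA rings it still ends up in the 4-bit VMID field of the indirect-buffer packet, as this small sketch of the masking shows:

    #include <stdint.h>

    /* SDMA IB packets carry the VMID in four bits, hence the "& 0xf" seen
     * in the cik_sdma, sdma_v2_4 and sdma_v3_0 hunks. */
    static inline uint32_t sdma_ib_vmid_bits(unsigned vm_id)
    {
        return (uint32_t)(vm_id & 0xf);
    }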

drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c

@@ -2030,7 +2030,8 @@ static void gfx_v7_0_ring_emit_fence_compute(struct amdgpu_ring *ring,
* on the gfx ring for execution by the GPU.
*/
static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
struct amdgpu_ib *ib, bool ctx_switch)
struct amdgpu_ib *ib,
unsigned vm_id, bool ctx_switch)
{
u32 header, control = 0;
u32 next_rptr = ring->wptr + 5;
@@ -2056,7 +2057,7 @@ static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
else
header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
control |= ib->length_dw | (ib->vm_id << 24);
control |= ib->length_dw | (vm_id << 24);
amdgpu_ring_write(ring, header);
amdgpu_ring_write(ring,
@@ -2069,7 +2070,8 @@ static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
}
static void gfx_v7_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
struct amdgpu_ib *ib, bool ctx_switch)
struct amdgpu_ib *ib,
unsigned vm_id, bool ctx_switch)
{
u32 header, control = 0;
u32 next_rptr = ring->wptr + 5;
@@ -2084,7 +2086,7 @@ static void gfx_v7_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
control |= ib->length_dw | (ib->vm_id << 24);
control |= ib->length_dw | (vm_id << 24);
amdgpu_ring_write(ring, header);
amdgpu_ring_write(ring,
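
The gfx rings fold the VMID into the INDIRECT_BUFFER control word instead, shifting it to bit 24 next to the IB length, which is what the "ib->length_dw | (vm_id << 24)" lines in the gfx_v7_0 and gfx_v8_0 hunks compute. A one-line helper sketch of that packing:

    #include <stdint.h>

    /* Control word for the CP INDIRECT_BUFFER packet: IB length in dwords
     * in the low bits, VMID at bit 24. */
    static inline uint32_t gfx_ib_control(uint32_t length_dw, unsigned vm_id)
    {
        return length_dw | ((uint32_t)vm_id << 24);
    }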

drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c

@@ -5646,7 +5646,8 @@ static void gfx_v8_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
}
static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
struct amdgpu_ib *ib, bool ctx_switch)
struct amdgpu_ib *ib,
unsigned vm_id, bool ctx_switch)
{
u32 header, control = 0;
u32 next_rptr = ring->wptr + 5;
@@ -5672,7 +5673,7 @@ static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
else
header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
control |= ib->length_dw | (ib->vm_id << 24);
control |= ib->length_dw | (vm_id << 24);
amdgpu_ring_write(ring, header);
amdgpu_ring_write(ring,
@@ -5685,7 +5686,8 @@ static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
}
static void gfx_v8_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
struct amdgpu_ib *ib, bool ctx_switch)
struct amdgpu_ib *ib,
unsigned vm_id, bool ctx_switch)
{
u32 header, control = 0;
u32 next_rptr = ring->wptr + 5;
@@ -5701,7 +5703,7 @@ static void gfx_v8_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
control |= ib->length_dw | (ib->vm_id << 24);
control |= ib->length_dw | (vm_id << 24);
amdgpu_ring_write(ring, header);
amdgpu_ring_write(ring,

drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c

@@ -242,9 +242,10 @@ static void sdma_v2_4_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
* Schedule an IB in the DMA ring (VI).
*/
static void sdma_v2_4_ring_emit_ib(struct amdgpu_ring *ring,
struct amdgpu_ib *ib, bool ctx_switch)
struct amdgpu_ib *ib,
unsigned vm_id, bool ctx_switch)
{
u32 vmid = ib->vm_id & 0xf;
u32 vmid = vm_id & 0xf;
u32 next_rptr = ring->wptr + 5;
while ((next_rptr & 7) != 2)

drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c

@@ -400,9 +400,10 @@ static void sdma_v3_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
* Schedule an IB in the DMA ring (VI).
*/
static void sdma_v3_0_ring_emit_ib(struct amdgpu_ring *ring,
struct amdgpu_ib *ib, bool ctx_switch)
struct amdgpu_ib *ib,
unsigned vm_id, bool ctx_switch)
{
u32 vmid = ib->vm_id & 0xf;
u32 vmid = vm_id & 0xf;
u32 next_rptr = ring->wptr + 5;
while ((next_rptr & 7) != 2)

drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c

@@ -489,7 +489,8 @@ static int uvd_v4_2_ring_test_ring(struct amdgpu_ring *ring)
* Write ring commands to execute the indirect buffer
*/
static void uvd_v4_2_ring_emit_ib(struct amdgpu_ring *ring,
struct amdgpu_ib *ib, bool ctx_switch)
struct amdgpu_ib *ib,
unsigned vm_id, bool ctx_switch)
{
amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_BASE, 0));
amdgpu_ring_write(ring, ib->gpu_addr);

drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c

@@ -539,7 +539,8 @@ static int uvd_v5_0_ring_test_ring(struct amdgpu_ring *ring)
* Write ring commands to execute the indirect buffer
*/
static void uvd_v5_0_ring_emit_ib(struct amdgpu_ring *ring,
struct amdgpu_ib *ib, bool ctx_switch)
struct amdgpu_ib *ib,
unsigned vm_id, bool ctx_switch)
{
amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0));
amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));

drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c

@@ -631,7 +631,8 @@ static int uvd_v6_0_ring_test_ring(struct amdgpu_ring *ring)
* Write ring commands to execute the indirect buffer
*/
static void uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
struct amdgpu_ib *ib, bool ctx_switch)
struct amdgpu_ib *ib,
unsigned vm_id, bool ctx_switch)
{
amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0));
amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
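
The UVD and VCE rings gain the vm_id parameter only so that every emit_ib implementation shares one signature; the visible hunks keep writing just the IB address, and the parameter appears to go unused there. A hypothetical sketch of such an emit function, with stub types in place of the kernel ones:

    #include <stdbool.h>
    #include <stdint.h>

    struct mm_ring;                         /* stub */
    struct mm_ib { uint64_t gpu_addr; };    /* stub */

    static void mm_ring_write(struct mm_ring *ring, uint32_t v)
    {
        (void)ring; (void)v;                /* stand-in for amdgpu_ring_write() */
    }

    /* UVD/VCE-style emit: vm_id is accepted for signature uniformity but
     * not consumed by the packet stream shown in the hunks above. */
    static void mm_ring_emit_ib(struct mm_ring *ring, struct mm_ib *ib,
                                unsigned vm_id, bool ctx_switch)
    {
        (void)vm_id;
        (void)ctx_switch;
        mm_ring_write(ring, (uint32_t)(ib->gpu_addr & 0xffffffffu));
        mm_ring_write(ring, (uint32_t)(ib->gpu_addr >> 32));
    }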