drm/amdgpu: save the PD addr before scheduling the job

When we pipeline evictions, the page directory could already be
moving somewhere else when grab_id is called.

Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Author: Christian König
Date: 2016-06-15 13:44:04 +02:00
Committed-by: Alex Deucher
Commit: 281d144ddd (parent: 3ddf4ad917)
2 changed files with 4 additions and 4 deletions
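
For context, a stand-alone sketch of the race this patch closes. It is a simplified illustration, not driver code: struct bo, struct job, the run_job_* helpers, and the example addresses are hypothetical stand-ins. The point is that a job must snapshot the page-directory address when it is submitted; if it re-reads the buffer object when the scheduler finally runs it, a pipelined eviction may already have moved the buffer underneath it:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

struct bo  { uint64_t gpu_addr; };                  /* buffer object (stand-in) */
struct job { struct bo *pd; uint64_t vm_pd_addr; }; /* scheduled work (stand-in) */

/* Racy variant: re-read the BO's address when the job finally runs. */
static uint64_t run_job_racy(const struct job *job)
{
	return job->pd->gpu_addr; /* eviction may already have moved it */
}

/* Fixed variant: use the address snapshotted at submission time. */
static uint64_t run_job_fixed(const struct job *job)
{
	return job->vm_pd_addr;
}

int main(void)
{
	struct bo pd = { .gpu_addr = 0x1000 };
	struct job job = { .pd = &pd, .vm_pd_addr = 0 };

	/* Submission: snapshot the address before scheduling, as the
	 * patch now does in amdgpu_cs_ib_vm_chunk(). */
	job.vm_pd_addr = pd.gpu_addr;

	/* A pipelined eviction moves the buffer before the job runs. */
	pd.gpu_addr = 0x8000;

	/* The racy read no longer matches the state the job was built
	 * against; the snapshot does. */
	printf("racy:  0x%" PRIx64 "\n", run_job_racy(&job));  /* 0x8000 */
	printf("fixed: 0x%" PRIx64 "\n", run_job_fixed(&job)); /* 0x1000 */
	return 0;
}

The patch applies exactly this ordering: amdgpu_cs_ib_vm_chunk() fills p->job->vm_pd_addr during command submission, and amdgpu_vm_grab_id() consumes the passed-in *vm_pd_addr instead of calling amdgpu_bo_gpu_offset() again.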

--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c

@@ -660,6 +660,8 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
 		}
 	}
 
+	p->job->vm_pd_addr = amdgpu_bo_gpu_offset(vm->page_directory);
+
 	r = amdgpu_bo_vm_update_pte(p, vm);
 	if (!r)
 		amdgpu_cs_sync_rings(p);

--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c

@@ -177,7 +177,6 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 		      struct amdgpu_sync *sync, struct fence *fence,
 		      unsigned *vm_id, uint64_t *vm_pd_addr)
 {
-	uint64_t pd_addr = amdgpu_bo_gpu_offset(vm->page_directory);
 	struct amdgpu_device *adev = ring->adev;
 	struct fence *updates = sync->last_vm_update;
 	struct amdgpu_vm_id *id, *idle;
@@ -250,7 +249,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 			if (atomic64_read(&id->owner) != vm->client_id)
 				continue;
 
-			if (pd_addr != id->pd_gpu_addr)
+			if (*vm_pd_addr != id->pd_gpu_addr)
 				continue;
 
 			if (!same_ring &&
@@ -298,14 +297,13 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 	fence_put(id->flushed_updates);
 	id->flushed_updates = fence_get(updates);
 
-	id->pd_gpu_addr = pd_addr;
+	id->pd_gpu_addr = *vm_pd_addr;
 
 	list_move_tail(&id->list, &adev->vm_manager.ids_lru);
 	atomic64_set(&id->owner, vm->client_id);
 	vm->ids[ring->idx] = id;
 
 	*vm_id = id - adev->vm_manager.ids;
-	*vm_pd_addr = pd_addr;
 	trace_amdgpu_vm_grab_id(vm, ring->idx, *vm_id, *vm_pd_addr);
 
 error: