drm/amdgpu: allocate GTT space for shadow VM page tables

We need to access those with the system domain.

Fixes fallout from only allocating GTT space on demand.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Author: Christian König, 2016-09-16 11:46:23 +02:00 (committed by Alex Deucher)
parent 765e7fbf08
commit 0fc8683e56
1 changed file with 17 additions and 0 deletions

@@ -552,6 +552,10 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
 	if (r)
 		goto error;
 
+	r = amdgpu_ttm_bind(&bo->tbo, &bo->tbo.mem);
+	if (r)
+		goto error;
+
 	addr = amdgpu_bo_gpu_offset(bo);
 	entries = amdgpu_bo_size(bo) / 8;
 
@@ -625,6 +629,11 @@ static int amdgpu_vm_update_pd_or_shadow(struct amdgpu_device *adev,
 
 	if (!pd)
 		return 0;
+
+	r = amdgpu_ttm_bind(&pd->tbo, &pd->tbo.mem);
+	if (r)
+		return r;
+
 	pd_addr = amdgpu_bo_gpu_offset(pd);
 	ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
@@ -650,6 +659,14 @@ static int amdgpu_vm_update_pd_or_shadow(struct amdgpu_device *adev,
 		if (bo == NULL)
 			continue;
 
+		if (bo->shadow) {
+			struct amdgpu_bo *shadow = bo->shadow;
+
+			r = amdgpu_ttm_bind(&shadow->tbo, &shadow->tbo.mem);
+			if (r)
+				return r;
+		}
+
 		pt = amdgpu_bo_gpu_offset(bo);
 		if (!shadow) {
 			if (vm->page_tables[pt_idx].addr == pt)