drm/amdgpu: update the shadow PD together with the real one v2
Far fewer CPU cycles are needed this way: the shadow page directory is
now updated from the same command submission as the real one instead of
in a separate second pass.

v2: fix typo

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
commit f8991bab1a
parent 42e8cb5001
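For readers skimming the diff below: the old code built and submitted one
GPU job for the shadow page directory and a second, nearly identical job
for the real one; the new code sizes a single indirect buffer for both,
emits each page-directory-entry update twice (once against shadow_addr,
once against pd_addr) and submits once. The following is a minimal
user-space sketch of that submission-count difference only, not driver
code: cmd_buf, emit_set_ptes() and submit() are hypothetical stand-ins
for the amdgpu IB and job machinery.

/* Toy model of the change. emit_set_ptes() stands in for
 * amdgpu_vm_do_set_ptes(), submit() for amdgpu_job_submit(). */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

struct cmd_buf { unsigned int len; };

static void emit_set_ptes(struct cmd_buf *cb, uint64_t dst, uint64_t src,
                          unsigned int count)
{
        cb->len += count;       /* pretend each PDE update costs one dword */
        printf("  set_ptes dst=%#" PRIx64 " src=%#" PRIx64 " count=%u\n",
               dst, src, count);
}

static void submit(struct cmd_buf *cb)
{
        /* each submission costs a job allocation, a fence and a scheduler
         * round trip -- this per-update CPU overhead is what gets halved */
        printf("submit: %u dwords\n", cb->len);
        cb->len = 0;
}

int main(void)
{
        uint64_t pd_addr = 0x1000, shadow_addr = 0x2000, pt = 0x9000;
        struct cmd_buf a = { 0 }, b = { 0 }, c = { 0 };

        /* old scheme: one pass (and one submission) per directory */
        emit_set_ptes(&a, shadow_addr, pt, 4);
        submit(&a);
        emit_set_ptes(&b, pd_addr, pt, 4);
        submit(&b);

        /* new scheme: shadow and real PD written from the same buffer */
        emit_set_ptes(&c, shadow_addr, pt, 4);
        emit_set_ptes(&c, pd_addr, pt, 4);
        submit(&c);
        return 0;
}

Consistent with this, the diff doubles the worst-case command-buffer
estimate (ndw *= 2) when a shadow exists, since every directory update
is now emitted twice into the one buffer.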
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -823,7 +823,6 @@ struct amdgpu_ring {
 struct amdgpu_vm_pt {
 	struct amdgpu_bo_list_entry	entry;
 	uint64_t			addr;
-	uint64_t			shadow_addr;
 };
 
 struct amdgpu_vm {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -612,16 +612,26 @@ static uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
 	return result;
 }
 
-static int amdgpu_vm_update_pd_or_shadow(struct amdgpu_device *adev,
-					 struct amdgpu_vm *vm,
-					 bool shadow)
+/*
+ * amdgpu_vm_update_pdes - make sure that page directory is valid
+ *
+ * @adev: amdgpu_device pointer
+ * @vm: requested vm
+ * @start: start of GPU address range
+ * @end: end of GPU address range
+ *
+ * Allocates new page tables if necessary
+ * and updates the page directory.
+ * Returns 0 for success, error for failure.
+ */
+int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
+				    struct amdgpu_vm *vm)
 {
+	struct amdgpu_bo *shadow;
 	struct amdgpu_ring *ring;
-	struct amdgpu_bo *pd = shadow ? vm->page_directory->shadow :
-		vm->page_directory;
-	uint64_t pd_addr;
+	uint64_t pd_addr, shadow_addr;
 	uint32_t incr = AMDGPU_VM_PTE_COUNT * 8;
-	uint64_t last_pde = ~0, last_pt = ~0;
+	uint64_t last_pde = ~0, last_pt = ~0, last_shadow = ~0;
 	unsigned count = 0, pt_idx, ndw;
 	struct amdgpu_job *job;
 	struct amdgpu_pte_update_params params;
@@ -629,15 +639,8 @@ static int amdgpu_vm_update_pd_or_shadow(struct amdgpu_device *adev,
 
 	int r;
 
-	if (!pd)
-		return 0;
-
-	r = amdgpu_ttm_bind(&pd->tbo, &pd->tbo.mem);
-	if (r)
-		return r;
-
-	pd_addr = amdgpu_bo_gpu_offset(pd);
 	ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
+	shadow = vm->page_directory->shadow;
 
 	/* padding, etc. */
 	ndw = 64;
@@ -645,6 +648,17 @@ static int amdgpu_vm_update_pd_or_shadow(struct amdgpu_device *adev,
 	/* assume the worst case */
 	ndw += vm->max_pde_used * 6;
 
+	pd_addr = amdgpu_bo_gpu_offset(vm->page_directory);
+	if (shadow) {
+		r = amdgpu_ttm_bind(&shadow->tbo, &shadow->tbo.mem);
+		if (r)
+			return r;
+		shadow_addr = amdgpu_bo_gpu_offset(shadow);
+		ndw *= 2;
+	} else {
+		shadow_addr = 0;
+	}
+
 	r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
 	if (r)
 		return r;
@@ -662,23 +676,19 @@ static int amdgpu_vm_update_pd_or_shadow(struct amdgpu_device *adev,
 			continue;
 
 		if (bo->shadow) {
-			struct amdgpu_bo *shadow = bo->shadow;
+			struct amdgpu_bo *pt_shadow = bo->shadow;
 
-			r = amdgpu_ttm_bind(&shadow->tbo, &shadow->tbo.mem);
+			r = amdgpu_ttm_bind(&pt_shadow->tbo,
+					    &pt_shadow->tbo.mem);
 			if (r)
 				return r;
 		}
 
 		pt = amdgpu_bo_gpu_offset(bo);
-		if (!shadow) {
-			if (vm->page_tables[pt_idx].addr == pt)
-				continue;
-			vm->page_tables[pt_idx].addr = pt;
-		} else {
-			if (vm->page_tables[pt_idx].shadow_addr == pt)
-				continue;
-			vm->page_tables[pt_idx].shadow_addr = pt;
-		}
+		if (vm->page_tables[pt_idx].addr == pt)
+			continue;
+
+		vm->page_tables[pt_idx].addr = pt;
 
 		pde = pd_addr + pt_idx * 8;
 		if (((last_pde + 8 * count) != pde) ||
@@ -686,6 +696,13 @@ static int amdgpu_vm_update_pd_or_shadow(struct amdgpu_device *adev,
 		    (count == AMDGPU_VM_MAX_UPDATE_SIZE)) {
 
 			if (count) {
+				if (shadow)
+					amdgpu_vm_do_set_ptes(&params,
+							      last_shadow,
+							      last_pt, count,
+							      incr,
+							      AMDGPU_PTE_VALID);
+
 				amdgpu_vm_do_set_ptes(&params, last_pde,
 						      last_pt, count, incr,
 						      AMDGPU_PTE_VALID);
@@ -693,35 +710,45 @@ static int amdgpu_vm_update_pd_or_shadow(struct amdgpu_device *adev,
 
 			count = 1;
 			last_pde = pde;
+			last_shadow = shadow_addr + pt_idx * 8;
 			last_pt = pt;
 		} else {
 			++count;
 		}
 	}
 
-	if (count)
-		amdgpu_vm_do_set_ptes(&params, last_pde, last_pt,
-				      count, incr, AMDGPU_PTE_VALID);
+	if (count) {
+		if (vm->page_directory->shadow)
+			amdgpu_vm_do_set_ptes(&params, last_shadow, last_pt,
+					      count, incr, AMDGPU_PTE_VALID);
 
-	if (params.ib->length_dw != 0) {
-		amdgpu_ring_pad_ib(ring, params.ib);
-		amdgpu_sync_resv(adev, &job->sync, pd->tbo.resv,
-				 AMDGPU_FENCE_OWNER_VM);
-		WARN_ON(params.ib->length_dw > ndw);
-		r = amdgpu_job_submit(job, ring, &vm->entity,
-				      AMDGPU_FENCE_OWNER_VM, &fence);
-		if (r)
-			goto error_free;
+		amdgpu_vm_do_set_ptes(&params, last_pde, last_pt,
+				      count, incr, AMDGPU_PTE_VALID);
+	}
 
-		amdgpu_bo_fence(pd, fence, true);
-		fence_put(vm->page_directory_fence);
-		vm->page_directory_fence = fence_get(fence);
-		fence_put(fence);
+	if (params.ib->length_dw == 0) {
+		amdgpu_job_free(job);
+		return 0;
+	}
 
-	} else {
-		amdgpu_job_free(job);
-	}
+	amdgpu_ring_pad_ib(ring, params.ib);
+	amdgpu_sync_resv(adev, &job->sync, vm->page_directory->tbo.resv,
+			 AMDGPU_FENCE_OWNER_VM);
+	if (shadow)
+		amdgpu_sync_resv(adev, &job->sync, shadow->tbo.resv,
+				 AMDGPU_FENCE_OWNER_VM);
+
+	WARN_ON(params.ib->length_dw > ndw);
+	r = amdgpu_job_submit(job, ring, &vm->entity,
+			      AMDGPU_FENCE_OWNER_VM, &fence);
+	if (r)
+		goto error_free;
+
+	amdgpu_bo_fence(vm->page_directory, fence, true);
+	fence_put(vm->page_directory_fence);
+	vm->page_directory_fence = fence_get(fence);
+	fence_put(fence);
 
 	return 0;
 
 error_free:
@@ -729,29 +756,6 @@ static int amdgpu_vm_update_pd_or_shadow(struct amdgpu_device *adev,
 	return r;
 }
 
-/*
- * amdgpu_vm_update_pdes - make sure that page directory is valid
- *
- * @adev: amdgpu_device pointer
- * @vm: requested vm
- * @start: start of GPU address range
- * @end: end of GPU address range
- *
- * Allocates new page tables if necessary
- * and updates the page directory.
- * Returns 0 for success, error for failure.
- */
-int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
-				    struct amdgpu_vm *vm)
-{
-	int r;
-
-	r = amdgpu_vm_update_pd_or_shadow(adev, vm, true);
-	if (r)
-		return r;
-
-	return amdgpu_vm_update_pd_or_shadow(adev, vm, false);
-}
-
 /**
  * amdgpu_vm_update_ptes - make sure that page tables are valid
  *
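A side note on the loop the diff touches: consecutive page tables whose
destination PDEs stay contiguous are coalesced into a single
amdgpu_vm_do_set_ptes() run ("(last_pde + 8 * count) != pde" is the
break condition visible in the hunk context above), and the shadow copy
now reuses the same run via "last_shadow = shadow_addr + pt_idx * 8".
Below is a minimal sketch of that run-length batching under stated
assumptions: flush(), MAX_RUN, PT_INCR and the addresses are made-up
stand-ins, and the middle condition (source page tables must also stay
contiguous) is elided from the hunk context above and assumed here.

/* Hedged sketch of PDE run coalescing; not driver code. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define PDE_SIZE 8              /* one page-directory entry, in bytes */
#define PT_INCR  (512 * 8)      /* assumed stride between page tables */
#define MAX_RUN  64             /* stands in for AMDGPU_VM_MAX_UPDATE_SIZE */

static void flush(uint64_t pde, uint64_t pt, unsigned int count)
{
        printf("set_ptes: %u PDEs at %#" PRIx64 " -> PTs at %#" PRIx64 "\n",
               count, pde, pt);
}

int main(void)
{
        /* page-table GPU addresses; index 2 breaks the contiguous run */
        uint64_t pts[] = { 0x90000, 0x91000, 0xa0000, 0xa1000 };
        uint64_t pd_addr = 0x1000;
        uint64_t last_pde = ~0ULL, last_pt = ~0ULL;
        unsigned int count = 0, idx;

        for (idx = 0; idx < 4; ++idx) {
                uint64_t pde = pd_addr + idx * PDE_SIZE;
                uint64_t pt = pts[idx];

                /* extend the run only while both the destination PDEs and
                 * the source page tables remain contiguous */
                if (last_pde + PDE_SIZE * count != pde ||
                    last_pt + PT_INCR * count != pt ||
                    count == MAX_RUN) {
                        if (count)
                                flush(last_pde, last_pt, count);
                        count = 1;
                        last_pde = pde;
                        last_pt = pt;
                } else {
                        ++count;
                }
        }
        if (count)
                flush(last_pde, last_pt, count);
        return 0;
}

Fewer, larger set_ptes runs mean fewer update packets in the indirect
buffer, which is why the loop tracks runs at all.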