drm/amdgpu: move PT validation back into VM code v2

Saves a bunch of CPU cycles when swapping things back in and
allows us to split the VM headers into a separate file.

v2: rename parameters

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
commit f7da30d979 (parent a7d64de659)
Christian König, 2016-09-28 12:03:04 +02:00, committed by Alex Deucher
4 changed files with 60 additions and 35 deletions
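
For readers skimming the diff below: the heart of the change is that the VM code now walks its page table BOs itself and calls back into the submitter for validation, instead of handing those BOs out on a duplicates list. The following is a minimal, standalone C sketch of that callback pattern, for illustration only; fake_bo, fake_vm, vm_validate_pt_bos and my_validate are made-up stand-ins, not amdgpu APIs.

/*
 * Standalone sketch (not kernel code) of the callback-driven validation
 * pattern this patch introduces.  All names are hypothetical stand-ins.
 */
#include <stdio.h>

struct fake_bo {
	int id;
	int valid;
};

struct fake_vm {
	struct fake_bo page_tables[4];
	unsigned max_pde_used;
};

/* Walk the page tables and let the caller decide how to validate each BO. */
static int vm_validate_pt_bos(struct fake_vm *vm,
			      int (*validate)(void *param, struct fake_bo *bo),
			      void *param)
{
	unsigned i;
	int r;

	for (i = 0; i <= vm->max_pde_used; ++i) {
		r = validate(param, &vm->page_tables[i]);
		if (r)
			return r;	/* propagate the first failure */
	}
	return 0;
}

/* Example callback: "validate" a BO and report what happened. */
static int my_validate(void *param, struct fake_bo *bo)
{
	const char *who = param;

	bo->valid = 1;
	printf("%s validated BO %d\n", who, bo->id);
	return 0;
}

int main(void)
{
	struct fake_vm vm = {
		.page_tables = { {0, 0}, {1, 0}, {2, 0}, {3, 0} },
		.max_pde_used = 3,
	};

	return vm_validate_pt_bos(&vm, my_validate, "CS parser");
}

In the patch itself, amdgpu_cs_validate() and amdgpu_gem_va_check() play the role of the callback, as the hunks below show.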

@@ -915,8 +915,9 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
 void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
			 struct list_head *validated,
			 struct amdgpu_bo_list_entry *entry);
-void amdgpu_vm_get_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
-			  struct list_head *duplicates);
+int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+			      int (*callback)(void *p, struct amdgpu_bo *bo),
+			      void *param);
 void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
				  struct amdgpu_vm *vm);
 int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,

@@ -388,9 +388,9 @@ static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
 
 /* Last resort, try to evict something from the current working set */
 static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
-				struct amdgpu_bo_list_entry *lobj)
+				struct amdgpu_bo *validated)
 {
-	uint32_t domain = lobj->robj->allowed_domains;
+	uint32_t domain = validated->allowed_domains;
 	int r;
 
 	if (!p->evictable)
@@ -406,7 +406,7 @@ static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
 		uint32_t other;
 
 		/* If we reached our current BO we can forget it */
-		if (candidate == lobj)
+		if (candidate->robj == validated)
 			break;
 
 		other = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
@@ -439,6 +439,23 @@ static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
 	return false;
 }
 
+static int amdgpu_cs_validate(void *param, struct amdgpu_bo *bo)
+{
+	struct amdgpu_cs_parser *p = param;
+	int r;
+
+	do {
+		r = amdgpu_cs_bo_validate(p, bo);
+	} while (r == -ENOMEM && amdgpu_cs_try_evict(p, bo));
+	if (r)
+		return r;
+
+	if (bo->shadow)
+		r = amdgpu_cs_bo_validate(p, bo);
+
+	return r;
+}
+
 static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
 				   struct list_head *validated)
 {
@@ -466,18 +483,10 @@ static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
 		if (p->evictable == lobj)
 			p->evictable = NULL;
 
-		do {
-			r = amdgpu_cs_bo_validate(p, bo);
-		} while (r == -ENOMEM && amdgpu_cs_try_evict(p, lobj));
+		r = amdgpu_cs_validate(p, bo);
 		if (r)
 			return r;
 
-		if (bo->shadow) {
-			r = amdgpu_cs_bo_validate(p, bo);
-			if (r)
-				return r;
-		}
-
 		if (binding_userptr) {
 			drm_free_large(lobj->user_pages);
 			lobj->user_pages = NULL;
@@ -595,14 +604,19 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
 		list_splice(&need_pages, &p->validated);
 	}
 
-	amdgpu_vm_get_pt_bos(p->adev, &fpriv->vm, &duplicates);
-
 	p->bytes_moved_threshold = amdgpu_cs_get_threshold_for_moves(p->adev);
 	p->bytes_moved = 0;
 	p->evictable = list_last_entry(&p->validated,
 				       struct amdgpu_bo_list_entry,
 				       tv.head);
 
+	r = amdgpu_vm_validate_pt_bos(p->adev, &fpriv->vm,
+				      amdgpu_cs_validate, p);
+	if (r) {
+		DRM_ERROR("amdgpu_vm_validate_pt_bos() failed.\n");
+		goto error_validate;
+	}
+
 	r = amdgpu_cs_list_validate(p, &duplicates);
 	if (r) {
 		DRM_ERROR("amdgpu_cs_list_validate(duplicates) failed.\n");

@@ -469,6 +469,16 @@ int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
 	return r;
 }
 
+static int amdgpu_gem_va_check(void *param, struct amdgpu_bo *bo)
+{
+	unsigned domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
+
+	/* if anything is swapped out don't swap it in here,
+	   just abort and wait for the next CS */
+
+	return domain == AMDGPU_GEM_DOMAIN_CPU ? -ERESTARTSYS : 0;
+}
+
 /**
  * amdgpu_gem_va_update_vm -update the bo_va in its VM
  *
@@ -479,7 +489,8 @@ int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
  * vital here, so they are not reported back to userspace.
  */
 static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
-				    struct amdgpu_bo_va *bo_va, uint32_t operation)
+				    struct amdgpu_bo_va *bo_va,
+				    uint32_t operation)
 {
 	struct ttm_validate_buffer tv, *entry;
 	struct amdgpu_bo_list_entry vm_pd;
@@ -502,7 +513,6 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
 	if (r)
 		goto error_print;
 
-	amdgpu_vm_get_pt_bos(adev, bo_va->vm, &duplicates);
 	list_for_each_entry(entry, &list, head) {
 		domain = amdgpu_mem_type_to_domain(entry->bo->mem.mem_type);
 		/* if anything is swapped out don't swap it in here,
@@ -510,13 +520,10 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
 		if (domain == AMDGPU_GEM_DOMAIN_CPU)
 			goto error_unreserve;
 	}
-	list_for_each_entry(entry, &duplicates, head) {
-		domain = amdgpu_mem_type_to_domain(entry->bo->mem.mem_type);
-		/* if anything is swapped out don't swap it in here,
-		   just abort and wait for the next CS */
-		if (domain == AMDGPU_GEM_DOMAIN_CPU)
-			goto error_unreserve;
-	}
+	r = amdgpu_vm_validate_pt_bos(adev, bo_va->vm, amdgpu_gem_va_check,
+				      NULL);
+	if (r)
+		goto error_unreserve;
 
 	r = amdgpu_vm_update_page_directory(adev, bo_va->vm);
 	if (r)
@@ -537,8 +544,6 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
 		DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
 }
 
-
-
 int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 			struct drm_file *filp)
 {

@@ -116,27 +116,29 @@ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
 }
 
 /**
- * amdgpu_vm_get_bos - add the vm BOs to a duplicates list
+ * amdgpu_vm_validate_pt_bos - validate the page table BOs
  *
  * @adev: amdgpu device pointer
  * @vm: vm providing the BOs
- * @duplicates: head of duplicates list
+ * @validate: callback to do the validation
+ * @param: parameter for the validation callback
  *
- * Add the page directory to the BO duplicates list
- * for command submission.
+ * Validate the page table BOs on command submission if neccessary.
  */
-void amdgpu_vm_get_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
-			  struct list_head *duplicates)
+int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+			      int (*validate)(void *p, struct amdgpu_bo *bo),
+			      void *param)
 {
 	uint64_t num_evictions;
 	unsigned i;
+	int r;
 
 	/* We only need to validate the page tables
 	 * if they aren't already valid.
 	 */
 	num_evictions = atomic64_read(&adev->num_evictions);
 	if (num_evictions == vm->last_eviction_counter)
-		return;
+		return 0;
 
 	/* add the vm page table to the list */
 	for (i = 0; i <= vm->max_pde_used; ++i) {
@@ -145,9 +147,12 @@ void amdgpu_vm_get_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 		if (!entry->robj)
 			continue;
 
-		list_add(&entry->tv.head, duplicates);
+		r = validate(param, entry->robj);
+		if (r)
+			return r;
 	}
 
+	return 0;
 }
 
 /**