mirror of https://gitee.com/openkylin/linux.git
drm/amdgpu: Add job fence to resv conditionally
The job fence on a page table should be a shared fence, so add it to the root page table bo resv. The last_delayed field is no longer needed, so remove it.

Cc: Christian König <christian.koenig@amd.com>
Cc: Alex Deucher <alexander.deucher@amd.com>
Cc: Felix Kuehling <Felix.Kuehling@amd.com>
Suggested-by: Christian König <christian.koenig@amd.com>
Signed-off-by: xinhui pan <xinhui.pan@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
commit 57210c19e4
parent 79cb2719be
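The net effect on the SDMA commit path can be summarized with the following simplified sketch of amdgpu_vm_sdma_commit() after this change (drawn from the diff below, not verbatim kernel code; error handling and surrounding context are omitted):

        /* Sketch: post-patch fence handling in amdgpu_vm_sdma_commit().
         * Direct submissions keep tracking their last fence in
         * vm->last_direct; delayed submissions instead add the job fence
         * as a shared fence to the root page table BO's reservation
         * object, so the separate vm->last_delayed tracking can go away.
         */
        if (p->direct) {
                tmp = dma_fence_get(f);
                swap(p->vm->last_direct, tmp);
                dma_fence_put(tmp);
        } else {
                dma_resv_add_shared_fence(p->vm->root.base.bo->tbo.base.resv, f);
        }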
@@ -1608,9 +1608,6 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 
                if (!dma_fence_is_signaled(vm->last_direct))
                        amdgpu_bo_fence(root, vm->last_direct, true);
-
-               if (!dma_fence_is_signaled(vm->last_delayed))
-                       amdgpu_bo_fence(root, vm->last_delayed, true);
        }
 
        r = vm->update_funcs->prepare(&params, resv, sync_mode);
@@ -2588,8 +2585,7 @@ bool amdgpu_vm_evictable(struct amdgpu_bo *bo)
                return false;
 
        /* Don't evict VM page tables while they are updated */
-       if (!dma_fence_is_signaled(bo_base->vm->last_direct) ||
-           !dma_fence_is_signaled(bo_base->vm->last_delayed)) {
+       if (!dma_fence_is_signaled(bo_base->vm->last_direct)) {
                amdgpu_vm_eviction_unlock(bo_base->vm);
                return false;
        }
@@ -2766,11 +2762,7 @@ long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
        if (timeout <= 0)
                return timeout;
 
-       timeout = dma_fence_wait_timeout(vm->last_direct, true, timeout);
-       if (timeout <= 0)
-               return timeout;
-
-       return dma_fence_wait_timeout(vm->last_delayed, true, timeout);
+       return dma_fence_wait_timeout(vm->last_direct, true, timeout);
 }
 
 /**
@@ -2843,7 +2835,6 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
                vm->update_funcs = &amdgpu_vm_sdma_funcs;
        vm->last_update = NULL;
        vm->last_direct = dma_fence_get_stub();
-       vm->last_delayed = dma_fence_get_stub();
 
        mutex_init(&vm->eviction_lock);
        vm->evicting = false;
@@ -2898,7 +2889,6 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 
 error_free_delayed:
        dma_fence_put(vm->last_direct);
-       dma_fence_put(vm->last_delayed);
        drm_sched_entity_destroy(&vm->delayed);
 
 error_free_direct:
@@ -3101,8 +3091,6 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 
        dma_fence_wait(vm->last_direct, false);
        dma_fence_put(vm->last_direct);
-       dma_fence_wait(vm->last_delayed, false);
-       dma_fence_put(vm->last_delayed);
 
        list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
                if (mapping->flags & AMDGPU_PTE_PRT && prt_fini_needed) {

@@ -276,7 +276,6 @@ struct amdgpu_vm {
 
        /* Last submission to the scheduler entities */
        struct dma_fence        *last_direct;
-       struct dma_fence        *last_delayed;
 
        unsigned int            pasid;
        /* dedicated to vm */

@@ -104,12 +104,13 @@ static int amdgpu_vm_sdma_commit(struct amdgpu_vm_update_params *p,
        if (r)
                goto error;
 
-       tmp = dma_fence_get(f);
-       if (p->direct)
+       if (p->direct) {
+               tmp = dma_fence_get(f);
                swap(p->vm->last_direct, tmp);
-       else
-               swap(p->vm->last_delayed, tmp);
-       dma_fence_put(tmp);
+               dma_fence_put(tmp);
+       } else {
+               dma_resv_add_shared_fence(p->vm->root.base.bo->tbo.base.resv, f);
+       }
 
        if (fence && !p->direct)
                swap(*fence, f);