drm/amdgpu: fix amdgpu_vm_handle_moved as well v2
There is no guarantee that the last BO_VA actually needed an update.

Additional to that all command submissions must wait for moved BOs to
be cleared, not just the first one.

v2: Don't overwrite any newer fence.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 4a00f21db8
commit 4e55eb3879
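The hunks below implement this by choosing, per update, which fence pointer
to refresh: BOs that share the VM's reservation object accumulate into a
single per-VM fence (vm->last_update), while everything else keeps its own
bo_va->last_pt_update. A minimal userspace sketch of that selection logic,
using simplified stand-in types rather than the driver's real dma_fence
machinery (all names here are illustrative, not the amdgpu API):

#include <stdbool.h>
#include <stdio.h>

struct fence { unsigned int seq; };   /* stand-in for struct dma_fence */

struct vm    { struct fence *last_update; };
struct bo_va { bool shares_vm_resv; struct fence *last_pt_update; };

/* Mirrors the selection added to amdgpu_vm_bo_update(): updates of BOs
 * that share the VM's reservation object (or "clear" updates) all land
 * in the per-VM fence, so one sync covers every moved BO. */
static struct fence **pick_last_update(struct vm *vm, struct bo_va *bo_va,
                                       bool clear)
{
        if (clear || bo_va->shares_vm_resv)
                return &vm->last_update;
        return &bo_va->last_pt_update;
}

int main(void)
{
        static struct fence f1 = { 1 }, f2 = { 2 };
        struct vm vm = { 0 };
        struct bo_va a = { .shares_vm_resv = true };
        struct bo_va b = { .shares_vm_resv = true };

        *pick_last_update(&vm, &a, false) = &f1;
        *pick_last_update(&vm, &b, false) = &f2; /* newer fence wins */
        printf("vm.last_update seq = %u\n", vm.last_update->seq);
        return 0;
}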
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c

@@ -814,7 +814,7 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p)
 
 	}
 
-	r = amdgpu_vm_handle_moved(adev, vm, &p->job->sync);
+	r = amdgpu_vm_handle_moved(adev, vm);
 	if (r)
 		return r;
 
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c

@@ -1743,7 +1743,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
 	dma_addr_t *pages_addr = NULL;
 	struct ttm_mem_reg *mem;
 	struct drm_mm_node *nodes;
-	struct dma_fence *exclusive;
+	struct dma_fence *exclusive, **last_update;
 	uint64_t flags;
 	int r;
 
@@ -1769,6 +1769,11 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
 	else
 		flags = 0x0;
 
+	if (clear || (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv))
+		last_update = &vm->last_update;
+	else
+		last_update = &bo_va->last_pt_update;
+
 	if (!clear && bo_va->base.moved) {
 		bo_va->base.moved = false;
 		list_splice_init(&bo_va->valids, &bo_va->invalids);
@@ -1780,7 +1785,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
 	list_for_each_entry(mapping, &bo_va->invalids, list) {
 		r = amdgpu_vm_bo_split_mapping(adev, exclusive, pages_addr, vm,
 					       mapping, flags, nodes,
-					       &bo_va->last_pt_update);
+					       last_update);
 		if (r)
 			return r;
 	}
@@ -1803,12 +1808,6 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
 			trace_amdgpu_vm_bo_mapping(mapping);
 	}
 
-	if (bo_va->base.bo &&
-	    bo_va->base.bo->tbo.resv == vm->root.base.bo->tbo.resv) {
-		dma_fence_put(vm->last_update);
-		vm->last_update = dma_fence_get(bo_va->last_pt_update);
-	}
-
 	return 0;
 }
 
@@ -2006,15 +2005,15 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
  * PTs have to be reserved!
  */
 int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
-			   struct amdgpu_vm *vm,
-			   struct amdgpu_sync *sync)
+			   struct amdgpu_vm *vm)
 {
-	struct amdgpu_bo_va *bo_va = NULL;
 	bool clear;
 	int r = 0;
 
 	spin_lock(&vm->status_lock);
 	while (!list_empty(&vm->moved)) {
+		struct amdgpu_bo_va *bo_va;
+
 		bo_va = list_first_entry(&vm->moved,
 			struct amdgpu_bo_va, base.vm_status);
 		spin_unlock(&vm->status_lock);
@@ -2030,9 +2029,6 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
 	}
 	spin_unlock(&vm->status_lock);
 
-	if (bo_va)
-		r = amdgpu_sync_fence(adev, sync, bo_va->last_pt_update);
-
 	return r;
 }
 
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h

@@ -250,8 +250,7 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
 			  struct amdgpu_vm *vm,
 			  struct dma_fence **fence);
 int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
-			   struct amdgpu_vm *vm,
-			   struct amdgpu_sync *sync);
+			   struct amdgpu_vm *vm);
 int amdgpu_vm_bo_update(struct amdgpu_device *adev,
 			struct amdgpu_bo_va *bo_va,
 			bool clear);
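With the sync parameter gone, a submission that must wait for all moved BOs
would presumably sync against the per-VM fence instead. A hedged sketch of
such a call site (an assumption based on the commit message, not a hunk of
this patch; p and adev as in amdgpu_bo_vm_update_pte):

	/* assumed follow-up call site, not part of this patch */
	r = amdgpu_vm_handle_moved(adev, vm);
	if (r)
		return r;

	r = amdgpu_sync_fence(adev, &p->job->sync, vm->last_update);
	if (r)
		return r;

Because amdgpu_vm_bo_update() now funnels every update of a shared BO into
vm->last_update, this single sync covers all moved BOs, not just the last
one that happened to need an update.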