mirror of https://gitee.com/openkylin/linux.git
drm/amdgpu: deal with foreign fences in amdgpu_sync
This also requires some error handling from the callers of that function.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Jammy Zhou <Jammy.Zhou@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
parent 0b492a4c92
commit 91e1a5207e
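The gist of the interface change, in caller terms: amdgpu_sync_fence() used to take a struct amdgpu_fence and return void; it now takes the device plus a generic struct fence and returns an int, because a fence from a foreign device cannot be scheduled as a ring dependency and is instead waited on inline with fence_wait(f, true), an interruptible wait that can fail. A minimal sketch of the calling convention the hunks below adopt (variable names are illustrative, not taken from the patch):

	/* before: void return, amdgpu fences only */
	amdgpu_sync_fence(sync, amdgpu_fence);

	/* after: any struct fence; the interruptible wait on a
	 * foreign fence can fail, so the result must be checked */
	r = amdgpu_sync_fence(adev, sync, f);
	if (r)
		return r;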
drivers/gpu/drm/amd/amdgpu/amdgpu.h

@@ -699,8 +699,8 @@ struct amdgpu_sync {
 };
 
 void amdgpu_sync_create(struct amdgpu_sync *sync);
-void amdgpu_sync_fence(struct amdgpu_sync *sync,
-		       struct amdgpu_fence *fence);
+int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
+		      struct fence *f);
 int amdgpu_sync_resv(struct amdgpu_device *adev,
 		     struct amdgpu_sync *sync,
 		     struct reservation_object *resv,
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c

@@ -482,6 +482,8 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p,
 
 	if (p->bo_list) {
 		for (i = 0; i < p->bo_list->num_entries; i++) {
+			struct fence *f;
+
 			/* ignore duplicates */
 			bo = p->bo_list->array[i].robj;
 			if (!bo)
@@ -495,7 +497,10 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p,
 			if (r)
 				return r;
 
-			amdgpu_sync_fence(&p->ibs[0].sync, bo_va->last_pt_update);
+			f = &bo_va->last_pt_update->base;
+			r = amdgpu_sync_fence(adev, &p->ibs[0].sync, f);
+			if (r)
+				return r;
 		}
 	}
 
@@ -715,9 +720,12 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
 			return r;
 		}
 
-		amdgpu_sync_fence(&ib->sync, fence);
+		r = amdgpu_sync_fence(adev, &ib->sync, &fence->base);
 		amdgpu_fence_unref(&fence);
 		amdgpu_ctx_put(ctx);
+
+		if (r)
+			return r;
 	}
 }
 
drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c

@@ -167,7 +167,11 @@ int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs,
 		/* grab a vm id if necessary */
 		struct amdgpu_fence *vm_id_fence = NULL;
 		vm_id_fence = amdgpu_vm_grab_id(ibs->ring, ibs->vm);
-		amdgpu_sync_fence(&ibs->sync, vm_id_fence);
+		r = amdgpu_sync_fence(adev, &ibs->sync, &vm_id_fence->base);
+		if (r) {
+			amdgpu_ring_unlock_undo(ring);
+			return r;
+		}
 	}
 
 	r = amdgpu_sync_rings(&ibs->sync, ring);
drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c

@@ -53,20 +53,24 @@ void amdgpu_sync_create(struct amdgpu_sync *sync)
 }
 
 /**
- * amdgpu_sync_fence - use the semaphore to sync to a fence
+ * amdgpu_sync_fence - remember to sync to this fence
  *
  * @sync: sync object to add fence to
  * @fence: fence to sync to
  *
  * Sync to the fence using the semaphore objects
  */
-void amdgpu_sync_fence(struct amdgpu_sync *sync,
-		       struct amdgpu_fence *fence)
+int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
+		      struct fence *f)
 {
+	struct amdgpu_fence *fence;
 	struct amdgpu_fence *other;
 
-	if (!fence)
-		return;
+	if (!f)
+		return 0;
+
+	fence = to_amdgpu_fence(f);
+	if (!fence || fence->ring->adev != adev)
+		return fence_wait(f, true);
 
 	other = sync->sync_to[fence->ring->idx];
 	sync->sync_to[fence->ring->idx] = amdgpu_fence_ref(
@@ -79,6 +83,8 @@ void amdgpu_sync_fence(struct amdgpu_sync *sync,
 		amdgpu_fence_later(fence, other));
 	amdgpu_fence_unref(&other);
+
+	return 0;
 }
 
 /**
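Restated outside the diff (a paraphrase of the new function body above, not additional code from the patch): amdgpu_sync_fence() now classifies the fence before deciding how to sync.

	fence = to_amdgpu_fence(f);
	if (!fence || fence->ring->adev != adev)
		/* foreign fence: block right away; the interruptible
		 * wait may return e.g. -ERESTARTSYS, hence the int */
		return fence_wait(f, true);
	/* native fence: keep only the later fence per ring for the
	 * semaphore sync performed later by amdgpu_sync_rings() */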
@@ -106,11 +112,7 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
 
 	/* always sync to the exclusive fence */
 	f = reservation_object_get_excl(resv);
-	fence = f ? to_amdgpu_fence(f) : NULL;
-	if (fence && fence->ring->adev == adev)
-		amdgpu_sync_fence(sync, fence);
-	else if (f)
-		r = fence_wait(f, true);
+	r = amdgpu_sync_fence(adev, sync, f);
 
 	flist = reservation_object_get_list(resv);
 	if (!flist || r)
@@ -120,15 +122,14 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
 		f = rcu_dereference_protected(flist->shared[i],
 					      reservation_object_held(resv));
 		fence = f ? to_amdgpu_fence(f) : NULL;
-		if (fence && fence->ring->adev == adev) {
-			if (fence->owner != owner ||
-			    fence->owner == AMDGPU_FENCE_OWNER_UNDEFINED)
-				amdgpu_sync_fence(sync, fence);
-		} else if (f) {
-			r = fence_wait(f, true);
-			if (r)
-				break;
-		}
+		if (fence && fence->ring->adev == adev &&
+		    fence->owner == owner &&
+		    fence->owner != AMDGPU_FENCE_OWNER_UNDEFINED)
+			continue;
+
+		r = amdgpu_sync_fence(adev, sync, f);
+		if (r)
+			break;
 	}
 	return r;
 }
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c

@@ -732,7 +732,9 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 
 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
 		struct amdgpu_fence *f = vm->ids[i].last_id_use;
-		amdgpu_sync_fence(&ib.sync, f);
+		r = amdgpu_sync_fence(adev, &ib.sync, &f->base);
+		if (r)
+			return r;
 	}
 }
 
@@ -861,7 +863,7 @@ int amdgpu_vm_clear_invalids(struct amdgpu_device *adev,
 			  struct amdgpu_vm *vm, struct amdgpu_sync *sync)
 {
 	struct amdgpu_bo_va *bo_va = NULL;
-	int r;
+	int r = 0;
 
 	spin_lock(&vm->status_lock);
 	while (!list_empty(&vm->invalidated)) {
@@ -878,8 +880,9 @@ int amdgpu_vm_clear_invalids(struct amdgpu_device *adev,
 	spin_unlock(&vm->status_lock);
 
 	if (bo_va)
-		amdgpu_sync_fence(sync, bo_va->last_pt_update);
-	return 0;
+		r = amdgpu_sync_fence(adev, sync, &bo_va->last_pt_update->base);
+
+	return r;
 }
 
 /**