mirror of https://gitee.com/openkylin/linux.git
drm/radeon: further cleanup vm flushing & fencing
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
parent 2280ab57b6
commit fa68834342
@@ -2804,6 +2804,9 @@ int radeon_vm_alloc_pt(struct radeon_device *rdev, struct radeon_vm *vm);
 void radeon_vm_add_to_lru(struct radeon_device *rdev, struct radeon_vm *vm);
 struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
                                        struct radeon_vm *vm, int ring);
+void radeon_vm_flush(struct radeon_device *rdev,
+                     struct radeon_vm *vm,
+                     int ring);
 void radeon_vm_fence(struct radeon_device *rdev,
                      struct radeon_vm *vm,
                      struct radeon_fence *fence);
@@ -511,10 +511,6 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
 		r = radeon_ib_schedule(rdev, &parser->ib, NULL);
 	}
 
-	if (!r) {
-		radeon_vm_fence(rdev, vm, parser->ib.fence);
-	}
-
 out:
 	radeon_vm_add_to_lru(rdev, vm);
 	mutex_unlock(&vm->mutex);
@@ -153,11 +153,9 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
 		return r;
 	}
 
-	/* if we can't remember our last VM flush then flush now! */
-	/* XXX figure out why we have to flush for every IB */
-	if (ib->vm /*&& !ib->vm->last_flush*/) {
-		radeon_ring_vm_flush(rdev, ib->ring, ib->vm);
-	}
+	if (ib->vm)
+		radeon_vm_flush(rdev, ib->vm, ib->ring);
+
 	if (const_ib) {
 		radeon_ring_ib_execute(rdev, const_ib->ring, const_ib);
 		radeon_semaphore_free(rdev, &const_ib->semaphore, NULL);
@@ -172,10 +170,10 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
 	if (const_ib) {
 		const_ib->fence = radeon_fence_ref(ib->fence);
 	}
-	/* we just flushed the VM, remember that */
-	if (ib->vm && !ib->vm->last_flush) {
-		ib->vm->last_flush = radeon_fence_ref(ib->fence);
-	}
+
+	if (ib->vm)
+		radeon_vm_fence(rdev, ib->vm, ib->fence);
+
 	radeon_ring_unlock_commit(rdev, ring);
 	return 0;
 }
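Taken together, the two radeon_ib_schedule() hunks above reduce the scheduler's VM handling to two calls: ask radeon_vm_flush() for a flush before the IB is executed, and hand the freshly emitted ib->fence to radeon_vm_fence() afterwards. The following is a minimal, self-contained sketch of that ordering only; the demo_* names and types are illustrative stand-ins, not the driver's real structures:

#include <stdio.h>

/* Illustrative stubs only; not the radeon driver's real types. */
struct demo_fence { int seq; };
struct demo_vm    { int id; };
struct demo_ib    { struct demo_vm *vm; int ring; struct demo_fence fence; };

static void demo_vm_flush(struct demo_vm *vm, int ring)
{
	printf("ring %d: flush page tables of VM %d\n", ring, vm->id);
}

static void demo_vm_fence(struct demo_vm *vm, struct demo_fence *fence)
{
	printf("VM %d: remember fence %d\n", vm->id, fence->seq);
}

/* Mirrors the ordering the patch establishes in radeon_ib_schedule(). */
static void demo_ib_schedule(struct demo_ib *ib)
{
	if (ib->vm)
		demo_vm_flush(ib->vm, ib->ring);   /* before the IB runs */

	printf("ring %d: execute IB, emit fence\n", ib->ring);
	ib->fence.seq = 1;                         /* fence now exists */

	if (ib->vm)
		demo_vm_fence(ib->vm, &ib->fence); /* bookkeeping afterwards */
}

int main(void)
{
	struct demo_vm vm = { .id = 3 };
	struct demo_ib ib = { .vm = &vm, .ring = 0 };

	demo_ib_schedule(&ib);
	return 0;
}
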
@@ -378,6 +378,27 @@ struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
 	return NULL;
 }
 
+/**
+ * radeon_vm_flush - hardware flush the vm
+ *
+ * @rdev: radeon_device pointer
+ * @vm: vm we want to flush
+ * @ring: ring to use for flush
+ *
+ * Flush the vm (cayman+).
+ *
+ * Global and local mutex must be locked!
+ */
+void radeon_vm_flush(struct radeon_device *rdev,
+		     struct radeon_vm *vm,
+		     int ring)
+{
+	/* if we can't remember our last VM flush then flush now! */
+	/* XXX figure out why we have to flush all the time */
+	if (!vm->last_flush || true)
+		radeon_ring_vm_flush(rdev, ring, vm);
+}
+
 /**
  * radeon_vm_fence - remember fence for vm
  *
@@ -394,14 +415,18 @@ void radeon_vm_fence(struct radeon_device *rdev,
 		     struct radeon_vm *vm,
 		     struct radeon_fence *fence)
 {
-	radeon_fence_unref(&rdev->vm_manager.active[vm->id]);
-	rdev->vm_manager.active[vm->id] = radeon_fence_ref(fence);
-
 	radeon_fence_unref(&vm->fence);
 	vm->fence = radeon_fence_ref(fence);
+
+	radeon_fence_unref(&rdev->vm_manager.active[vm->id]);
+	rdev->vm_manager.active[vm->id] = radeon_fence_ref(fence);
 
 	radeon_fence_unref(&vm->last_id_use);
 	vm->last_id_use = radeon_fence_ref(fence);
+
+	/* we just flushed the VM, remember that */
+	if (!vm->last_flush)
+		vm->last_flush = radeon_fence_ref(fence);
 }
 
 /**
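The two helpers above now own all of the VM flush/fence bookkeeping: radeon_vm_flush() is meant to skip the flush when vm->last_flush is still known (the XXX comment keeps it unconditional for now), and radeon_vm_fence() records the fence covering the flushed page tables. A self-contained sketch of that last_flush bookkeeping, again with stand-in demo_* types rather than the driver's structures:

#include <stdio.h>
#include <stddef.h>

/* Illustrative stubs only; not the radeon driver's real types. */
struct demo_fence { int seq; };
struct demo_vm {
	struct demo_fence *fence;       /* last fence protecting the VM         */
	struct demo_fence *last_flush;  /* fence of the submission that flushed */
};

static void demo_vm_flush(struct demo_vm *vm, int ring)
{
	/* flush only if we can't remember a previous flush
	 * (the real patch still flushes every time, see the XXX comment) */
	if (!vm->last_flush)
		printf("ring %d: VM flush emitted\n", ring);
}

static void demo_vm_fence(struct demo_vm *vm, struct demo_fence *fence)
{
	vm->fence = fence;

	/* we just flushed the VM for this submission, remember that */
	if (!vm->last_flush)
		vm->last_flush = fence;
}

int main(void)
{
	struct demo_vm vm = { NULL, NULL };
	struct demo_fence f = { .seq = 7 };

	demo_vm_flush(&vm, 0);   /* first submission: flush needed    */
	demo_vm_fence(&vm, &f);  /* remember the fence of that flush  */
	demo_vm_flush(&vm, 0);   /* second submission: flush skipped  */
	return 0;
}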