drm/amdgpu: support cond exec
This adds the groundwork for conditional execution on SDMA, which is necessary for preemption.

Signed-off-by: Monk Liu <monk.liu@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
commit 128cff1af6
parent 8e9fbeb522
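For readers new to the mechanism: the driver reserves one 32-bit dword per ring that both the CPU and the command processor can see. The CPU arms it with 1 ("continue") before every submission; a later preemption path can clear it, and a conditional-execution packet in the ring then lets the engine skip the guarded command buffers when the dword reads 0. The following standalone mock (plain user-space C, every name hypothetical and not part of this commit) only illustrates that control flow:

/*
 * Standalone mock of the cond-exec idea, not driver code: the CPU keeps a
 * 32-bit "continue" flag in memory shared with the command processor and
 * arms it before each submission; clearing it makes the engine skip the
 * guarded work.  All names here are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

static volatile uint32_t cond_exec_flag;	/* stand-in for the per-ring writeback dword */

static void run_ib(int ib, int guarded)
{
	/* A conditional-execution packet would make the engine test the flag
	 * and skip the guarded commands when it reads 0. */
	if (guarded && cond_exec_flag == 0) {
		printf("IB %d skipped (cond exec flag cleared)\n", ib);
		return;
	}
	printf("IB %d executed\n", ib);
}

int main(void)
{
	cond_exec_flag = 1;	/* CPU side: always arm CONTINUE before submitting */
	run_ib(0, 1);

	cond_exec_flag = 0;	/* e.g. a preemption request clears the flag */
	run_ib(1, 1);
	return 0;
}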
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -788,6 +788,9 @@ struct amdgpu_ring {
 	struct amdgpu_ctx	*current_ctx;
 	enum amdgpu_ring_type	type;
 	char			name[16];
+	unsigned		cond_exe_offs;
+	u64			cond_exe_gpu_addr;
+	volatile u32		*cond_exe_cpu_addr;
 };
 
 /*
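The three new fields give each ring a handle on that dword from both sides: cond_exe_offs is the slot index in the writeback pool, cond_exe_gpu_addr the address the engine polls, and cond_exe_cpu_addr the kernel mapping the driver writes. One plausible shape for a per-engine hook that consumes them is sketched below; the opcode macro and packet layout are invented for illustration and are not defined by this commit:

/* Hypothetical per-engine hook consuming the new fields.  The opcode macro
 * and packet layout below are made up; the real packets are engine specific
 * and arrive in later patches. */
static void example_ring_emit_cond_exec(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, EXAMPLE_COND_EXEC_OPCODE);	/* hypothetical opcode */
	amdgpu_ring_write(ring, lower_32_bits(ring->cond_exe_gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ring->cond_exe_gpu_addr));
	amdgpu_ring_write(ring, 1);	/* reference value: keep going while the dword == 1 */
	amdgpu_ring_write(ring, 0);	/* number of dwords to skip, patched once known */
}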
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -160,6 +160,9 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 		amdgpu_ring_emit_hdp_flush(ring);
 	}
 
+	/* always set cond_exec_polling to CONTINUE */
+	*ring->cond_exe_cpu_addr = 1;
+
 	old_ctx = ring->current_ctx;
 	for (i = 0; i < num_ibs; ++i) {
 		ib = &ibs[i];
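Arming the dword with 1 on every submission keeps ordinary submissions unaffected: a conditional packet, once the engines emit one, sees "continue" unless something deliberately clears it. A hypothetical preemption-side counterpart, not part of this commit, might look like:

/* Hypothetical preemption-side counterpart (not in this commit): clearing
 * the flag through the CPU mapping makes the engine, when it evaluates the
 * conditional packet, skip the remaining guarded IBs.  A real implementation
 * would also need proper write ordering/flushing. */
static void example_ring_skip_guarded_ibs(struct amdgpu_ring *ring)
{
	*ring->cond_exe_cpu_addr = 0;	/* engine reads 0 -> skip */
}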
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -267,6 +267,15 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
 	}
 	ring->next_rptr_gpu_addr = adev->wb.gpu_addr + (ring->next_rptr_offs * 4);
 	ring->next_rptr_cpu_addr = &adev->wb.wb[ring->next_rptr_offs];
+
+	r = amdgpu_wb_get(adev, &ring->cond_exe_offs);
+	if (r) {
+		dev_err(adev->dev, "(%d) ring cond_exec_polling wb alloc failed\n", r);
+		return r;
+	}
+	ring->cond_exe_gpu_addr = adev->wb.gpu_addr + (ring->cond_exe_offs * 4);
+	ring->cond_exe_cpu_addr = &adev->wb.wb[ring->cond_exe_offs];
+
 	spin_lock_init(&ring->fence_lock);
 	r = amdgpu_fence_driver_start_ring(ring, irq_src, irq_type);
 	if (r) {
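The writeback pool is an array of 32-bit dwords shared with the GPU, so slot index cond_exe_offs translates to a byte offset of cond_exe_offs * 4 from the pool's GPU base; the CPU pointer and the GPU address therefore name the same dword. A small standalone check of that arithmetic (made-up values, not driver code):

/* Standalone illustration of the writeback-slot math used above. */
#include <stdint.h>
#include <assert.h>

int main(void)
{
	uint32_t wb[256];			/* stand-in for adev->wb.wb       */
	uint64_t wb_gpu_base = 0x100000;	/* stand-in for adev->wb.gpu_addr */
	unsigned cond_exe_offs = 42;		/* slot handed out by the allocator */

	uint64_t gpu_addr = wb_gpu_base + (uint64_t)cond_exe_offs * 4;
	volatile uint32_t *cpu_addr = &wb[cond_exe_offs];

	/* both sides point at the same dword, 4 bytes per slot */
	assert(gpu_addr - wb_gpu_base == (uintptr_t)cpu_addr - (uintptr_t)wb);
	*cpu_addr = 1;				/* the dword the GPU would poll */
	return 0;
}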