drm/radeon: fix CP semaphores on CIK

The CP semaphore queue on CIK has a bug that triggers if uncompleted
waits use the same address while a signal is still pending. Work around
this by using different addresses for each sync.

Signed-off-by: Christian König <christian.koenig@amd.com>
Cc: stable@vger.kernel.org
commit 1c61eae469
parent d02f8575f1
Christian König, 2014-02-18 01:50:22 -07:00
3 changed files with 20 additions and 5 deletions
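
Before the per-file diffs, a note on the mechanism: the fix sizes the semaphore buffer for RADEON_NUM_SYNCS 8-byte slots and bumps gpu_addr by 8 after every emitted sync, falling back to a blocking CPU wait once the slots run out. A minimal standalone C sketch of that idea follows; the names sem_sketch and next_sync_addr are illustrative only, not driver code:

#include <stdint.h>

#define RADEON_NUM_SYNCS 4	/* hw syncs before falling back on blocking */

struct sem_sketch {
	uint64_t gpu_addr;	/* GPU address of the current 8-byte slot */
	unsigned count;		/* syncs emitted so far */
};

/* Hand out the address for the next signal/wait pair; returns 0 when
 * all slots are used and the caller must fall back to a CPU-side wait. */
static uint64_t next_sync_addr(struct sem_sketch *sem)
{
	uint64_t addr;

	if (++sem->count > RADEON_NUM_SYNCS)
		return 0;	/* not enough room, wait manually */

	addr = sem->gpu_addr;
	sem->gpu_addr += 8;	/* never reuse an address within one submission */
	return addr;
}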

drivers/gpu/drm/radeon/radeon.h

@@ -135,6 +135,9 @@ extern int radeon_hard_reset;
 /* R600+ */
 #define R600_RING_TYPE_UVD_INDEX	5
 
+/* number of hw syncs before falling back on blocking */
+#define RADEON_NUM_SYNCS		4
+
 /* hardcode those limit for now */
 #define RADEON_VA_IB_OFFSET		(1 << 20)
 #define RADEON_VA_RESERVED_SIZE		(8 << 20)
@@ -554,7 +557,6 @@ int radeon_mode_dumb_mmap(struct drm_file *filp,
 /*
  * Semaphores.
  */
-/* everything here is constant */
 struct radeon_semaphore {
 	struct radeon_sa_bo		*sa_bo;
 	signed				waiters;

drivers/gpu/drm/radeon/radeon_ring.c

@@ -139,7 +139,7 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
 	}
 
 	/* 64 dwords should be enough for fence too */
-	r = radeon_ring_lock(rdev, ring, 64 + RADEON_NUM_RINGS * 8);
+	r = radeon_ring_lock(rdev, ring, 64 + RADEON_NUM_SYNCS * 8);
 	if (r) {
 		dev_err(rdev->dev, "scheduling IB failed (%d).\n", r);
 		return r;

drivers/gpu/drm/radeon/radeon_semaphore.c

@@ -34,14 +34,15 @@
 int radeon_semaphore_create(struct radeon_device *rdev,
 			    struct radeon_semaphore **semaphore)
 {
+	uint64_t *cpu_addr;
 	int i, r;
 
 	*semaphore = kmalloc(sizeof(struct radeon_semaphore), GFP_KERNEL);
 	if (*semaphore == NULL) {
 		return -ENOMEM;
 	}
-	r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo,
-			     &(*semaphore)->sa_bo, 8, 8, true);
+	r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, &(*semaphore)->sa_bo,
+			     8 * RADEON_NUM_SYNCS, 8, true);
 	if (r) {
 		kfree(*semaphore);
 		*semaphore = NULL;
@@ -49,7 +50,10 @@ int radeon_semaphore_create(struct radeon_device *rdev,
 	}
 	(*semaphore)->waiters = 0;
 	(*semaphore)->gpu_addr = radeon_sa_bo_gpu_addr((*semaphore)->sa_bo);
-	*((uint64_t*)radeon_sa_bo_cpu_addr((*semaphore)->sa_bo)) = 0;
+
+	cpu_addr = radeon_sa_bo_cpu_addr((*semaphore)->sa_bo);
+	for (i = 0; i < RADEON_NUM_SYNCS; ++i)
+		cpu_addr[i] = 0;
 
 	for (i = 0; i < RADEON_NUM_RINGS; ++i)
 		(*semaphore)->sync_to[i] = NULL;
@@ -125,6 +129,7 @@ int radeon_semaphore_sync_rings(struct radeon_device *rdev,
 			       struct radeon_semaphore *semaphore,
 			       int ring)
 {
+	unsigned count = 0;
 	int i, r;
 
 	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
@@ -140,6 +145,12 @@ int radeon_semaphore_sync_rings(struct radeon_device *rdev,
 			return -EINVAL;
 		}
 
+		if (++count > RADEON_NUM_SYNCS) {
+			/* not enough room, wait manually */
+			radeon_fence_wait_locked(fence);
+			continue;
+		}
+
 		/* allocate enough space for sync command */
 		r = radeon_ring_alloc(rdev, &rdev->ring[i], 16);
 		if (r) {
@@ -164,6 +175,8 @@ int radeon_semaphore_sync_rings(struct radeon_device *rdev,
 		radeon_ring_commit(rdev, &rdev->ring[i]);
 		radeon_fence_note_sync(fence, ring);
+
+		semaphore->gpu_addr += 8;
 	}
 
 	return 0;
 }
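
For context, a rough sketch of how a caller drives this path in the 3.14-era driver; the signatures are assumed from that era's API, other_fence and ring_index are placeholder variables, and error handling and locking are elided:

	struct radeon_semaphore *sem;
	int r;

	/* allocates 8 * RADEON_NUM_SYNCS bytes of semaphore space after this fix */
	r = radeon_semaphore_create(rdev, &sem);
	if (r)
		return r;

	/* record the fence the target ring must wait for */
	radeon_semaphore_sync_to(sem, other_fence);

	/* emit signal/wait pairs; each consumes a fresh 8-byte slot, and any
	 * sync beyond RADEON_NUM_SYNCS degrades to radeon_fence_wait_locked() */
	r = radeon_semaphore_sync_rings(rdev, sem, ring_index);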