drm/radeon: make the ib an inline object

No need to malloc it any more.

Signed-off-by: Jerome Glisse <jglisse@redhat.com>
Signed-off-by: Christian König <deathsimple@vodafone.de>
Signed-off-by: Dave Airlie <airlied@redhat.com>
parent f237750f00
commit f2e3922106
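The pattern repeated throughout this diff: struct radeon_cs_parser and the IB test functions used to hold a pointer to a kmalloc'd struct radeon_ib; the IB is now embedded by value. Every p->ib->field access therefore becomes p->ib.field, callers pass &ib where they used to pass ib, and radeon_ib_get()/radeon_ib_free() stop allocating and freeing the struct itself. A reduced before/after sketch (illustrative field set only, not the kernel's full definitions):

	/* Before: one extra heap allocation, and failure path, per parser. */
	struct radeon_cs_parser {
		struct radeon_ib *ib;	/* kmalloc'd inside radeon_ib_get() */
	};

	/* After: the IB lives inline in its owner. Only its suballocation
	 * (sa_bo), fence and semaphore still have dynamic lifetimes, which
	 * is why radeon_cs_parser_init() below now clears those members. */
	struct radeon_cs_parser {
		struct radeon_ib ib;	/* no kmalloc/kfree needed */
	};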
drivers/gpu/drm/radeon/evergreen_cs.c

@@ -1057,7 +1057,7 @@ static int evergreen_cs_packet_parse_vline(struct radeon_cs_parser *p)
 	uint32_t header, h_idx, reg, wait_reg_mem_info;
 	volatile uint32_t *ib;
 
-	ib = p->ib->ptr;
+	ib = p->ib.ptr;
 
 	/* parse the WAIT_REG_MEM */
 	r = evergreen_cs_packet_parse(p, &wait_reg_mem, p->idx);
@@ -1215,7 +1215,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
 		if (!(evergreen_reg_safe_bm[i] & m))
 			return 0;
 	}
-	ib = p->ib->ptr;
+	ib = p->ib.ptr;
 	switch (reg) {
 	/* force following reg to 0 in an attempt to disable out buffer
 	 * which will need us to better understand how it works to perform
@@ -1896,7 +1896,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
 	u32 idx_value;
 
 	track = (struct evergreen_cs_track *)p->track;
-	ib = p->ib->ptr;
+	ib = p->ib.ptr;
 	idx = pkt->idx + 1;
 	idx_value = radeon_get_ib_value(p, idx);
 
@@ -2610,8 +2610,8 @@ int evergreen_cs_parse(struct radeon_cs_parser *p)
 		}
 	} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
 #if 0
-	for (r = 0; r < p->ib->length_dw; r++) {
-		printk(KERN_INFO "%05d 0x%08X\n", r, p->ib->ptr[r]);
+	for (r = 0; r < p->ib.length_dw; r++) {
+		printk(KERN_INFO "%05d 0x%08X\n", r, p->ib.ptr[r]);
 		mdelay(1);
 	}
 #endif
drivers/gpu/drm/radeon/r100.c

@@ -139,9 +139,9 @@ int r100_reloc_pitch_offset(struct radeon_cs_parser *p,
 		}
 
 		tmp |= tile_flags;
-		p->ib->ptr[idx] = (value & 0x3fc00000) | tmp;
+		p->ib.ptr[idx] = (value & 0x3fc00000) | tmp;
 	} else
-		p->ib->ptr[idx] = (value & 0xffc00000) | tmp;
+		p->ib.ptr[idx] = (value & 0xffc00000) | tmp;
 	return 0;
 }
 
@@ -156,7 +156,7 @@ int r100_packet3_load_vbpntr(struct radeon_cs_parser *p,
 	volatile uint32_t *ib;
 	u32 idx_value;
 
-	ib = p->ib->ptr;
+	ib = p->ib.ptr;
 	track = (struct r100_cs_track *)p->track;
 	c = radeon_get_ib_value(p, idx++) & 0x1F;
 	if (c > 16) {
@@ -1275,7 +1275,7 @@ void r100_cs_dump_packet(struct radeon_cs_parser *p,
 	unsigned i;
 	unsigned idx;
 
-	ib = p->ib->ptr;
+	ib = p->ib.ptr;
 	idx = pkt->idx;
 	for (i = 0; i <= (pkt->count + 1); i++, idx++) {
 		DRM_INFO("ib[%d]=0x%08X\n", idx, ib[idx]);
@@ -1354,7 +1354,7 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
 	uint32_t header, h_idx, reg;
 	volatile uint32_t *ib;
 
-	ib = p->ib->ptr;
+	ib = p->ib.ptr;
 
 	/* parse the wait until */
 	r = r100_cs_packet_parse(p, &waitreloc, p->idx);
@@ -1533,7 +1533,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
 	u32 tile_flags = 0;
 	u32 idx_value;
 
-	ib = p->ib->ptr;
+	ib = p->ib.ptr;
 	track = (struct r100_cs_track *)p->track;
 
 	idx_value = radeon_get_ib_value(p, idx);
@@ -1889,7 +1889,7 @@ static int r100_packet3_check(struct radeon_cs_parser *p,
 	volatile uint32_t *ib;
 	int r;
 
-	ib = p->ib->ptr;
+	ib = p->ib.ptr;
 	idx = pkt->idx + 1;
 	track = (struct r100_cs_track *)p->track;
 	switch (pkt->opcode) {
@@ -3684,7 +3684,7 @@ void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
 
 int r100_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
 {
-	struct radeon_ib *ib;
+	struct radeon_ib ib;
 	uint32_t scratch;
 	uint32_t tmp = 0;
 	unsigned i;
@@ -3700,22 +3700,22 @@ int r100_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
 	if (r) {
 		return r;
 	}
-	ib->ptr[0] = PACKET0(scratch, 0);
-	ib->ptr[1] = 0xDEADBEEF;
-	ib->ptr[2] = PACKET2(0);
-	ib->ptr[3] = PACKET2(0);
-	ib->ptr[4] = PACKET2(0);
-	ib->ptr[5] = PACKET2(0);
-	ib->ptr[6] = PACKET2(0);
-	ib->ptr[7] = PACKET2(0);
-	ib->length_dw = 8;
-	r = radeon_ib_schedule(rdev, ib);
+	ib.ptr[0] = PACKET0(scratch, 0);
+	ib.ptr[1] = 0xDEADBEEF;
+	ib.ptr[2] = PACKET2(0);
+	ib.ptr[3] = PACKET2(0);
+	ib.ptr[4] = PACKET2(0);
+	ib.ptr[5] = PACKET2(0);
+	ib.ptr[6] = PACKET2(0);
+	ib.ptr[7] = PACKET2(0);
+	ib.length_dw = 8;
+	r = radeon_ib_schedule(rdev, &ib);
 	if (r) {
 		radeon_scratch_free(rdev, scratch);
 		radeon_ib_free(rdev, &ib);
 		return r;
 	}
-	r = radeon_fence_wait(ib->fence, false);
+	r = radeon_fence_wait(ib.fence, false);
 	if (r) {
 		return r;
 	}
drivers/gpu/drm/radeon/r200.c

@@ -154,7 +154,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
 	u32 tile_flags = 0;
 	u32 idx_value;
 
-	ib = p->ib->ptr;
+	ib = p->ib.ptr;
 	track = (struct r100_cs_track *)p->track;
 	idx_value = radeon_get_ib_value(p, idx);
 	switch (reg) {
drivers/gpu/drm/radeon/r300.c

@@ -604,7 +604,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
 	int r;
 	u32 idx_value;
 
-	ib = p->ib->ptr;
+	ib = p->ib.ptr;
 	track = (struct r100_cs_track *)p->track;
 	idx_value = radeon_get_ib_value(p, idx);
 
@@ -1146,7 +1146,7 @@ static int r300_packet3_check(struct radeon_cs_parser *p,
 	unsigned idx;
 	int r;
 
-	ib = p->ib->ptr;
+	ib = p->ib.ptr;
 	idx = pkt->idx + 1;
 	track = (struct r100_cs_track *)p->track;
 	switch(pkt->opcode) {
drivers/gpu/drm/radeon/r600.c

@@ -2681,7 +2681,7 @@ void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
 
 int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
 {
-	struct radeon_ib *ib;
+	struct radeon_ib ib;
 	uint32_t scratch;
 	uint32_t tmp = 0;
 	unsigned i;
@@ -2699,18 +2699,18 @@ int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
 		DRM_ERROR("radeon: failed to get ib (%d).\n", r);
 		return r;
 	}
-	ib->ptr[0] = PACKET3(PACKET3_SET_CONFIG_REG, 1);
-	ib->ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
-	ib->ptr[2] = 0xDEADBEEF;
-	ib->length_dw = 3;
-	r = radeon_ib_schedule(rdev, ib);
+	ib.ptr[0] = PACKET3(PACKET3_SET_CONFIG_REG, 1);
+	ib.ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
+	ib.ptr[2] = 0xDEADBEEF;
+	ib.length_dw = 3;
+	r = radeon_ib_schedule(rdev, &ib);
 	if (r) {
 		radeon_scratch_free(rdev, scratch);
 		radeon_ib_free(rdev, &ib);
 		DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
 		return r;
 	}
-	r = radeon_fence_wait(ib->fence, false);
+	r = radeon_fence_wait(ib.fence, false);
 	if (r) {
 		DRM_ERROR("radeon: fence wait failed (%d).\n", r);
 		return r;
@@ -2722,7 +2722,7 @@ int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
 		DRM_UDELAY(1);
 	}
 	if (i < rdev->usec_timeout) {
-		DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib->fence->ring, i);
+		DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i);
 	} else {
 		DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
 			  scratch, tmp);
drivers/gpu/drm/radeon/r600_cs.c

@@ -345,7 +345,7 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
 	u32 height, height_align, pitch, pitch_align, depth_align;
 	u64 base_offset, base_align;
 	struct array_mode_checker array_check;
-	volatile u32 *ib = p->ib->ptr;
+	volatile u32 *ib = p->ib.ptr;
 	unsigned array_mode;
 	u32 format;
 
@@ -471,7 +471,7 @@ static int r600_cs_track_validate_db(struct radeon_cs_parser *p)
 	u64 base_offset, base_align;
 	struct array_mode_checker array_check;
 	int array_mode;
-	volatile u32 *ib = p->ib->ptr;
+	volatile u32 *ib = p->ib.ptr;
 
 
 	if (track->db_bo == NULL) {
@@ -961,7 +961,7 @@ static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p)
 	uint32_t header, h_idx, reg, wait_reg_mem_info;
 	volatile uint32_t *ib;
 
-	ib = p->ib->ptr;
+	ib = p->ib.ptr;
 
 	/* parse the WAIT_REG_MEM */
 	r = r600_cs_packet_parse(p, &wait_reg_mem, p->idx);
@@ -1110,7 +1110,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
 	m = 1 << ((reg >> 2) & 31);
 	if (!(r600_reg_safe_bm[i] & m))
 		return 0;
-	ib = p->ib->ptr;
+	ib = p->ib.ptr;
 	switch (reg) {
 	/* force following reg to 0 in an attempt to disable out buffer
 	 * which will need us to better understand how it works to perform
@@ -1714,7 +1714,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
 	u32 idx_value;
 
 	track = (struct r600_cs_track *)p->track;
-	ib = p->ib->ptr;
+	ib = p->ib.ptr;
 	idx = pkt->idx + 1;
 	idx_value = radeon_get_ib_value(p, idx);
 
@@ -2249,8 +2249,8 @@ int r600_cs_parse(struct radeon_cs_parser *p)
 		}
 	} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
 #if 0
-	for (r = 0; r < p->ib->length_dw; r++) {
-		printk(KERN_INFO "%05d 0x%08X\n", r, p->ib->ptr[r]);
+	for (r = 0; r < p->ib.length_dw; r++) {
+		printk(KERN_INFO "%05d 0x%08X\n", r, p->ib.ptr[r]);
 		mdelay(1);
 	}
 #endif
@@ -2298,7 +2298,6 @@ int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
 {
 	struct radeon_cs_parser parser;
 	struct radeon_cs_chunk *ib_chunk;
-	struct radeon_ib fake_ib;
 	struct r600_cs_track *track;
 	int r;
 
@@ -2314,9 +2313,8 @@ int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
 	parser.dev = &dev->pdev->dev;
 	parser.rdev = NULL;
 	parser.family = family;
-	parser.ib = &fake_ib;
 	parser.track = track;
-	fake_ib.ptr = ib;
+	parser.ib.ptr = ib;
 	r = radeon_cs_parser_init(&parser, data);
 	if (r) {
 		DRM_ERROR("Failed to initialize parser !\n");
@@ -2333,8 +2331,8 @@ int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
 	 * input memory (cached) and write to the IB (which can be
 	 * uncached). */
 	ib_chunk = &parser.chunks[parser.chunk_ib_idx];
-	parser.ib->length_dw = ib_chunk->length_dw;
-	*l = parser.ib->length_dw;
+	parser.ib.length_dw = ib_chunk->length_dw;
+	*l = parser.ib.length_dw;
 	r = r600_cs_parse(&parser);
 	if (r) {
 		DRM_ERROR("Invalid command stream !\n");
drivers/gpu/drm/radeon/radeon.h

@@ -769,8 +769,8 @@ struct si_rlc {
 };
 
 int radeon_ib_get(struct radeon_device *rdev, int ring,
-		  struct radeon_ib **ib, unsigned size);
-void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib);
+		  struct radeon_ib *ib, unsigned size);
+void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib);
 int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib);
 int radeon_ib_pool_init(struct radeon_device *rdev);
 void radeon_ib_pool_fini(struct radeon_device *rdev);
@@ -838,8 +838,8 @@ struct radeon_cs_parser {
 	int			chunk_relocs_idx;
 	int			chunk_flags_idx;
 	int			chunk_const_ib_idx;
-	struct radeon_ib	*ib;
-	struct radeon_ib	*const_ib;
+	struct radeon_ib	ib;
+	struct radeon_ib	const_ib;
 	void			*track;
 	unsigned		family;
 	int			parser_error;
drivers/gpu/drm/radeon/radeon_cs.c

@@ -138,12 +138,12 @@ static int radeon_cs_sync_rings(struct radeon_cs_parser *p)
 		return 0;
 	}
 
-	r = radeon_semaphore_create(p->rdev, &p->ib->semaphore);
+	r = radeon_semaphore_create(p->rdev, &p->ib.semaphore);
 	if (r) {
 		return r;
 	}
 
-	return radeon_semaphore_sync_rings(p->rdev, p->ib->semaphore,
+	return radeon_semaphore_sync_rings(p->rdev, p->ib.semaphore,
 					   sync_to_ring, p->ring);
 }
 
@@ -161,8 +161,10 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
 	/* get chunks */
 	INIT_LIST_HEAD(&p->validated);
 	p->idx = 0;
-	p->ib = NULL;
-	p->const_ib = NULL;
+	p->ib.sa_bo = NULL;
+	p->ib.semaphore = NULL;
+	p->const_ib.sa_bo = NULL;
+	p->const_ib.semaphore = NULL;
 	p->chunk_ib_idx = -1;
 	p->chunk_relocs_idx = -1;
 	p->chunk_flags_idx = -1;
@@ -301,10 +303,9 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error)
 {
 	unsigned i;
 
-
-	if (!error && parser->ib)
+	if (!error)
 		ttm_eu_fence_buffer_objects(&parser->validated,
-					    parser->ib->fence);
+					    parser->ib.fence);
 	else
 		ttm_eu_backoff_reservation(&parser->validated);
 
@@ -327,9 +328,7 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error)
 	kfree(parser->chunks);
 	kfree(parser->chunks_array);
 	radeon_ib_free(parser->rdev, &parser->ib);
-	if (parser->const_ib) {
-		radeon_ib_free(parser->rdev, &parser->const_ib);
-	}
+	radeon_ib_free(parser->rdev, &parser->const_ib);
 }
 
 static int radeon_cs_ib_chunk(struct radeon_device *rdev,
@@ -355,7 +354,7 @@ static int radeon_cs_ib_chunk(struct radeon_device *rdev,
 		DRM_ERROR("Failed to get ib !\n");
 		return r;
 	}
-	parser->ib->length_dw = ib_chunk->length_dw;
+	parser->ib.length_dw = ib_chunk->length_dw;
 	r = radeon_cs_parse(rdev, parser->ring, parser);
 	if (r || parser->parser_error) {
 		DRM_ERROR("Invalid command stream !\n");
@@ -370,8 +369,8 @@ static int radeon_cs_ib_chunk(struct radeon_device *rdev,
 	if (r) {
 		DRM_ERROR("Failed to synchronize rings !\n");
 	}
-	parser->ib->vm_id = 0;
-	r = radeon_ib_schedule(rdev, parser->ib);
+	parser->ib.vm_id = 0;
+	r = radeon_ib_schedule(rdev, &parser->ib);
 	if (r) {
 		DRM_ERROR("Failed to schedule IB !\n");
 	}
@@ -422,14 +421,14 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
 		DRM_ERROR("Failed to get const ib !\n");
 		return r;
 	}
-	parser->const_ib->is_const_ib = true;
-	parser->const_ib->length_dw = ib_chunk->length_dw;
+	parser->const_ib.is_const_ib = true;
+	parser->const_ib.length_dw = ib_chunk->length_dw;
 	/* Copy the packet into the IB */
-	if (DRM_COPY_FROM_USER(parser->const_ib->ptr, ib_chunk->user_ptr,
+	if (DRM_COPY_FROM_USER(parser->const_ib.ptr, ib_chunk->user_ptr,
 			       ib_chunk->length_dw * 4)) {
 		return -EFAULT;
 	}
-	r = radeon_ring_ib_parse(rdev, parser->ring, parser->const_ib);
+	r = radeon_ring_ib_parse(rdev, parser->ring, &parser->const_ib);
 	if (r) {
 		return r;
 	}
@@ -446,13 +445,13 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
 		DRM_ERROR("Failed to get ib !\n");
 		return r;
 	}
-	parser->ib->length_dw = ib_chunk->length_dw;
+	parser->ib.length_dw = ib_chunk->length_dw;
 	/* Copy the packet into the IB */
-	if (DRM_COPY_FROM_USER(parser->ib->ptr, ib_chunk->user_ptr,
+	if (DRM_COPY_FROM_USER(parser->ib.ptr, ib_chunk->user_ptr,
 			       ib_chunk->length_dw * 4)) {
 		return -EFAULT;
 	}
-	r = radeon_ring_ib_parse(rdev, parser->ring, parser->ib);
+	r = radeon_ring_ib_parse(rdev, parser->ring, &parser->ib);
 	if (r) {
 		return r;
 	}
@@ -473,29 +472,29 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
 
 	if ((rdev->family >= CHIP_TAHITI) &&
 	    (parser->chunk_const_ib_idx != -1)) {
-		parser->const_ib->vm_id = vm->id;
+		parser->const_ib.vm_id = vm->id;
 		/* ib pool is bind at 0 in virtual address space to gpu_addr is the
 		 * offset inside the pool bo
 		 */
-		parser->const_ib->gpu_addr = parser->const_ib->sa_bo->soffset;
-		r = radeon_ib_schedule(rdev, parser->const_ib);
+		parser->const_ib.gpu_addr = parser->const_ib.sa_bo->soffset;
+		r = radeon_ib_schedule(rdev, &parser->const_ib);
 		if (r)
 			goto out;
 	}
 
-	parser->ib->vm_id = vm->id;
+	parser->ib.vm_id = vm->id;
 	/* ib pool is bind at 0 in virtual address space to gpu_addr is the
 	 * offset inside the pool bo
 	 */
-	parser->ib->gpu_addr = parser->ib->sa_bo->soffset;
-	parser->ib->is_const_ib = false;
-	r = radeon_ib_schedule(rdev, parser->ib);
+	parser->ib.gpu_addr = parser->ib.sa_bo->soffset;
+	parser->ib.is_const_ib = false;
+	r = radeon_ib_schedule(rdev, &parser->ib);
 out:
 	if (!r) {
 		if (vm->fence) {
 			radeon_fence_unref(&vm->fence);
 		}
-		vm->fence = radeon_fence_ref(parser->ib->fence);
+		vm->fence = radeon_fence_ref(parser->ib.fence);
 	}
 	mutex_unlock(&fpriv->vm.mutex);
 	return r;
@@ -573,7 +572,7 @@ int radeon_cs_finish_pages(struct radeon_cs_parser *p)
 			size = PAGE_SIZE;
 		}
 
-		if (DRM_COPY_FROM_USER(p->ib->ptr + (i * (PAGE_SIZE/4)),
+		if (DRM_COPY_FROM_USER(p->ib.ptr + (i * (PAGE_SIZE/4)),
 				       ibc->user_ptr + (i * PAGE_SIZE),
 				       size))
 			return -EFAULT;
@@ -590,7 +589,7 @@ int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx)
 	bool copy1 = (p->rdev->flags & RADEON_IS_AGP) ? false : true;
 
 	for (i = ibc->last_copied_page + 1; i < pg_idx; i++) {
-		if (DRM_COPY_FROM_USER(p->ib->ptr + (i * (PAGE_SIZE/4)),
+		if (DRM_COPY_FROM_USER(p->ib.ptr + (i * (PAGE_SIZE/4)),
 				       ibc->user_ptr + (i * PAGE_SIZE),
 				       PAGE_SIZE)) {
 			p->parser_error = -EFAULT;
@@ -606,7 +605,7 @@ int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx)
 
 	new_page = ibc->kpage_idx[0] < ibc->kpage_idx[1] ? 0 : 1;
 	if (copy1)
-		ibc->kpage[new_page] = p->ib->ptr + (pg_idx * (PAGE_SIZE / 4));
+		ibc->kpage[new_page] = p->ib.ptr + (pg_idx * (PAGE_SIZE / 4));
 
 	if (DRM_COPY_FROM_USER(ibc->kpage[new_page],
 			       ibc->user_ptr + (pg_idx * PAGE_SIZE),
@@ -617,7 +616,7 @@ int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx)
 
 	/* copy to IB for non single case */
 	if (!copy1)
-		memcpy((void *)(p->ib->ptr+(pg_idx*(PAGE_SIZE/4))), ibc->kpage[new_page], size);
+		memcpy((void *)(p->ib.ptr+(pg_idx*(PAGE_SIZE/4))), ibc->kpage[new_page], size);
 
 	ibc->last_copied_page = pg_idx;
 	ibc->kpage_idx[new_page] = pg_idx;
drivers/gpu/drm/radeon/radeon_ring.c

@@ -65,51 +65,36 @@ u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
 }
 
 int radeon_ib_get(struct radeon_device *rdev, int ring,
-		  struct radeon_ib **ib, unsigned size)
+		  struct radeon_ib *ib, unsigned size)
 {
 	int r;
 
-	*ib = kmalloc(sizeof(struct radeon_ib), GFP_KERNEL);
-	if (*ib == NULL) {
-		return -ENOMEM;
-	}
-	r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, &(*ib)->sa_bo, size, 256, true);
+	r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, &ib->sa_bo, size, 256, true);
 	if (r) {
 		dev_err(rdev->dev, "failed to get a new IB (%d)\n", r);
-		kfree(*ib);
-		*ib = NULL;
 		return r;
 	}
-	r = radeon_fence_create(rdev, &(*ib)->fence, ring);
+	r = radeon_fence_create(rdev, &ib->fence, ring);
 	if (r) {
 		dev_err(rdev->dev, "failed to create fence for new IB (%d)\n", r);
-		radeon_sa_bo_free(rdev, &(*ib)->sa_bo, NULL);
-		kfree(*ib);
-		*ib = NULL;
+		radeon_sa_bo_free(rdev, &ib->sa_bo, NULL);
 		return r;
 	}
 
-	(*ib)->ptr = radeon_sa_bo_cpu_addr((*ib)->sa_bo);
-	(*ib)->gpu_addr = radeon_sa_bo_gpu_addr((*ib)->sa_bo);
-	(*ib)->vm_id = 0;
-	(*ib)->is_const_ib = false;
-	(*ib)->semaphore = NULL;
+	ib->ptr = radeon_sa_bo_cpu_addr(ib->sa_bo);
+	ib->gpu_addr = radeon_sa_bo_gpu_addr(ib->sa_bo);
+	ib->vm_id = 0;
+	ib->is_const_ib = false;
+	ib->semaphore = NULL;
 
 	return 0;
 }
 
-void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
+void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib)
 {
-	struct radeon_ib *tmp = *ib;
-
-	*ib = NULL;
-	if (tmp == NULL) {
-		return;
-	}
-	radeon_semaphore_free(rdev, tmp->semaphore, tmp->fence);
-	radeon_sa_bo_free(rdev, &tmp->sa_bo, tmp->fence);
-	radeon_fence_unref(&tmp->fence);
-	kfree(tmp);
+	radeon_semaphore_free(rdev, ib->semaphore, ib->fence);
+	radeon_sa_bo_free(rdev, &ib->sa_bo, ib->fence);
+	radeon_fence_unref(&ib->fence);
 }
 
 int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
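For reference, a condensed sketch of the IB lifecycle after this change, modeled on the r100/r600 IB tests above (error paths and the scratch-register polling are trimmed; the ring index and IB size are placeholder values):

	struct radeon_ib ib;	/* on the stack or embedded, no longer kmalloc'd */
	int r;

	r = radeon_ib_get(rdev, RADEON_RING_TYPE_GFX_INDEX, &ib, 256);
	if (r)
		return r;
	ib.ptr[0] = PACKET2(0);			/* fill in real packets here */
	ib.length_dw = 1;
	r = radeon_ib_schedule(rdev, &ib);	/* now takes &ib, not ib */
	if (!r)
		r = radeon_fence_wait(ib.fence, false);
	radeon_ib_free(rdev, &ib);	/* releases sa_bo/fence/semaphore, no kfree */
	return r;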