drm/amdgpu: move align_mask and nop into ring funcs as well (v2)
They are constant as well.

v2: update uvd and vce phys ring structures as well

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>

parent 21cd942e5c
commit 7988714237
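
The pattern in this change, as a minimal stand-alone C sketch (the `ring` and `ring_funcs` types and the SDMA-style values below are illustrative stand-ins for `amdgpu_ring` and `amdgpu_ring_funcs`, not the driver's real definitions): per-ring-type constants move out of each ring instance and into the one shared, const descriptor that all rings of that type already point at.

#include <stdint.h>
#include <stdio.h>

/* One shared, immutable descriptor per ring type (cf. amdgpu_ring_funcs).
 * align_mask and nop are constant per type, so they belong here ... */
struct ring_funcs {
	uint32_t align_mask;	/* submissions padded to (align_mask + 1) dwords */
	uint32_t nop;		/* NOP packet used for that padding */
};

/* ... and the per-instance state keeps only what really varies per ring. */
struct ring {
	const struct ring_funcs *funcs;
	uint32_t wptr;
};

/* Example type: 16-dword alignment, placeholder NOP encoding. */
static const struct ring_funcs sdma_funcs = {
	.align_mask = 0xf,
	.nop = 0x0,
};

/* Round a request up to the ring's alignment, as amdgpu_ring_alloc does. */
static uint32_t align_ndw(const struct ring *ring, uint32_t ndw)
{
	return (ndw + ring->funcs->align_mask) & ~ring->funcs->align_mask;
}

int main(void)
{
	struct ring r = { .funcs = &sdma_funcs, .wptr = 0 };

	/* 13 dwords rounded up to a multiple of 16 -> prints 16 */
	printf("%u\n", align_ndw(&r, 13));
	return 0;
}

Besides shrinking every struct amdgpu_ring instance, keeping the constants in a static const descriptor lets them live in read-only data next to the function pointers they describe, and amdgpu_ring_init drops two parameters that every caller had to thread through, as the hunks below show.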

drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -65,7 +65,7 @@ int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw)
 {
 	/* Align requested size with padding so unlock_commit can
 	 * pad safely */
-	ndw = (ndw + ring->align_mask) & ~ring->align_mask;
+	ndw = (ndw + ring->funcs->align_mask) & ~ring->funcs->align_mask;
 
 	/* Make sure we aren't trying to allocate more space
 	 * than the maximum for one submission
@@ -94,7 +94,7 @@ void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
 	int i;
 
 	for (i = 0; i < count; i++)
-		amdgpu_ring_write(ring, ring->nop);
+		amdgpu_ring_write(ring, ring->funcs->nop);
 }
 
 /** amdgpu_ring_generic_pad_ib - pad IB with NOP packets
@@ -106,8 +106,8 @@ void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
  */
 void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
 {
-	while (ib->length_dw & ring->align_mask)
-		ib->ptr[ib->length_dw++] = ring->nop;
+	while (ib->length_dw & ring->funcs->align_mask)
+		ib->ptr[ib->length_dw++] = ring->funcs->nop;
 }
 
 /**
@@ -125,8 +125,9 @@ void amdgpu_ring_commit(struct amdgpu_ring *ring)
 	uint32_t count;
 
 	/* We pad to match fetch size */
-	count = ring->align_mask + 1 - (ring->wptr & ring->align_mask);
-	count %= ring->align_mask + 1;
+	count = ring->funcs->align_mask + 1 -
+		(ring->wptr & ring->funcs->align_mask);
+	count %= ring->funcs->align_mask + 1;
 	ring->funcs->insert_nop(ring, count);
 
 	mb();
@@ -163,8 +164,8 @@ void amdgpu_ring_undo(struct amdgpu_ring *ring)
  * Returns 0 on success, error on failure.
  */
 int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
-		     unsigned max_dw, u32 nop, u32 align_mask,
-		     struct amdgpu_irq_src *irq_src, unsigned irq_type)
+		     unsigned max_dw, struct amdgpu_irq_src *irq_src,
+		     unsigned irq_type)
 {
 	int r;
 
@@ -215,8 +216,6 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
 
 	ring->ring_size = roundup_pow_of_two(max_dw * 4 *
 					     amdgpu_sched_hw_submission);
-	ring->align_mask = align_mask;
-	ring->nop = nop;
 
 	/* Allocate ring buffer */
 	if (ring->ring_obj == NULL) {

drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -93,6 +93,8 @@ unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring);
 /* provided by hw blocks that expose a ring buffer for commands */
 struct amdgpu_ring_funcs {
 	enum amdgpu_ring_type	type;
+	uint32_t		align_mask;
+	u32			nop;
 
 	/* ring read/write ptr handling */
 	u32 (*get_rptr)(struct amdgpu_ring *ring);
@@ -149,10 +151,8 @@ struct amdgpu_ring {
 	unsigned		max_dw;
 	int			count_dw;
 	uint64_t		gpu_addr;
-	uint32_t		align_mask;
 	uint32_t		ptr_mask;
 	bool			ready;
-	u32			nop;
 	u32			idx;
 	u32			me;
 	u32			pipe;
@@ -178,8 +178,8 @@ void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
 void amdgpu_ring_commit(struct amdgpu_ring *ring);
 void amdgpu_ring_undo(struct amdgpu_ring *ring);
 int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
-		     unsigned ring_size, u32 nop, u32 align_mask,
-		     struct amdgpu_irq_src *irq_src, unsigned irq_type);
+		     unsigned ring_size, struct amdgpu_irq_src *irq_src,
+		     unsigned irq_type);
 void amdgpu_ring_fini(struct amdgpu_ring *ring);
 
 #endif

drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -206,10 +206,10 @@ static void cik_sdma_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
 
 	for (i = 0; i < count; i++)
 		if (sdma && sdma->burst_nop && (i == 0))
-			amdgpu_ring_write(ring, ring->nop |
+			amdgpu_ring_write(ring, ring->funcs->nop |
 					  SDMA_NOP_COUNT(count - 1));
 		else
-			amdgpu_ring_write(ring, ring->nop);
+			amdgpu_ring_write(ring, ring->funcs->nop);
 }
 
 /**
@@ -943,7 +943,6 @@ static int cik_sdma_sw_init(void *handle)
 		ring->ring_obj = NULL;
 		sprintf(ring->name, "sdma%d", i);
 		r = amdgpu_ring_init(adev, ring, 1024,
-				     SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0), 0xf,
 				     &adev->sdma.trap_irq,
 				     (i == 0) ?
 				     AMDGPU_SDMA_IRQ_TRAP0 :
@@ -1210,6 +1209,8 @@ const struct amd_ip_funcs cik_sdma_ip_funcs = {
 
 static const struct amdgpu_ring_funcs cik_sdma_ring_funcs = {
 	.type = AMDGPU_RING_TYPE_SDMA,
+	.align_mask = 0xf,
+	.nop = SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0),
 	.get_rptr = cik_sdma_ring_get_rptr,
 	.get_wptr = cik_sdma_ring_get_wptr,
 	.set_wptr = cik_sdma_ring_set_wptr,

drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
@@ -2869,7 +2869,6 @@ static int gfx_v6_0_sw_init(void *handle)
 		ring->ring_obj = NULL;
 		sprintf(ring->name, "gfx");
 		r = amdgpu_ring_init(adev, ring, 1024,
-				     0x80000000, 0xff,
 				     &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_EOP);
 		if (r)
 			return r;
@@ -2892,7 +2891,6 @@ static int gfx_v6_0_sw_init(void *handle)
 		sprintf(ring->name, "comp %d.%d.%d", ring->me, ring->pipe, ring->queue);
 		irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP + ring->pipe;
 		r = amdgpu_ring_init(adev, ring, 1024,
-				     0x80000000, 0xff,
 				     &adev->gfx.eop_irq, irq_type);
 		if (r)
 			return r;
@@ -3227,6 +3225,8 @@ const struct amd_ip_funcs gfx_v6_0_ip_funcs = {
 
 static const struct amdgpu_ring_funcs gfx_v6_0_ring_funcs_gfx = {
 	.type = AMDGPU_RING_TYPE_GFX,
+	.align_mask = 0xff,
+	.nop = 0x80000000,
 	.get_rptr = gfx_v6_0_ring_get_rptr,
 	.get_wptr = gfx_v6_0_ring_get_wptr,
 	.set_wptr = gfx_v6_0_ring_set_wptr_gfx,
@@ -3252,6 +3252,8 @@ static const struct amdgpu_ring_funcs gfx_v6_0_ring_funcs_gfx = {
 
 static const struct amdgpu_ring_funcs gfx_v6_0_ring_funcs_compute = {
 	.type = AMDGPU_RING_TYPE_COMPUTE,
+	.align_mask = 0xff,
+	.nop = 0x80000000,
 	.get_rptr = gfx_v6_0_ring_get_rptr,
 	.get_wptr = gfx_v6_0_ring_get_wptr,
 	.set_wptr = gfx_v6_0_ring_set_wptr_compute,

drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -4611,7 +4611,6 @@ static int gfx_v7_0_sw_init(void *handle)
 		ring->ring_obj = NULL;
 		sprintf(ring->name, "gfx");
 		r = amdgpu_ring_init(adev, ring, 1024,
-				     PACKET3(PACKET3_NOP, 0x3FFF), 0xff,
 				     &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_EOP);
 		if (r)
 			return r;
@@ -4637,7 +4636,6 @@ static int gfx_v7_0_sw_init(void *handle)
 		irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP + ring->pipe;
 		/* type-2 packets are deprecated on MEC, use type-3 instead */
 		r = amdgpu_ring_init(adev, ring, 1024,
-				     PACKET3(PACKET3_NOP, 0x3FFF), 0xff,
 				     &adev->gfx.eop_irq, irq_type);
 		if (r)
 			return r;
@@ -5108,6 +5106,8 @@ const struct amd_ip_funcs gfx_v7_0_ip_funcs = {
 
 static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = {
 	.type = AMDGPU_RING_TYPE_GFX,
+	.align_mask = 0xff,
+	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
 	.get_rptr = gfx_v7_0_ring_get_rptr,
 	.get_wptr = gfx_v7_0_ring_get_wptr_gfx,
 	.set_wptr = gfx_v7_0_ring_set_wptr_gfx,
@@ -5136,6 +5136,8 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = {
 
 static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = {
 	.type = AMDGPU_RING_TYPE_COMPUTE,
+	.align_mask = 0xff,
+	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
 	.get_rptr = gfx_v7_0_ring_get_rptr,
 	.get_wptr = gfx_v7_0_ring_get_wptr_compute,
 	.set_wptr = gfx_v7_0_ring_set_wptr_compute,

drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -2034,9 +2034,8 @@ static int gfx_v8_0_sw_init(void *handle)
 			ring->doorbell_index = AMDGPU_DOORBELL_GFX_RING0;
 		}
 
-		r = amdgpu_ring_init(adev, ring, 1024,
-				     PACKET3(PACKET3_NOP, 0x3FFF), 0xff,
-				     &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_EOP);
+		r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq,
+				     AMDGPU_CP_IRQ_GFX_EOP);
 		if (r)
 			return r;
 	}
@@ -2060,9 +2059,8 @@ static int gfx_v8_0_sw_init(void *handle)
 		sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);
 		irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP + ring->pipe;
 		/* type-2 packets are deprecated on MEC, use type-3 instead */
-		r = amdgpu_ring_init(adev, ring, 1024,
-				     PACKET3(PACKET3_NOP, 0x3FFF), 0xff,
-				     &adev->gfx.eop_irq, irq_type);
+		r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq,
+				     irq_type);
 		if (r)
 			return r;
 	}
@@ -6528,6 +6526,8 @@ const struct amd_ip_funcs gfx_v8_0_ip_funcs = {
 
 static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = {
 	.type = AMDGPU_RING_TYPE_GFX,
+	.align_mask = 0xff,
+	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
 	.get_rptr = gfx_v8_0_ring_get_rptr,
 	.get_wptr = gfx_v8_0_ring_get_wptr_gfx,
 	.set_wptr = gfx_v8_0_ring_set_wptr_gfx,
@@ -6558,6 +6558,8 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = {
 
 static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
 	.type = AMDGPU_RING_TYPE_COMPUTE,
+	.align_mask = 0xff,
+	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
 	.get_rptr = gfx_v8_0_ring_get_rptr,
 	.get_wptr = gfx_v8_0_ring_get_wptr_compute,
 	.set_wptr = gfx_v8_0_ring_set_wptr_compute,

drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -232,10 +232,10 @@ static void sdma_v2_4_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
 
 	for (i = 0; i < count; i++)
 		if (sdma && sdma->burst_nop && (i == 0))
-			amdgpu_ring_write(ring, ring->nop |
+			amdgpu_ring_write(ring, ring->funcs->nop |
 					  SDMA_PKT_NOP_HEADER_COUNT(count - 1));
 		else
-			amdgpu_ring_write(ring, ring->nop);
+			amdgpu_ring_write(ring, ring->funcs->nop);
 }
 
 /**
@@ -949,7 +949,6 @@ static int sdma_v2_4_sw_init(void *handle)
 		ring->use_doorbell = false;
 		sprintf(ring->name, "sdma%d", i);
 		r = amdgpu_ring_init(adev, ring, 1024,
-				     SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), 0xf,
 				     &adev->sdma.trap_irq,
 				     (i == 0) ?
 				     AMDGPU_SDMA_IRQ_TRAP0 :
@@ -1207,6 +1206,8 @@ const struct amd_ip_funcs sdma_v2_4_ip_funcs = {
 
 static const struct amdgpu_ring_funcs sdma_v2_4_ring_funcs = {
 	.type = AMDGPU_RING_TYPE_SDMA,
+	.align_mask = 0xf,
+	.nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
 	.get_rptr = sdma_v2_4_ring_get_rptr,
 	.get_wptr = sdma_v2_4_ring_get_wptr,
 	.set_wptr = sdma_v2_4_ring_set_wptr,

drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -392,10 +392,10 @@ static void sdma_v3_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
 
 	for (i = 0; i < count; i++)
 		if (sdma && sdma->burst_nop && (i == 0))
-			amdgpu_ring_write(ring, ring->nop |
+			amdgpu_ring_write(ring, ring->funcs->nop |
 					  SDMA_PKT_NOP_HEADER_COUNT(count - 1));
 		else
-			amdgpu_ring_write(ring, ring->nop);
+			amdgpu_ring_write(ring, ring->funcs->nop);
 }
 
 /**
@@ -1161,7 +1161,6 @@ static int sdma_v3_0_sw_init(void *handle)
 
 		sprintf(ring->name, "sdma%d", i);
 		r = amdgpu_ring_init(adev, ring, 1024,
-				     SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), 0xf,
 				     &adev->sdma.trap_irq,
 				     (i == 0) ?
 				     AMDGPU_SDMA_IRQ_TRAP0 :
@@ -1550,6 +1549,8 @@ const struct amd_ip_funcs sdma_v3_0_ip_funcs = {
 
 static const struct amdgpu_ring_funcs sdma_v3_0_ring_funcs = {
 	.type = AMDGPU_RING_TYPE_SDMA,
+	.align_mask = 0xf,
+	.nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
 	.get_rptr = sdma_v3_0_ring_get_rptr,
 	.get_wptr = sdma_v3_0_ring_get_wptr,
 	.set_wptr = sdma_v3_0_ring_set_wptr,

drivers/gpu/drm/amd/amdgpu/si_dma.c
@@ -531,7 +531,6 @@ static int si_dma_sw_init(void *handle)
 		ring->use_doorbell = false;
 		sprintf(ring->name, "sdma%d", i);
 		r = amdgpu_ring_init(adev, ring, 1024,
-				     DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0), 0xf,
 				     &adev->sdma.trap_irq,
 				     (i == 0) ?
 				     AMDGPU_SDMA_IRQ_TRAP0 :
@@ -765,6 +764,8 @@ const struct amd_ip_funcs si_dma_ip_funcs = {
 
 static const struct amdgpu_ring_funcs si_dma_ring_funcs = {
 	.type = AMDGPU_RING_TYPE_SDMA,
+	.align_mask = 0xf,
+	.nop = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0),
 	.get_rptr = si_dma_ring_get_rptr,
 	.get_wptr = si_dma_ring_get_wptr,
 	.set_wptr = si_dma_ring_set_wptr,

drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
@@ -116,8 +116,7 @@ static int uvd_v4_2_sw_init(void *handle)
 
 	ring = &adev->uvd.ring;
 	sprintf(ring->name, "uvd");
-	r = amdgpu_ring_init(adev, ring, 512, PACKET0(mmUVD_NO_OP, 0), 0xf,
-			     &adev->uvd.irq, 0);
+	r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0);
 
 	return r;
 }
@@ -743,6 +742,8 @@ const struct amd_ip_funcs uvd_v4_2_ip_funcs = {
 
 static const struct amdgpu_ring_funcs uvd_v4_2_ring_funcs = {
 	.type = AMDGPU_RING_TYPE_UVD,
+	.align_mask = 0xf,
+	.nop = PACKET0(mmUVD_NO_OP, 0),
 	.get_rptr = uvd_v4_2_ring_get_rptr,
 	.get_wptr = uvd_v4_2_ring_get_wptr,
 	.set_wptr = uvd_v4_2_ring_set_wptr,

drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
@@ -112,8 +112,7 @@ static int uvd_v5_0_sw_init(void *handle)
 
 	ring = &adev->uvd.ring;
 	sprintf(ring->name, "uvd");
-	r = amdgpu_ring_init(adev, ring, 512, PACKET0(mmUVD_NO_OP, 0), 0xf,
-			     &adev->uvd.irq, 0);
+	r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0);
 
 	return r;
 }
@@ -794,6 +793,8 @@ const struct amd_ip_funcs uvd_v5_0_ip_funcs = {
 
 static const struct amdgpu_ring_funcs uvd_v5_0_ring_funcs = {
 	.type = AMDGPU_RING_TYPE_UVD,
+	.align_mask = 0xf,
+	.nop = PACKET0(mmUVD_NO_OP, 0),
 	.get_rptr = uvd_v5_0_ring_get_rptr,
 	.get_wptr = uvd_v5_0_ring_get_wptr,
 	.set_wptr = uvd_v5_0_ring_set_wptr,

drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -116,8 +116,7 @@ static int uvd_v6_0_sw_init(void *handle)
 
 	ring = &adev->uvd.ring;
 	sprintf(ring->name, "uvd");
-	r = amdgpu_ring_init(adev, ring, 512, PACKET0(mmUVD_NO_OP, 0), 0xf,
-			     &adev->uvd.irq, 0);
+	r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0);
 
 	return r;
 }
@@ -1024,6 +1023,8 @@ const struct amd_ip_funcs uvd_v6_0_ip_funcs = {
 
 static const struct amdgpu_ring_funcs uvd_v6_0_ring_phys_funcs = {
 	.type = AMDGPU_RING_TYPE_UVD,
+	.align_mask = 0xf,
+	.nop = PACKET0(mmUVD_NO_OP, 0),
 	.get_rptr = uvd_v6_0_ring_get_rptr,
 	.get_wptr = uvd_v6_0_ring_get_wptr,
 	.set_wptr = uvd_v6_0_ring_set_wptr,
@@ -1048,6 +1049,8 @@ static const struct amdgpu_ring_funcs uvd_v6_0_ring_phys_funcs = {
 
 static const struct amdgpu_ring_funcs uvd_v6_0_ring_vm_funcs = {
 	.type = AMDGPU_RING_TYPE_UVD,
+	.align_mask = 0xf,
+	.nop = PACKET0(mmUVD_NO_OP, 0),
 	.get_rptr = uvd_v6_0_ring_get_rptr,
 	.get_wptr = uvd_v6_0_ring_get_wptr,
 	.set_wptr = uvd_v6_0_ring_set_wptr,

drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
@@ -224,7 +224,7 @@ static int vce_v2_0_sw_init(void *handle)
 	for (i = 0; i < adev->vce.num_rings; i++) {
 		ring = &adev->vce.ring[i];
 		sprintf(ring->name, "vce%d", i);
-		r = amdgpu_ring_init(adev, ring, 512, VCE_CMD_NO_OP, 0xf,
+		r = amdgpu_ring_init(adev, ring, 512,
 				     &adev->vce.irq, 0);
 		if (r)
 			return r;
@@ -611,6 +611,8 @@ const struct amd_ip_funcs vce_v2_0_ip_funcs = {
 
 static const struct amdgpu_ring_funcs vce_v2_0_ring_funcs = {
 	.type = AMDGPU_RING_TYPE_VCE,
+	.align_mask = 0xf,
+	.nop = VCE_CMD_NO_OP,
 	.get_rptr = vce_v2_0_ring_get_rptr,
 	.get_wptr = vce_v2_0_ring_get_wptr,
 	.set_wptr = vce_v2_0_ring_set_wptr,

drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -389,8 +389,7 @@ static int vce_v3_0_sw_init(void *handle)
 	for (i = 0; i < adev->vce.num_rings; i++) {
 		ring = &adev->vce.ring[i];
 		sprintf(ring->name, "vce%d", i);
-		r = amdgpu_ring_init(adev, ring, 512, VCE_CMD_NO_OP, 0xf,
-				     &adev->vce.irq, 0);
+		r = amdgpu_ring_init(adev, ring, 512, &adev->vce.irq, 0);
 		if (r)
 			return r;
 	}
@@ -830,6 +829,8 @@ const struct amd_ip_funcs vce_v3_0_ip_funcs = {
 
 static const struct amdgpu_ring_funcs vce_v3_0_ring_phys_funcs = {
 	.type = AMDGPU_RING_TYPE_VCE,
+	.align_mask = 0xf,
+	.nop = VCE_CMD_NO_OP,
 	.get_rptr = vce_v3_0_ring_get_rptr,
 	.get_wptr = vce_v3_0_ring_get_wptr,
 	.set_wptr = vce_v3_0_ring_set_wptr,
@@ -850,6 +851,8 @@ static const struct amdgpu_ring_funcs vce_v3_0_ring_phys_funcs = {
 
 static const struct amdgpu_ring_funcs vce_v3_0_ring_vm_funcs = {
 	.type = AMDGPU_RING_TYPE_VCE,
+	.align_mask = 0xf,
+	.nop = VCE_CMD_NO_OP,
 	.get_rptr = vce_v3_0_ring_get_rptr,
 	.get_wptr = vce_v3_0_ring_get_wptr,
 	.set_wptr = vce_v3_0_ring_set_wptr,