mirror of https://gitee.com/openkylin/linux.git
drm/amdgpu: replace kcq enable/disable functions on gfx_v9
[Why] There are HW-independent functions that enable and disable kcq. These functions use the kiq_pm4_funcs implementation. [How] The local kcq enable and disable functions are removed and replaced by the generic kcq enable under amdgpu_gfx. Signed-off-by: Alex Sierra <alex.sierra@amd.com> Acked-by: Christian König <christian.koenig@amd.com> Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com> Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
This commit is contained in:
parent
58e508b6be
commit
4f01f1e58e
|
@ -3234,74 +3234,6 @@ static void gfx_v9_0_kiq_setting(struct amdgpu_ring *ring)
|
||||||
WREG32_SOC15_RLC(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
|
WREG32_SOC15_RLC(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
 * gfx_v9_0_kiq_kcq_enable - map all kernel compute queues (KCQs) via the KIQ.
 * @adev: amdgpu device
 *
 * Builds a PM4 command stream on the KIQ ring: one SET_RESOURCES packet
 * advertising the mask of usable compute queues, followed by one MAP_QUEUES
 * packet per compute ring, then submits it with a ring test.
 *
 * Returns 0 on success or a negative error code from ring allocation or the
 * KIQ ring test.
 *
 * NOTE(review): this commit removes this local implementation in favor of the
 * generic amdgpu_gfx_enable_kcq() helper driven by kiq_pm4_funcs.
 */
static int gfx_v9_0_kiq_kcq_enable(struct amdgpu_device *adev)
{
	struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
	uint64_t queue_mask = 0;
	int r, i;

	/* Build a bitmask of all compute queues present in the MEC bitmap. */
	for (i = 0; i < AMDGPU_MAX_COMPUTE_QUEUES; ++i) {
		if (!test_bit(i, adev->gfx.mec.queue_bitmap))
			continue;

		/* This situation may be hit in the future if a new HW
		 * generation exposes more than 64 queues. If so, the
		 * definition of queue_mask needs updating */
		if (WARN_ON(i >= (sizeof(queue_mask)*8))) {
			DRM_ERROR("Invalid KCQ enabled: %d\n", i);
			break;
		}

		queue_mask |= (1ull << i);
	}

	/* Reserve ring space: 7 dwords per MAP_QUEUES packet plus 8 dwords
	 * for the SET_RESOURCES packet. */
	r = amdgpu_ring_alloc(kiq_ring, (7 * adev->gfx.num_compute_rings) + 8);
	if (r) {
		DRM_ERROR("Failed to lock KIQ (%d).\n", r);
		return r;
	}

	/* set resources */
	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6));
	amdgpu_ring_write(kiq_ring, PACKET3_SET_RESOURCES_VMID_MASK(0) |
			  PACKET3_SET_RESOURCES_QUEUE_TYPE(0));	/* vmid_mask:0 queue_type:0 (KIQ) */
	amdgpu_ring_write(kiq_ring, lower_32_bits(queue_mask));	/* queue mask lo */
	amdgpu_ring_write(kiq_ring, upper_32_bits(queue_mask));	/* queue mask hi */
	amdgpu_ring_write(kiq_ring, 0);	/* gws mask lo */
	amdgpu_ring_write(kiq_ring, 0);	/* gws mask hi */
	amdgpu_ring_write(kiq_ring, 0);	/* oac mask */
	amdgpu_ring_write(kiq_ring, 0);	/* gds heap base:0, gds heap size:0 */

	/* Emit one MAP_QUEUES packet per compute ring, pointing the CP at the
	 * ring's MQD and write-pointer polling address. */
	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
		struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
		uint64_t mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
		uint64_t wptr_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);

		amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
		/* Q_sel:0, vmid:0, vidmem: 1, engine:0, num_Q:1*/
		amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
				  PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */
				  PACKET3_MAP_QUEUES_VMID(0) | /* VMID */
				  PACKET3_MAP_QUEUES_QUEUE(ring->queue) |
				  PACKET3_MAP_QUEUES_PIPE(ring->pipe) |
				  /* ME index: ring->me is 1-based for MEC1; hw field is 0-based */
				  PACKET3_MAP_QUEUES_ME((ring->me == 1 ? 0 : 1)) |
				  PACKET3_MAP_QUEUES_QUEUE_TYPE(0) | /*queue_type: normal compute queue */
				  PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) | /* alloc format: all_on_one_pipe */
				  PACKET3_MAP_QUEUES_ENGINE_SEL(0) | /* engine_sel: compute */
				  PACKET3_MAP_QUEUES_NUM_QUEUES(1)); /* num_queues: must be 1 */
		amdgpu_ring_write(kiq_ring, PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index));
		amdgpu_ring_write(kiq_ring, lower_32_bits(mqd_addr));
		amdgpu_ring_write(kiq_ring, upper_32_bits(mqd_addr));
		amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr));
		amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
	}

	/* Submit the packet stream and verify the KIQ ring still functions. */
	r = amdgpu_ring_test_helper(kiq_ring);
	if (r)
		DRM_ERROR("KCQ enable failed\n");

	return r;
}
|
|
||||||
|
|
||||||
static int gfx_v9_0_mqd_init(struct amdgpu_ring *ring)
|
static int gfx_v9_0_mqd_init(struct amdgpu_ring *ring)
|
||||||
{
|
{
|
||||||
struct amdgpu_device *adev = ring->adev;
|
struct amdgpu_device *adev = ring->adev;
|
||||||
|
@ -3708,7 +3640,7 @@ static int gfx_v9_0_kcq_resume(struct amdgpu_device *adev)
|
||||||
goto done;
|
goto done;
|
||||||
}
|
}
|
||||||
|
|
||||||
r = gfx_v9_0_kiq_kcq_enable(adev);
|
r = amdgpu_gfx_enable_kcq(adev);
|
||||||
done:
|
done:
|
||||||
return r;
|
return r;
|
||||||
}
|
}
|
||||||
|
@ -3812,36 +3744,6 @@ static int gfx_v9_0_hw_init(void *handle)
|
||||||
return r;
|
return r;
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
 * gfx_v9_0_kcq_disable - unmap all kernel compute queues (KCQs) via the KIQ.
 * @adev: amdgpu device
 *
 * Emits one UNMAP_QUEUES packet (action RESET_QUEUES) per compute ring on the
 * KIQ ring, then submits with a ring test. Used at hw_fini so the CP stops
 * touching memory that is about to become invalid.
 *
 * Returns 0 on success or a negative error code from the KIQ ring test.
 *
 * NOTE(review): even if amdgpu_ring_alloc() fails, this code falls through
 * and keeps writing packets; the error is only logged, not returned early.
 * NOTE(review): this commit removes this local implementation in favor of
 * the generic amdgpu_gfx_disable_kcq() helper driven by kiq_pm4_funcs.
 */
static int gfx_v9_0_kcq_disable(struct amdgpu_device *adev)
{
	int r, i;
	struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;

	/* Reserve 6 dwords per UNMAP_QUEUES packet. */
	r = amdgpu_ring_alloc(kiq_ring, 6 * adev->gfx.num_compute_rings);
	if (r)
		DRM_ERROR("Failed to lock KIQ (%d).\n", r);

	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
		struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];

		amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4));
		amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
				  PACKET3_UNMAP_QUEUES_ACTION(1) | /* RESET_QUEUES */
				  PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) |
				  PACKET3_UNMAP_QUEUES_ENGINE_SEL(0) |
				  PACKET3_UNMAP_QUEUES_NUM_QUEUES(1));
		amdgpu_ring_write(kiq_ring, PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index));
		/* Trailing doorbell-offset words 1-3 are unused for this action. */
		amdgpu_ring_write(kiq_ring, 0);
		amdgpu_ring_write(kiq_ring, 0);
		amdgpu_ring_write(kiq_ring, 0);
	}

	/* Submit the packet stream and verify the KIQ ring still functions. */
	r = amdgpu_ring_test_helper(kiq_ring);
	if (r)
		DRM_ERROR("KCQ disable failed\n");

	return r;
}
|
|
||||||
|
|
||||||
static int gfx_v9_0_hw_fini(void *handle)
|
static int gfx_v9_0_hw_fini(void *handle)
|
||||||
{
|
{
|
||||||
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
|
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
|
||||||
|
@ -3853,7 +3755,7 @@ static int gfx_v9_0_hw_fini(void *handle)
|
||||||
/* DF freeze and kcq disable will fail */
|
/* DF freeze and kcq disable will fail */
|
||||||
if (!amdgpu_ras_intr_triggered())
|
if (!amdgpu_ras_intr_triggered())
|
||||||
/* disable KCQ to avoid CPC touch memory not valid anymore */
|
/* disable KCQ to avoid CPC touch memory not valid anymore */
|
||||||
gfx_v9_0_kcq_disable(adev);
|
amdgpu_gfx_disable_kcq(adev);
|
||||||
|
|
||||||
if (amdgpu_sriov_vf(adev)) {
|
if (amdgpu_sriov_vf(adev)) {
|
||||||
gfx_v9_0_cp_gfx_enable(adev, false);
|
gfx_v9_0_cp_gfx_enable(adev, false);
|
||||||
|
|
Loading…
Reference in New Issue