Merge tag 'drm-next-5.5-2019-11-22' of git://people.freedesktop.org/~agd5f/linux into drm-next
drm-next-5.5-2019-11-22:

amdgpu:
- Fix bad DMA on some PPC platforms
- MMHUB fix for powergating
- BACO fix for Navi
- Misc raven fixes
- Enable vbios fetch directly from rom on navi
- debugfs fix for DC
- SR-IOV fixes for arcturus
- Misc power fixes

radeon:
- Fix bad DMA on some PPC platforms

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexdeucher@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191122203025.3787-1-alexander.deucher@amd.com
commit acc61b8929
@@ -3109,9 +3109,8 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
 	int r;
 
 	DRM_INFO("amdgpu: finishing device.\n");
-	adev->shutdown = true;
-
 	flush_delayed_work(&adev->delayed_init_work);
+	adev->shutdown = true;
 
 	/* disable all interrupts */
 	amdgpu_irq_disable_all(adev);
@@ -3130,7 +3129,6 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
 		adev->firmware.gpu_info_fw = NULL;
 	}
 	adev->accel_working = false;
-	cancel_delayed_work_sync(&adev->delayed_init_work);
 	/* free i2c buses */
 	if (!amdgpu_device_has_dc_support(adev))
 		amdgpu_i2c_fini(adev);
@@ -514,7 +514,7 @@ uint32_t amdgpu_display_supported_domains(struct amdgpu_device *adev,
 	 * Also, don't allow GTT domain if the BO doens't have USWC falg set.
 	 */
 	if (adev->asic_type >= CHIP_CARRIZO &&
-	    adev->asic_type <= CHIP_RAVEN &&
+	    adev->asic_type < CHIP_RAVEN &&
 	    (adev->flags & AMD_IS_APU) &&
 	    (bo_flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) &&
 	    amdgpu_bo_support_uswc(bo_flags) &&
@@ -998,10 +998,10 @@ static const struct pci_device_id pciidlist[] = {
 	{0x1002, 0x731B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10},
 	{0x1002, 0x731F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10},
 	/* Navi14 */
-	{0x1002, 0x7340, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14|AMD_EXP_HW_SUPPORT},
-	{0x1002, 0x7341, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14|AMD_EXP_HW_SUPPORT},
-	{0x1002, 0x7347, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14|AMD_EXP_HW_SUPPORT},
-	{0x1002, 0x734F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14|AMD_EXP_HW_SUPPORT},
+	{0x1002, 0x7340, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14},
+	{0x1002, 0x7341, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14},
+	{0x1002, 0x7347, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14},
+	{0x1002, 0x734F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14},
 
 	/* Renoir */
 	{0x1002, 0x1636, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU|AMD_EXP_HW_SUPPORT},
@@ -454,8 +454,6 @@ void amdgpu_gfx_mqd_sw_fini(struct amdgpu_device *adev)
 	}
 
 	ring = &adev->gfx.kiq.ring;
-	if (adev->asic_type >= CHIP_NAVI10 && amdgpu_async_gfx_ring)
-		kfree(adev->gfx.me.mqd_backup[AMDGPU_MAX_GFX_RINGS]);
 	kfree(adev->gfx.mec.mqd_backup[AMDGPU_MAX_COMPUTE_RINGS]);
 	amdgpu_bo_free_kernel(&ring->mqd_obj,
 			      &ring->mqd_gpu_addr,
@@ -225,7 +225,7 @@ struct amdgpu_me {
 	uint32_t num_me;
 	uint32_t num_pipe_per_me;
 	uint32_t num_queue_per_pipe;
-	void *mqd_backup[AMDGPU_MAX_GFX_RINGS + 1];
+	void *mqd_backup[AMDGPU_MAX_GFX_RINGS];
 
 	/* These are the resources for which amdgpu takes ownership */
 	DECLARE_BITMAP(queue_bitmap, AMDGPU_MAX_GFX_QUEUES);
@@ -77,6 +77,7 @@ struct amdgpu_gmc_fault {
 struct amdgpu_vmhub {
 	uint32_t ctx0_ptb_addr_lo32;
 	uint32_t ctx0_ptb_addr_hi32;
+	uint32_t vm_inv_eng0_sem;
 	uint32_t vm_inv_eng0_req;
 	uint32_t vm_inv_eng0_ack;
 	uint32_t vm_context0_cntl;
@@ -655,15 +655,19 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
 			return -ENOMEM;
 		alloc_size = info->read_mmr_reg.count * sizeof(*regs);
 
-		for (i = 0; i < info->read_mmr_reg.count; i++)
+		amdgpu_gfx_off_ctrl(adev, false);
+		for (i = 0; i < info->read_mmr_reg.count; i++) {
 			if (amdgpu_asic_read_register(adev, se_num, sh_num,
 						      info->read_mmr_reg.dword_offset + i,
 						      &regs[i])) {
 				DRM_DEBUG_KMS("unallowed offset %#x\n",
 					      info->read_mmr_reg.dword_offset + i);
 				kfree(regs);
+				amdgpu_gfx_off_ctrl(adev, true);
 				return -EFAULT;
 			}
+		}
+		amdgpu_gfx_off_ctrl(adev, true);
 		n = copy_to_user(out, regs, min(size, alloc_size));
 		kfree(regs);
 		return n ? -EFAULT : 0;
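The hunk above brackets the MMIO register reads with a GFXOFF disable/enable pair so the GFX block cannot power down mid-read; the extra re-enable before the error return keeps the calls balanced. A minimal userspace sketch of that bracket pattern, using stand-in names rather than the amdgpu API:

#include <errno.h>
#include <stdbool.h>
#include <stdint.h>

/* Stand-in device: gfxoff_refs counts outstanding "disable" requests. */
struct dev { int gfxoff_refs; };

static void gfx_off_ctrl(struct dev *d, bool enable)
{
	d->gfxoff_refs += enable ? -1 : 1;	/* mimics a refcounted toggle */
}

static int read_register(struct dev *d, int off, uint32_t *val)
{
	if (off < 0)
		return -1;			/* unallowed offset */
	*val = 0xdeadbeef;			/* pretend MMIO read */
	return 0;
}

/* Every exit path taken after the disable must re-enable GFXOFF. */
static int read_regs(struct dev *d, uint32_t *regs, int count)
{
	int i;

	gfx_off_ctrl(d, false);			/* keep GFX powered for MMIO */
	for (i = 0; i < count; i++) {
		if (read_register(d, i, &regs[i])) {
			gfx_off_ctrl(d, true);	/* balance on the error path */
			return -EFAULT;
		}
	}
	gfx_off_ctrl(d, true);
	return 0;
}

int main(void)
{
	struct dev d = { 0 };
	uint32_t regs[4];
	return read_regs(&d, regs, 4) ? 1 : 0;
}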
@@ -758,6 +758,12 @@ static int psp_ras_terminate(struct psp_context *psp)
 {
 	int ret;
 
+	/*
+	 * TODO: bypass the terminate in sriov for now
+	 */
+	if (amdgpu_sriov_vf(psp->adev))
+		return 0;
+
 	if (!psp->ras.ras_initialized)
 		return 0;
 
@@ -779,6 +785,12 @@ static int psp_ras_initialize(struct psp_context *psp)
 {
 	int ret;
 
+	/*
+	 * TODO: bypass the initialize in sriov for now
+	 */
+	if (amdgpu_sriov_vf(psp->adev))
+		return 0;
+
 	if (!psp->adev->psp.ta_ras_ucode_size ||
 	    !psp->adev->psp.ta_ras_start_addr) {
 		dev_warn(psp->adev->dev, "RAS: ras ta ucode is not available\n");
@@ -874,6 +886,12 @@ static int psp_hdcp_initialize(struct psp_context *psp)
 {
 	int ret;
 
+	/*
+	 * TODO: bypass the initialize in sriov for now
+	 */
+	if (amdgpu_sriov_vf(psp->adev))
+		return 0;
+
 	if (!psp->adev->psp.ta_hdcp_ucode_size ||
 	    !psp->adev->psp.ta_hdcp_start_addr) {
 		dev_warn(psp->adev->dev, "HDCP: hdcp ta ucode is not available\n");
@@ -962,6 +980,12 @@ static int psp_hdcp_terminate(struct psp_context *psp)
 {
 	int ret;
 
+	/*
+	 * TODO: bypass the terminate in sriov for now
+	 */
+	if (amdgpu_sriov_vf(psp->adev))
+		return 0;
+
 	if (!psp->hdcp_context.hdcp_initialized)
 		return 0;
 
@@ -1053,6 +1077,12 @@ static int psp_dtm_initialize(struct psp_context *psp)
 {
 	int ret;
 
+	/*
+	 * TODO: bypass the initialize in sriov for now
+	 */
+	if (amdgpu_sriov_vf(psp->adev))
+		return 0;
+
 	if (!psp->adev->psp.ta_dtm_ucode_size ||
 	    !psp->adev->psp.ta_dtm_start_addr) {
 		dev_warn(psp->adev->dev, "DTM: dtm ta ucode is not available\n");
@@ -1111,6 +1141,12 @@ static int psp_dtm_terminate(struct psp_context *psp)
 {
 	int ret;
 
+	/*
+	 * TODO: bypass the terminate in sriov for now
+	 */
+	if (amdgpu_sriov_vf(psp->adev))
+		return 0;
+
 	if (!psp->dtm_context.dtm_initialized)
 		return 0;
 
@@ -1431,7 +1467,10 @@ static int psp_np_fw_load(struct psp_context *psp)
 		    || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA5
 		    || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA6
 		    || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA7
-		    || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_G))
+		    || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_G
+		    || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL
+		    || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM
+		    || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM))
 			/*skip ucode loading in SRIOV VF */
 			continue;
 
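The psp hunks above are the arcturus SR-IOV fixes from the changelog, and all share one shape: each TA initialize/terminate path gains a guard clause that turns it into a no-op on SR-IOV virtual functions (a temporary bypass, per the TODO comments). A sketch of that guard-clause shape with invented stand-in names:

#include <stdbool.h>

/* Invented stand-ins for psp_context / amdgpu_sriov_vf(). */
struct ctx {
	bool sriov_vf;
	bool ras_initialized;
};

static bool is_sriov_vf(const struct ctx *c)
{
	return c->sriov_vf;
}

static int ras_terminate(struct ctx *c)
{
	/* bypass on virtual functions, mirroring the TODO in the hunks */
	if (is_sriov_vf(c))
		return 0;

	/* nothing to tear down if init never ran */
	if (!c->ras_initialized)
		return 0;

	/* ... real teardown would unload the trusted application here ... */
	c->ras_initialized = false;
	return 0;
}

int main(void)
{
	struct ctx c = { .sriov_vf = true, .ras_initialized = true };
	return ras_terminate(&c);	/* returns 0 without touching state */
}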
@@ -473,7 +473,7 @@ TRACE_EVENT(amdgpu_ib_pipe_sync,
 	    TP_PROTO(struct amdgpu_job *sched_job, struct dma_fence *fence),
 	    TP_ARGS(sched_job, fence),
 	    TP_STRUCT__entry(
-			     __string(ring, sched_job->base.sched->name);
+			     __string(ring, sched_job->base.sched->name)
 			     __field(uint64_t, id)
 			     __field(struct dma_fence *, fence)
 			     __field(uint64_t, ctx)
@@ -1785,27 +1785,52 @@ static void gfx_v10_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
 	WREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0, tmp);
 }
 
-static void gfx_v10_0_init_csb(struct amdgpu_device *adev)
+static int gfx_v10_0_init_csb(struct amdgpu_device *adev)
 {
+	int r;
+
+	if (adev->in_gpu_reset) {
+		r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false);
+		if (r)
+			return r;
+
+		r = amdgpu_bo_kmap(adev->gfx.rlc.clear_state_obj,
+				   (void **)&adev->gfx.rlc.cs_ptr);
+		if (!r) {
+			adev->gfx.rlc.funcs->get_csb_buffer(adev,
+					adev->gfx.rlc.cs_ptr);
+			amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj);
+		}
+
+		amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
+		if (r)
+			return r;
+	}
+
 	/* csib */
 	WREG32_SOC15(GC, 0, mmRLC_CSIB_ADDR_HI,
 		     adev->gfx.rlc.clear_state_gpu_addr >> 32);
 	WREG32_SOC15(GC, 0, mmRLC_CSIB_ADDR_LO,
 		     adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc);
 	WREG32_SOC15(GC, 0, mmRLC_CSIB_LENGTH, adev->gfx.rlc.clear_state_size);
+
+	return 0;
 }
 
-static void gfx_v10_0_init_pg(struct amdgpu_device *adev)
+static int gfx_v10_0_init_pg(struct amdgpu_device *adev)
 {
 	int i;
+	int r;
+
+	r = gfx_v10_0_init_csb(adev);
+	if (r)
+		return r;
 
-	gfx_v10_0_init_csb(adev);
 	for (i = 0; i < adev->num_vmhubs; i++)
 		amdgpu_gmc_flush_gpu_tlb(adev, 0, i, 0);
 
 	/* TODO: init power gating */
-	return;
+	return 0;
 }
 
 void gfx_v10_0_rlc_stop(struct amdgpu_device *adev)
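The init_csb rework above follows amdgpu's usual buffer-object discipline when regenerating the clear-state buffer after a GPU reset: reserve, kmap, rewrite the contents, kunmap, unreserve, propagating the first error while always dropping the reservation. A userspace analogue of that sequence, with a pthread mutex standing in for the BO reservation:

#include <pthread.h>
#include <string.h>

/* bo stands in for an amdgpu buffer object; the mutex plays the role of
 * the reservation and data[] the role of the kmap'ed CPU pointer. */
struct bo {
	pthread_mutex_t lock;
	char data[64];
};

static int refill_clear_state(struct bo *bo)
{
	int r;

	r = pthread_mutex_lock(&bo->lock);	/* ~amdgpu_bo_reserve() */
	if (r)
		return -r;

	/* ~amdgpu_bo_kmap() + get_csb_buffer(): regenerate the contents */
	memset(bo->data, 0, sizeof(bo->data));

	pthread_mutex_unlock(&bo->lock);	/* ~amdgpu_bo_unreserve() */
	return 0;
}

int main(void)
{
	struct bo bo = { .lock = PTHREAD_MUTEX_INITIALIZER };
	return refill_clear_state(&bo);
}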
@@ -1907,7 +1932,10 @@ static int gfx_v10_0_rlc_resume(struct amdgpu_device *adev)
 		r = gfx_v10_0_wait_for_rlc_autoload_complete(adev);
 		if (r)
 			return r;
-		gfx_v10_0_init_pg(adev);
+
+		r = gfx_v10_0_init_pg(adev);
+		if (r)
+			return r;
 
 		/* enable RLC SRM */
 		gfx_v10_0_rlc_enable_srm(adev);
@@ -1933,7 +1961,10 @@ static int gfx_v10_0_rlc_resume(struct amdgpu_device *adev)
 			return r;
 		}
 
-		gfx_v10_0_init_pg(adev);
+		r = gfx_v10_0_init_pg(adev);
+		if (r)
+			return r;
+
 		adev->gfx.rlc.funcs->start(adev);
 
 		if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) {
@@ -2400,7 +2431,7 @@ static int gfx_v10_0_wait_for_rlc_autoload_complete(struct amdgpu_device *adev)
 	return 0;
 }
 
-static void gfx_v10_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
+static int gfx_v10_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
 {
 	int i;
 	u32 tmp = RREG32_SOC15(GC, 0, mmCP_ME_CNTL);
@@ -2413,7 +2444,17 @@ static void gfx_v10_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
 			adev->gfx.gfx_ring[i].sched.ready = false;
 	}
 	WREG32_SOC15(GC, 0, mmCP_ME_CNTL, tmp);
-	udelay(50);
+
+	for (i = 0; i < adev->usec_timeout; i++) {
+		if (RREG32_SOC15(GC, 0, mmCP_STAT) == 0)
+			break;
+		udelay(1);
+	}
+
+	if (i >= adev->usec_timeout)
+		DRM_ERROR("failed to %s cp gfx\n", enable ? "unhalt" : "halt");
+
+	return 0;
 }
 
 static int gfx_v10_0_cp_gfx_load_pfp_microcode(struct amdgpu_device *adev)
@@ -3114,6 +3155,7 @@ static int gfx_v10_0_gfx_init_queue(struct amdgpu_ring *ring)
 {
 	struct amdgpu_device *adev = ring->adev;
 	struct v10_gfx_mqd *mqd = ring->mqd_ptr;
+	int mqd_idx = ring - &adev->gfx.gfx_ring[0];
 
 	if (!adev->in_gpu_reset && !adev->in_suspend) {
 		memset((void *)mqd, 0, sizeof(*mqd));
@@ -3125,12 +3167,12 @@ static int gfx_v10_0_gfx_init_queue(struct amdgpu_ring *ring)
 #endif
 		nv_grbm_select(adev, 0, 0, 0, 0);
 		mutex_unlock(&adev->srbm_mutex);
-		if (adev->gfx.me.mqd_backup[AMDGPU_MAX_GFX_RINGS])
-			memcpy(adev->gfx.me.mqd_backup[AMDGPU_MAX_GFX_RINGS], mqd, sizeof(*mqd));
+		if (adev->gfx.me.mqd_backup[mqd_idx])
+			memcpy(adev->gfx.me.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
 	} else if (adev->in_gpu_reset) {
 		/* reset mqd with the backup copy */
-		if (adev->gfx.me.mqd_backup[AMDGPU_MAX_GFX_RINGS])
-			memcpy(mqd, adev->gfx.me.mqd_backup[AMDGPU_MAX_GFX_RINGS], sizeof(*mqd));
+		if (adev->gfx.me.mqd_backup[mqd_idx])
+			memcpy(mqd, adev->gfx.me.mqd_backup[mqd_idx], sizeof(*mqd));
 		/* reset the ring */
 		ring->wptr = 0;
 		adev->wb.wb[ring->wptr_offs] = 0;
@@ -704,6 +704,7 @@ static const struct soc15_reg_golden golden_settings_gc_9_4_1_arct[] =
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_4_ARCT, 0x3fffffff, 0xb90f5b1),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_5_ARCT, 0x3ff, 0x135),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_CONFIG, 0xffffffff, 0x011A0000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_FIFO_SIZES, 0xffffffff, 0x00000f00),
 };
 
 static const u32 GFX_RLC_SRM_INDEX_CNTL_ADDR_OFFSETS[] =
@@ -1051,8 +1052,13 @@ static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev)
 	case CHIP_VEGA20:
 		break;
 	case CHIP_RAVEN:
-		if (!(adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8)
-		    &&((adev->gfx.rlc_fw_version != 106 &&
+		/* Disable GFXOFF on original raven.  There are combinations
+		 * of sbios and platforms that are not stable.
+		 */
+		if (!(adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8))
+			adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
+		else if (!(adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8)
+			 &&((adev->gfx.rlc_fw_version != 106 &&
 		     adev->gfx.rlc_fw_version < 531) ||
 		     (adev->gfx.rlc_fw_version == 53815) ||
 		     (adev->gfx.rlc_feature_version < 1) ||
@@ -367,6 +367,8 @@ void gfxhub_v1_0_init(struct amdgpu_device *adev)
 	hub->ctx0_ptb_addr_hi32 =
 		SOC15_REG_OFFSET(GC, 0,
 				 mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32);
+	hub->vm_inv_eng0_sem =
+		SOC15_REG_OFFSET(GC, 0, mmVM_INVALIDATE_ENG0_SEM);
 	hub->vm_inv_eng0_req =
 		SOC15_REG_OFFSET(GC, 0, mmVM_INVALIDATE_ENG0_REQ);
 	hub->vm_inv_eng0_ack =
@@ -356,6 +356,8 @@ void gfxhub_v2_0_init(struct amdgpu_device *adev)
 	hub->ctx0_ptb_addr_hi32 =
 		SOC15_REG_OFFSET(GC, 0,
 				 mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32);
+	hub->vm_inv_eng0_sem =
+		SOC15_REG_OFFSET(GC, 0, mmGCVM_INVALIDATE_ENG0_SEM);
 	hub->vm_inv_eng0_req =
 		SOC15_REG_OFFSET(GC, 0, mmGCVM_INVALIDATE_ENG0_REQ);
 	hub->vm_inv_eng0_ack =
@@ -235,6 +235,29 @@ static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
 	const unsigned eng = 17;
 	unsigned int i;
 
 	spin_lock(&adev->gmc.invalidate_lock);
+	/*
+	 * It may lose gpuvm invalidate acknowldege state across power-gating
+	 * off cycle, add semaphore acquire before invalidation and semaphore
+	 * release after invalidation to avoid entering power gated state
+	 * to WA the Issue
+	 */
+
+	/* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
+	if (vmhub == AMDGPU_MMHUB_0 ||
+	    vmhub == AMDGPU_MMHUB_1) {
+		for (i = 0; i < adev->usec_timeout; i++) {
+			/* a read return value of 1 means semaphore acuqire */
+			tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_sem + eng);
+			if (tmp & 0x1)
+				break;
+			udelay(1);
+		}
+
+		if (i >= adev->usec_timeout)
+			DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n");
+	}
+
 	WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, tmp);
 
 	/*
@@ -254,6 +277,17 @@ static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
 		udelay(1);
 	}
 
+	/* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
+	if (vmhub == AMDGPU_MMHUB_0 ||
+	    vmhub == AMDGPU_MMHUB_1)
+		/*
+		 * add semaphore release after invalidation,
+		 * write with 0 means semaphore release
+		 */
+		WREG32_NO_KIQ(hub->vm_inv_eng0_sem + eng, 0);
+
 	spin_unlock(&adev->gmc.invalidate_lock);
 
 	if (i < adev->usec_timeout)
 		return;
@@ -338,6 +372,20 @@ static uint64_t gmc_v10_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
 	uint32_t req = gmc_v10_0_get_invalidate_req(vmid, 0);
 	unsigned eng = ring->vm_inv_eng;
 
+	/*
+	 * It may lose gpuvm invalidate acknowldege state across power-gating
+	 * off cycle, add semaphore acquire before invalidation and semaphore
+	 * release after invalidation to avoid entering power gated state
+	 * to WA the Issue
+	 */
+
+	/* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
+	if (ring->funcs->vmhub == AMDGPU_MMHUB_0 ||
+	    ring->funcs->vmhub == AMDGPU_MMHUB_1)
+		/* a read return value of 1 means semaphore acuqire */
+		amdgpu_ring_emit_reg_wait(ring,
+					  hub->vm_inv_eng0_sem + eng, 0x1, 0x1);
+
 	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 + (2 * vmid),
 			      lower_32_bits(pd_addr));
@@ -348,6 +396,15 @@ static uint64_t gmc_v10_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
 					    hub->vm_inv_eng0_ack + eng,
 					    req, 1 << vmid);
 
+	/* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
+	if (ring->funcs->vmhub == AMDGPU_MMHUB_0 ||
+	    ring->funcs->vmhub == AMDGPU_MMHUB_1)
+		/*
+		 * add semaphore release after invalidation,
+		 * write with 0 means semaphore release
+		 */
+		amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_sem + eng, 0);
+
 	return pd_addr;
 }
 
@@ -459,6 +459,29 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
 	}
 
 	spin_lock(&adev->gmc.invalidate_lock);
+
+	/*
+	 * It may lose gpuvm invalidate acknowldege state across power-gating
+	 * off cycle, add semaphore acquire before invalidation and semaphore
+	 * release after invalidation to avoid entering power gated state
+	 * to WA the Issue
+	 */
+
+	/* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
+	if (vmhub == AMDGPU_MMHUB_0 ||
+	    vmhub == AMDGPU_MMHUB_1) {
+		for (j = 0; j < adev->usec_timeout; j++) {
+			/* a read return value of 1 means semaphore acuqire */
+			tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_sem + eng);
+			if (tmp & 0x1)
+				break;
+			udelay(1);
+		}
+
+		if (j >= adev->usec_timeout)
+			DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n");
+	}
+
 	WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, tmp);
 
 	/*
@@ -474,7 +497,18 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
 			break;
 		udelay(1);
 	}
 
+	/* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
+	if (vmhub == AMDGPU_MMHUB_0 ||
+	    vmhub == AMDGPU_MMHUB_1)
+		/*
+		 * add semaphore release after invalidation,
+		 * write with 0 means semaphore release
+		 */
+		WREG32_NO_KIQ(hub->vm_inv_eng0_sem + eng, 0);
+
 	spin_unlock(&adev->gmc.invalidate_lock);
 
 	if (j < adev->usec_timeout)
 		return;
@@ -489,6 +523,20 @@ static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
 	uint32_t req = gmc_v9_0_get_invalidate_req(vmid, 0);
 	unsigned eng = ring->vm_inv_eng;
 
+	/*
+	 * It may lose gpuvm invalidate acknowldege state across power-gating
+	 * off cycle, add semaphore acquire before invalidation and semaphore
+	 * release after invalidation to avoid entering power gated state
+	 * to WA the Issue
+	 */
+
+	/* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
+	if (ring->funcs->vmhub == AMDGPU_MMHUB_0 ||
+	    ring->funcs->vmhub == AMDGPU_MMHUB_1)
+		/* a read return value of 1 means semaphore acuqire */
+		amdgpu_ring_emit_reg_wait(ring,
+					  hub->vm_inv_eng0_sem + eng, 0x1, 0x1);
+
 	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 + (2 * vmid),
 			      lower_32_bits(pd_addr));
@@ -499,6 +547,15 @@ static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
 					    hub->vm_inv_eng0_ack + eng,
 					    req, 1 << vmid);
 
+	/* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
+	if (ring->funcs->vmhub == AMDGPU_MMHUB_0 ||
+	    ring->funcs->vmhub == AMDGPU_MMHUB_1)
+		/*
+		 * add semaphore release after invalidation,
+		 * write with 0 means semaphore release
+		 */
+		amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_sem + eng, 0);
+
 	return pd_addr;
 }
 
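The gmc_v10_0 and gmc_v9_0 hunks above all add the same MMHUB powergating workaround: poll the per-engine semaphore register until the hub grants it (a read of 1), perform the invalidation, then write 0 to release, so the invalidate-acknowledge state cannot be lost across a powergating cycle. A self-contained sketch of that poll/use/release shape; reg_read() and reg_write() are stand-ins for RREG32_NO_KIQ()/WREG32_NO_KIQ(), not real driver calls:

#include <stdint.h>
#include <stdio.h>

static volatile uint32_t sem_reg = 1;	/* pretend the hub granted it */

static uint32_t reg_read(volatile uint32_t *r)		{ return *r; }
static void reg_write(volatile uint32_t *r, uint32_t v)	{ *r = v; }

int main(void)
{
	const int timeout = 1000;
	int i;

	/* acquire: a read return value of 1 means the semaphore is held */
	for (i = 0; i < timeout; i++) {
		if (reg_read(&sem_reg) & 0x1)
			break;
	}
	if (i >= timeout) {
		fprintf(stderr, "semaphore acquire timed out\n");
		return 1;
	}

	/* ... issue the TLB invalidation request here, with powergating
	 * fenced off by the held semaphore ... */

	reg_write(&sem_reg, 0);		/* write 0 == release */
	return 0;
}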
@@ -420,6 +420,8 @@ void mmhub_v1_0_init(struct amdgpu_device *adev)
 	hub->ctx0_ptb_addr_hi32 =
 		SOC15_REG_OFFSET(MMHUB, 0,
 				 mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32);
+	hub->vm_inv_eng0_sem =
+		SOC15_REG_OFFSET(MMHUB, 0, mmVM_INVALIDATE_ENG0_SEM);
 	hub->vm_inv_eng0_req =
 		SOC15_REG_OFFSET(MMHUB, 0, mmVM_INVALIDATE_ENG0_REQ);
 	hub->vm_inv_eng0_ack =
@@ -348,6 +348,8 @@ void mmhub_v2_0_init(struct amdgpu_device *adev)
 	hub->ctx0_ptb_addr_hi32 =
 		SOC15_REG_OFFSET(MMHUB, 0,
 				 mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32);
+	hub->vm_inv_eng0_sem =
+		SOC15_REG_OFFSET(MMHUB, 0, mmMMVM_INVALIDATE_ENG0_SEM);
 	hub->vm_inv_eng0_req =
 		SOC15_REG_OFFSET(MMHUB, 0, mmMMVM_INVALIDATE_ENG0_REQ);
 	hub->vm_inv_eng0_ack =
@@ -504,6 +504,10 @@ void mmhub_v9_4_init(struct amdgpu_device *adev)
 			SOC15_REG_OFFSET(MMHUB, 0,
 			    mmVML2VC0_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32) +
 			    i * MMHUB_INSTANCE_REGISTER_OFFSET;
+		hub[i]->vm_inv_eng0_sem =
+			SOC15_REG_OFFSET(MMHUB, 0,
+			    mmVML2VC0_VM_INVALIDATE_ENG0_SEM) +
+			    i * MMHUB_INSTANCE_REGISTER_OFFSET;
 		hub[i]->vm_inv_eng0_req =
 			SOC15_REG_OFFSET(MMHUB, 0,
 			    mmVML2VC0_VM_INVALIDATE_ENG0_REQ) +
@@ -40,6 +40,7 @@
 #include "gc/gc_10_1_0_sh_mask.h"
 #include "hdp/hdp_5_0_0_offset.h"
 #include "hdp/hdp_5_0_0_sh_mask.h"
+#include "smuio/smuio_11_0_0_offset.h"
 
 #include "soc15.h"
 #include "soc15_common.h"
@@ -156,8 +157,27 @@ static bool nv_read_disabled_bios(struct amdgpu_device *adev)
 static bool nv_read_bios_from_rom(struct amdgpu_device *adev,
 				  u8 *bios, u32 length_bytes)
 {
-	/* TODO: will implement it when SMU header is available */
-	return false;
+	u32 *dw_ptr;
+	u32 i, length_dw;
+
+	if (bios == NULL)
+		return false;
+	if (length_bytes == 0)
+		return false;
+	/* APU vbios image is part of sbios image */
+	if (adev->flags & AMD_IS_APU)
+		return false;
+
+	dw_ptr = (u32 *)bios;
+	length_dw = ALIGN(length_bytes, 4) / 4;
+
+	/* set rom index to 0 */
+	WREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX), 0);
+	/* read out the rom data */
+	for (i = 0; i < length_dw; i++)
+		dw_ptr[i] = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA));
+
+	return true;
 }
 
 static struct soc15_allowed_register_entry nv_allowed_read_registers[] = {
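The nv_read_bios_from_rom implementation above (the "vbios fetch directly from rom on navi" item in the changelog) uses the classic index/data register pair: write ROM_INDEX once to set the cursor, then read ROM_DATA repeatedly while the hardware auto-increments. A sketch of the idiom against a fake ROM; none of the helpers below are the real SMUIO interface:

#include <stdint.h>
#include <stdio.h>

static const uint32_t fake_rom[] = { 0xaa55, 0x1111, 0x2222, 0x3333 };
static uint32_t rom_cursor;

static void write_rom_index(uint32_t idx)	{ rom_cursor = idx; }
static uint32_t read_rom_data(void)		{ return fake_rom[rom_cursor++]; }

static int read_bios(uint8_t *bios, uint32_t length_bytes)
{
	uint32_t *dw_ptr;
	uint32_t i, length_dw;

	if (!bios || !length_bytes)
		return 0;

	dw_ptr = (uint32_t *)bios;
	length_dw = (length_bytes + 3) / 4;	/* ALIGN(len, 4) / 4 */

	write_rom_index(0);			/* set the cursor once */
	for (i = 0; i < length_dw; i++)
		dw_ptr[i] = read_rom_data();	/* auto-incrementing reads */
	return 1;
}

int main(void)
{
	uint32_t buf[4];
	return read_bios((uint8_t *)buf, sizeof(buf)) ? 0 : 1;
}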
@@ -64,7 +64,8 @@ static int si_ih_irq_init(struct amdgpu_device *adev)
 	u32 interrupt_cntl, ih_cntl, ih_rb_cntl;
 
 	si_ih_disable_interrupts(adev);
-	WREG32(INTERRUPT_CNTL2, adev->irq.ih.gpu_addr >> 8);
+	/* set dummy read address to dummy page address */
+	WREG32(INTERRUPT_CNTL2, adev->dummy_page_addr >> 8);
 	interrupt_cntl = RREG32(INTERRUPT_CNTL);
 	interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE;
 	interrupt_cntl &= ~IH_REQ_NONSNOOP_EN;
@@ -28,8 +28,8 @@
 #include "nbio_v7_0.h"
 #include "nbio_v7_4.h"
 
-#define SOC15_FLUSH_GPU_TLB_NUM_WREG		4
-#define SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT	1
+#define SOC15_FLUSH_GPU_TLB_NUM_WREG		6
+#define SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT	3
 
 extern const struct amd_ip_funcs soc15_common_ip_funcs;
 
@@ -293,7 +293,7 @@ static int vcn_v2_5_hw_fini(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	struct amdgpu_ring *ring;
-	int i;
+	int i, j;
 
 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
 		if (adev->vcn.harvest_config & (1 << i))
@@ -305,8 +305,8 @@ static int vcn_v2_5_hw_fini(void *handle)
 
 		ring->sched.ready = false;
 
-		for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
-			ring = &adev->vcn.inst[i].ring_enc[i];
+		for (j = 0; j < adev->vcn.num_enc_rings; ++j) {
+			ring = &adev->vcn.inst[i].ring_enc[j];
 			ring->sched.ready = false;
 		}
 
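The vcn_v2_5_hw_fini fix above is a classic shadowed-index bug: the inner loop reused i, both clobbering the outer instance counter and indexing ring_enc[i] with the instance number instead of the ring number. A tiny demonstration of why the two indices must be distinct:

#include <stdio.h>

int main(void)
{
	int num_inst = 2, num_enc = 2;
	int i, j;

	/* the buggy shape, for (i = 0; ...) nested inside the i loop,
	 * would touch inst[i].ring_enc[i] and break outer iteration */
	for (i = 0; i < num_inst; ++i)
		for (j = 0; j < num_enc; ++j)	/* fixed: distinct index */
			printf("inst %d enc ring %d\n", i, j);
	return 0;
}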
@@ -719,7 +719,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
 	 */
 	if (adev->flags & AMD_IS_APU &&
 	    adev->asic_type >= CHIP_CARRIZO &&
-	    adev->asic_type <= CHIP_RAVEN)
+	    adev->asic_type < CHIP_RAVEN)
 		init_data.flags.gpu_vm_support = true;
 
 	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
@@ -36,7 +36,9 @@
 #include "dc_link_ddc.h"
 
 #include "i2caux_interface.h"
-
+#if defined(CONFIG_DEBUG_FS)
+#include "amdgpu_dm_debugfs.h"
+#endif
 /* #define TRACE_DPCD */
 
 #ifdef TRACE_DPCD
@@ -147,6 +149,12 @@ amdgpu_dm_mst_connector_late_register(struct drm_connector *connector)
 		to_amdgpu_dm_connector(connector);
 	struct drm_dp_mst_port *port = amdgpu_dm_connector->port;
 
+#if defined(CONFIG_DEBUG_FS)
+	connector_debugfs_init(amdgpu_dm_connector);
+	amdgpu_dm_connector->debugfs_dpcd_address = 0;
+	amdgpu_dm_connector->debugfs_dpcd_size = 0;
+#endif
+
 	return drm_dp_mst_connector_late_register(connector, port);
 }
 
@@ -591,10 +591,18 @@ int smu_sys_set_pp_table(struct smu_context *smu, void *buf, size_t size)
 	smu_table->power_play_table = smu_table->hardcode_pptable;
 	smu_table->power_play_table_size = size;
 
+	/*
+	 * Special hw_fini action(for Navi1x, the DPMs disablement will be
+	 * skipped) may be needed for custom pptable uploading.
+	 */
+	smu->uploading_custom_pp_table = true;
+
 	ret = smu_reset(smu);
 	if (ret)
 		pr_info("smu reset failed, ret = %d\n", ret);
 
+	smu->uploading_custom_pp_table = false;
+
 failed:
 	mutex_unlock(&smu->mutex);
 	return ret;
@@ -719,6 +727,7 @@ static int smu_set_funcs(struct amdgpu_device *adev)
 
 	switch (adev->asic_type) {
 	case CHIP_VEGA20:
+		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
 		vega20_set_ppt_funcs(smu);
 		break;
 	case CHIP_NAVI10:
@@ -727,6 +736,7 @@ static int smu_set_funcs(struct amdgpu_device *adev)
 		navi10_set_ppt_funcs(smu);
 		break;
 	case CHIP_ARCTURUS:
+		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
 		arcturus_set_ppt_funcs(smu);
 		/* OD is not supported on Arcturus */
 		smu->od_enabled =false;
@@ -1295,10 +1305,25 @@ static int smu_hw_fini(void *handle)
 			return ret;
 	}
 
-	ret = smu_stop_dpms(smu);
-	if (ret) {
-		pr_warn("Fail to stop Dpms!\n");
-		return ret;
+	/*
+	 * For custom pptable uploading, skip the DPM features
+	 * disable process on Navi1x ASICs.
+	 *   - As the gfx related features are under control of
+	 *     RLC on those ASICs. RLC reinitialization will be
+	 *     needed to reenable them. That will cost much more
+	 *     efforts.
+	 *
+	 *   - SMU firmware can handle the DPM reenablement
+	 *     properly.
+	 */
+	if (!smu->uploading_custom_pp_table ||
+	    !((adev->asic_type >= CHIP_NAVI10) &&
+	      (adev->asic_type <= CHIP_NAVI12))) {
+		ret = smu_stop_dpms(smu);
+		if (ret) {
+			pr_warn("Fail to stop Dpms!\n");
+			return ret;
+		}
 	}
 
 	kfree(table_context->driver_pptable);
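The smu_hw_fini change above keys off the new uploading_custom_pp_table flag: when a custom pptable upload triggered the reset on a Navi1x part, the DPM disable step is skipped because, per the comment, the RLC would otherwise need reinitializing and the SMU firmware handles re-enablement itself. A sketch of that predicate with illustrative names only:

#include <stdbool.h>
#include <stdio.h>

enum asic { VEGA20 = 0, NAVI10, NAVI12, ARCTURUS };

struct smu {
	bool uploading_custom_pp_table;
	enum asic asic_type;
};

static bool skip_stop_dpms(const struct smu *s)
{
	return s->uploading_custom_pp_table &&
	       s->asic_type >= NAVI10 && s->asic_type <= NAVI12;
}

int main(void)
{
	struct smu s = { .uploading_custom_pp_table = true,
			 .asic_type = NAVI10 };
	printf("stop_dpms %s\n", skip_stop_dpms(&s) ? "skipped" : "runs");
	return 0;
}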
@@ -81,6 +81,8 @@ static void hwmgr_init_workload_prority(struct pp_hwmgr *hwmgr)
 
 int hwmgr_early_init(struct pp_hwmgr *hwmgr)
 {
+	struct amdgpu_device *adev;
+
 	if (!hwmgr)
 		return -EINVAL;
 
@@ -94,8 +96,11 @@ int hwmgr_early_init(struct pp_hwmgr *hwmgr)
 	hwmgr_init_workload_prority(hwmgr);
 	hwmgr->gfxoff_state_changed_by_workload = false;
 
+	adev = hwmgr->adev;
+
 	switch (hwmgr->chip_family) {
 	case AMDGPU_FAMILY_CI:
+		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
 		hwmgr->smumgr_funcs = &ci_smu_funcs;
 		ci_set_asic_special_caps(hwmgr);
 		hwmgr->feature_mask &= ~(PP_VBI_TIME_SUPPORT_MASK |
@@ -106,12 +111,14 @@ int hwmgr_early_init(struct pp_hwmgr *hwmgr)
 		smu7_init_function_pointers(hwmgr);
 		break;
 	case AMDGPU_FAMILY_CZ:
+		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
 		hwmgr->od_enabled = false;
 		hwmgr->smumgr_funcs = &smu8_smu_funcs;
 		hwmgr->feature_mask &= ~PP_GFXOFF_MASK;
 		smu8_init_function_pointers(hwmgr);
 		break;
 	case AMDGPU_FAMILY_VI:
+		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
 		hwmgr->feature_mask &= ~PP_GFXOFF_MASK;
 		switch (hwmgr->chip_id) {
 		case CHIP_TOPAZ:
@@ -153,6 +160,7 @@ int hwmgr_early_init(struct pp_hwmgr *hwmgr)
 	case AMDGPU_FAMILY_AI:
 		switch (hwmgr->chip_id) {
 		case CHIP_VEGA10:
+			adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
 			hwmgr->feature_mask &= ~PP_GFXOFF_MASK;
 			hwmgr->smumgr_funcs = &vega10_smu_funcs;
 			vega10_hwmgr_init(hwmgr);
@@ -162,6 +170,7 @@ int hwmgr_early_init(struct pp_hwmgr *hwmgr)
 			vega12_hwmgr_init(hwmgr);
 			break;
 		case CHIP_VEGA20:
+			adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
 			hwmgr->feature_mask &= ~PP_GFXOFF_MASK;
 			hwmgr->smumgr_funcs = &vega20_smu_funcs;
 			vega20_hwmgr_init(hwmgr);
@@ -3477,18 +3477,31 @@ static int smu7_get_pp_table_entry(struct pp_hwmgr *hwmgr,
 
 static int smu7_get_gpu_power(struct pp_hwmgr *hwmgr, u32 *query)
 {
+	struct amdgpu_device *adev = hwmgr->adev;
 	int i;
 	u32 tmp = 0;
 
 	if (!query)
 		return -EINVAL;
 
-	smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetCurrPkgPwr, 0);
-	tmp = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
-	*query = tmp;
+	/*
+	 * PPSMC_MSG_GetCurrPkgPwr is not supported on:
+	 *  - Hawaii
+	 *  - Bonaire
+	 *  - Fiji
+	 *  - Tonga
+	 */
+	if ((adev->asic_type != CHIP_HAWAII) &&
+	    (adev->asic_type != CHIP_BONAIRE) &&
+	    (adev->asic_type != CHIP_FIJI) &&
+	    (adev->asic_type != CHIP_TONGA)) {
+		smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetCurrPkgPwr, 0);
+		tmp = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
+		*query = tmp;
 
-	if (tmp != 0)
-		return 0;
+		if (tmp != 0)
+			return 0;
+	}
 
 	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogStart);
 	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
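The smu7_get_gpu_power rework above is a capability-gated fast path: the one-shot GetCurrPkgPwr query is attempted only on ASICs that implement it, and a zero reading still falls through to the slower PmStatusLog sampling. A sketch of the control flow with stand-in functions:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum asic { HAWAII, BONAIRE, FIJI, TONGA, POLARIS10 };

static bool has_curr_pkg_pwr(enum asic a)
{
	return a != HAWAII && a != BONAIRE && a != FIJI && a != TONGA;
}

static uint32_t query_fast(void)  { return 0; }	/* SMC message, may read 0 */
static uint32_t sample_slow(void) { return 42; }	/* PmStatusLog path */

static uint32_t get_gpu_power(enum asic a)
{
	if (has_curr_pkg_pwr(a)) {
		uint32_t v = query_fast();
		if (v != 0)
			return v;	/* fast path produced a reading */
	}
	return sample_slow();		/* unsupported ASIC or zero reading */
}

int main(void)
{
	printf("%u\n", get_gpu_power(POLARIS10));
	return 0;
}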
@@ -390,6 +390,7 @@ struct smu_context
 
 	uint32_t smc_if_version;
 
+	bool uploading_custom_pp_table;
 };
 
 struct i2c_adapter;
@@ -859,6 +859,12 @@ static int navi10_force_clk_levels(struct smu_context *smu,
 	case SMU_UCLK:
 	case SMU_DCEFCLK:
 	case SMU_FCLK:
+		/* There is only 2 levels for fine grained DPM */
+		if (navi10_is_support_fine_grained_dpm(smu, clk_type)) {
+			soft_max_level = (soft_max_level >= 1 ? 1 : 0);
+			soft_min_level = (soft_min_level >= 1 ? 1 : 0);
+		}
+
 		ret = smu_get_dpm_freq_by_index(smu, clk_type, soft_min_level, &min_freq);
 		if (ret)
 			return size;
@@ -1980,6 +1986,17 @@ static int navi10_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TABL
 	return ret;
 }
 
+static int navi10_run_btc(struct smu_context *smu)
+{
+	int ret = 0;
+
+	ret = smu_send_smc_msg(smu, SMU_MSG_RunBtc);
+	if (ret)
+		pr_err("RunBtc failed!\n");
+
+	return ret;
+}
+
 static const struct pptable_funcs navi10_ppt_funcs = {
 	.tables_init = navi10_tables_init,
 	.alloc_dpm_context = navi10_allocate_dpm_context,
@@ -2071,6 +2088,7 @@ static const struct pptable_funcs navi10_ppt_funcs = {
 	.set_default_od_settings = navi10_set_default_od_settings,
 	.od_edit_dpm_table = navi10_od_edit_dpm_table,
 	.get_pptable_power_limit = navi10_get_pptable_power_limit,
+	.run_btc = navi10_run_btc,
 };
 
 void navi10_set_ppt_funcs(struct smu_context *smu)
@@ -6965,8 +6965,8 @@ static int cik_irq_init(struct radeon_device *rdev)
 	}
 
 	/* setup interrupt control */
-	/* XXX this should actually be a bus address, not an MC address. same on older asics */
-	WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8);
+	/* set dummy read address to dummy page address */
+	WREG32(INTERRUPT_CNTL2, rdev->dummy_page.addr >> 8);
 	interrupt_cntl = RREG32(INTERRUPT_CNTL);
 	/* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi
 	 * IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN
@@ -3696,8 +3696,8 @@ int r600_irq_init(struct radeon_device *rdev)
 	}
 
 	/* setup interrupt control */
-	/* set dummy read address to ring address */
-	WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8);
+	/* set dummy read address to dummy page address */
+	WREG32(INTERRUPT_CNTL2, rdev->dummy_page.addr >> 8);
 	interrupt_cntl = RREG32(INTERRUPT_CNTL);
 	/* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi
 	 * IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN
@@ -5997,8 +5997,8 @@ static int si_irq_init(struct radeon_device *rdev)
 	}
 
 	/* setup interrupt control */
-	/* set dummy read address to ring address */
-	WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8);
+	/* set dummy read address to dummy page address */
+	WREG32(INTERRUPT_CNTL2, rdev->dummy_page.addr >> 8);
 	interrupt_cntl = RREG32(INTERRUPT_CNTL);
 	/* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi
 	 * IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN
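The three radeon hunks above, and the amdgpu si_ih.c hunk earlier, are the "Fix bad DMA on some PPC platforms" item from the changelog: INTERRUPT_CNTL2 programs the address the interrupt handler block uses for its dummy read, and it was being fed the IH ring's MC address where (per the XXX comment being replaced in cik.c) a bus address is expected; the dummy page is safe to DMA-read. The value is programmed as addr >> 8, i.e. in 256-byte units. A plain illustration of that encoding:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* bus address of the dummy page (page aligned, so bits 0-7 are 0) */
	uint64_t dummy_page_addr = 0x123456000ULL;

	/* INTERRUPT_CNTL2 holds the address in 256-byte units */
	uint32_t cntl2 = (uint32_t)(dummy_page_addr >> 8);

	printf("INTERRUPT_CNTL2 = 0x%08x\n", cntl2);
	return 0;
}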