/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "pp_debug.h"
#include <linux/firmware.h>
#include "amdgpu.h"
#include "amdgpu_smu.h"
#include "atomfirmware.h"
#include "amdgpu_atomfirmware.h"
#include "smu_v11_0.h"
#include "smu11_driver_if.h"
#include "soc15_common.h"
#include "atom.h"
#include "vega20_ppt.h"
#include "pp_thermal.h"

#include "asic_reg/thm/thm_11_0_2_offset.h"
#include "asic_reg/thm/thm_11_0_2_sh_mask.h"
#include "asic_reg/mp/mp_9_0_offset.h"
#include "asic_reg/mp/mp_9_0_sh_mask.h"
#include "asic_reg/nbio/nbio_7_4_offset.h"

MODULE_FIRMWARE("amdgpu/vega20_smc.bin");

#define SMU11_TOOL_SIZE				0x19000

#define SMU11_THERMAL_MINIMUM_ALERT_TEMP	0
#define SMU11_THERMAL_MAXIMUM_ALERT_TEMP	255

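/*
 * Driver <-> SMU message interface used by the helpers below: the message
 * index is written to MP1_SMN_C2PMSG_66, an optional 32-bit argument to
 * MP1_SMN_C2PMSG_82, and MP1_SMN_C2PMSG_90 is polled for the SMU response
 * (0x1 indicates success).
 */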
static int smu_v11_0_send_msg_without_waiting(struct smu_context *smu,
					      uint16_t msg)
{
	struct amdgpu_device *adev = smu->adev;

	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, msg);

	return 0;
}

static int smu_v11_0_read_arg(struct smu_context *smu, uint32_t *arg)
{
	struct amdgpu_device *adev = smu->adev;

	*arg = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82);

	return 0;
}

static int smu_v11_0_wait_for_response(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t cur_value, i;

	for (i = 0; i < adev->usec_timeout; i++) {
		cur_value = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90);
		if ((cur_value & MP1_C2PMSG_90__CONTENT_MASK) != 0)
			break;
		udelay(1);
	}

	/* timeout means wrong logic */
	if (i == adev->usec_timeout)
		return -ETIME;

	return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90) == 0x1 ? 0 : -EIO;
}

static int smu_v11_0_send_msg(struct smu_context *smu, uint16_t msg)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0, index = 0;

	index = smu_msg_get_index(smu, msg);
	if (index < 0)
		return index;

	smu_v11_0_wait_for_response(smu);

	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);

	smu_v11_0_send_msg_without_waiting(smu, (uint16_t)index);

	ret = smu_v11_0_wait_for_response(smu);
	if (ret)
		pr_err("Failed to send message 0x%x, response 0x%x\n", msg,
		       ret);

	return ret;
}

static int
smu_v11_0_send_msg_with_param(struct smu_context *smu, uint16_t msg,
			      uint32_t param)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0, index = 0;

	index = smu_msg_get_index(smu, msg);
	if (index < 0)
		return index;

	ret = smu_v11_0_wait_for_response(smu);
	if (ret)
		pr_err("Failed to send message 0x%x, response 0x%x\n", msg,
		       ret);

	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);

	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82, param);

	smu_v11_0_send_msg_without_waiting(smu, (uint16_t)index);

	ret = smu_v11_0_wait_for_response(smu);
	if (ret)
		pr_err("Failed to send message 0x%x, response 0x%x\n", msg,
		       ret);

	return ret;
}

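/*
 * Fetch and validate the SMC firmware image for the ASIC. With PSP
 * front-door loading (AMDGPU_FW_LOAD_PSP) the image is also registered in
 * the common ucode list so it can be handed to the PSP.
 */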
static int smu_v11_0_init_microcode(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	const char *chip_name;
	char fw_name[30];
	int err = 0;
	const struct smc_firmware_header_v1_0 *hdr;
	const struct common_firmware_header *header;
	struct amdgpu_firmware_info *ucode = NULL;

	switch (adev->asic_type) {
	case CHIP_VEGA20:
		chip_name = "vega20";
		break;
	default:
		BUG();
	}

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_smc.bin", chip_name);

	err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->pm.fw);
	if (err)
		goto out;

	hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
	amdgpu_ucode_print_smc_hdr(&hdr->header);
	adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		ucode = &adev->firmware.ucode[AMDGPU_UCODE_ID_SMC];
		ucode->ucode_id = AMDGPU_UCODE_ID_SMC;
		ucode->fw = adev->pm.fw;
		header = (const struct common_firmware_header *)ucode->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
	}

out:
	if (err) {
		DRM_ERROR("smu_v11_0: Failed to load firmware \"%s\"\n",
			  fw_name);
		release_firmware(adev->pm.fw);
		adev->pm.fw = NULL;
	}
	return err;
}

static int smu_v11_0_load_microcode(struct smu_context *smu)
{
	return 0;
}

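/*
 * Check that the MP1 (SMU) firmware is alive by reading smnMP1_FIRMWARE_FLAGS
 * through the NBIO PCIE index/data register pair and testing the
 * INTERRUPTS_ENABLED flag.
 */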
static int smu_v11_0_check_fw_status(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t mp1_fw_flags;

	WREG32_SOC15(NBIF, 0, mmPCIE_INDEX2,
		     (MP1_Public | (smnMP1_FIRMWARE_FLAGS & 0xffffffff)));

	mp1_fw_flags = RREG32_SOC15(NBIF, 0, mmPCIE_DATA2);

	if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
	    MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
		return 0;

	return -EIO;
}

static int smu_v11_0_check_fw_version(struct smu_context *smu)
{
	uint32_t smu_version = 0xff;
	int ret = 0;

	ret = smu_send_smc_msg(smu, SMU_MSG_GetDriverIfVersion);
	if (ret)
		goto err;

	ret = smu_read_smc_arg(smu, &smu_version);
	if (ret)
		goto err;

	if (smu_version == SMU11_DRIVER_IF_VERSION)
		return 0;
err:
	return ret;
}

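/*
 * Locate the raw powerplay table in the VBIOS (ATOM "powerplayinfo" data
 * table) and record its address and size for later parsing.
 */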
static int smu_v11_0_read_pptable_from_vbios(struct smu_context *smu)
{
	int ret, index;
	uint16_t size;
	uint8_t frev, crev;
	void *table;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    powerplayinfo);

	ret = smu_get_atom_data_table(smu, index, &size, &frev, &crev,
				      (uint8_t **)&table);
	if (ret)
		return ret;

	if (!smu->smu_table.power_play_table)
		smu->smu_table.power_play_table = table;
	if (!smu->smu_table.power_play_table_size)
		smu->smu_table.power_play_table_size = size;

	return 0;
}

static int smu_v11_0_init_dpm_context(struct smu_context *smu)
{
	struct smu_dpm_context *smu_dpm = &smu->smu_dpm;

	if (smu_dpm->dpm_context || smu_dpm->dpm_context_size != 0)
		return -EINVAL;

	return smu_alloc_dpm_context(smu);
}

static int smu_v11_0_fini_dpm_context(struct smu_context *smu)
{
	struct smu_dpm_context *smu_dpm = &smu->smu_dpm;

	if (!smu_dpm->dpm_context || smu_dpm->dpm_context_size == 0)
		return -EINVAL;

	kfree(smu_dpm->dpm_context);
	smu_dpm->dpm_context = NULL;
	smu_dpm->dpm_context_size = 0;

	return 0;
}

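/*
 * Describe the driver/SMU shared tables (pptable, watermarks, metrics,
 * overdrive, tools log) with their sizes, page alignment and VRAM domain.
 * The backing buffers are presumably allocated elsewhere in the SMU core
 * from these descriptions.
 */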
static int smu_v11_0_init_smc_tables(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *tables = NULL;
	int ret = 0;

	if (smu_table->tables || smu_table->table_count != 0)
		return -EINVAL;

	tables = kcalloc(TABLE_COUNT, sizeof(struct smu_table), GFP_KERNEL);
	if (!tables)
		return -ENOMEM;

	smu_table->tables = tables;
	smu_table->table_count = TABLE_COUNT;

	SMU_TABLE_INIT(tables, TABLE_PPTABLE, sizeof(PPTable_t),
		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
	SMU_TABLE_INIT(tables, TABLE_WATERMARKS, sizeof(Watermarks_t),
		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
	SMU_TABLE_INIT(tables, TABLE_SMU_METRICS, sizeof(SmuMetrics_t),
		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
	SMU_TABLE_INIT(tables, TABLE_OVERDRIVE, sizeof(OverDriveTable_t),
		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
	SMU_TABLE_INIT(tables, TABLE_PMSTATUSLOG, SMU11_TOOL_SIZE, PAGE_SIZE,
		       AMDGPU_GEM_DOMAIN_VRAM);

	ret = smu_v11_0_init_dpm_context(smu);
	if (ret)
		return ret;

	return 0;
}

static int smu_v11_0_fini_smc_tables(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	int ret = 0;

	if (!smu_table->tables || smu_table->table_count == 0)
		return -EINVAL;

	kfree(smu_table->tables);
	smu_table->tables = NULL;
	smu_table->table_count = 0;

	ret = smu_v11_0_fini_dpm_context(smu);
	if (ret)
		return ret;

	return 0;
}

static int smu_v11_0_init_power(struct smu_context *smu)
{
	struct smu_power_context *smu_power = &smu->smu_power;

	if (smu_power->power_context || smu_power->power_context_size != 0)
		return -EINVAL;

	smu_power->power_context = kzalloc(sizeof(struct smu_11_0_dpm_context),
					   GFP_KERNEL);
	if (!smu_power->power_context)
		return -ENOMEM;
	smu_power->power_context_size = sizeof(struct smu_11_0_dpm_context);

	return 0;
}

static int smu_v11_0_fini_power(struct smu_context *smu)
{
	struct smu_power_context *smu_power = &smu->smu_power;

	if (!smu_power->power_context || smu_power->power_context_size == 0)
		return -EINVAL;

	kfree(smu_power->power_context);
	smu_power->power_context = NULL;
	smu_power->power_context_size = 0;

	return 0;
}

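/*
 * Read the bootup clock/voltage defaults from the ATOM firmwareinfo table.
 * Content revisions below 3 only carry the v3_1 layout; revision 3 and
 * later use the v3_3 layout, which additionally provides a pptable id.
 */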
int smu_v11_0_get_vbios_bootup_values(struct smu_context *smu)
{
	int ret, index;
	uint16_t size;
	uint8_t frev, crev;
	struct atom_common_table_header *header;
	struct atom_firmware_info_v3_3 *v_3_3;
	struct atom_firmware_info_v3_1 *v_3_1;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    firmwareinfo);

	ret = smu_get_atom_data_table(smu, index, &size, &frev, &crev,
				      (uint8_t **)&header);
	if (ret)
		return ret;

	if (header->format_revision != 3) {
		pr_err("unknown atom_firmware_info version for smu11!\n");
		return -EINVAL;
	}

	switch (header->content_revision) {
	case 0:
	case 1:
	case 2:
		v_3_1 = (struct atom_firmware_info_v3_1 *)header;
		smu->smu_table.boot_values.revision = v_3_1->firmware_revision;
		smu->smu_table.boot_values.gfxclk = v_3_1->bootup_sclk_in10khz;
		smu->smu_table.boot_values.uclk = v_3_1->bootup_mclk_in10khz;
		smu->smu_table.boot_values.socclk = 0;
		smu->smu_table.boot_values.dcefclk = 0;
		smu->smu_table.boot_values.vddc = v_3_1->bootup_vddc_mv;
		smu->smu_table.boot_values.vddci = v_3_1->bootup_vddci_mv;
		smu->smu_table.boot_values.mvddc = v_3_1->bootup_mvddc_mv;
		smu->smu_table.boot_values.vdd_gfx = v_3_1->bootup_vddgfx_mv;
		smu->smu_table.boot_values.cooling_id = v_3_1->coolingsolution_id;
		smu->smu_table.boot_values.pp_table_id = 0;
		break;
	case 3:
	default:
		v_3_3 = (struct atom_firmware_info_v3_3 *)header;
		smu->smu_table.boot_values.revision = v_3_3->firmware_revision;
		smu->smu_table.boot_values.gfxclk = v_3_3->bootup_sclk_in10khz;
		smu->smu_table.boot_values.uclk = v_3_3->bootup_mclk_in10khz;
		smu->smu_table.boot_values.socclk = 0;
		smu->smu_table.boot_values.dcefclk = 0;
		smu->smu_table.boot_values.vddc = v_3_3->bootup_vddc_mv;
		smu->smu_table.boot_values.vddci = v_3_3->bootup_vddci_mv;
		smu->smu_table.boot_values.mvddc = v_3_3->bootup_mvddc_mv;
		smu->smu_table.boot_values.vdd_gfx = v_3_3->bootup_vddgfx_mv;
		smu->smu_table.boot_values.cooling_id = v_3_3->coolingsolution_id;
		smu->smu_table.boot_values.pp_table_id = v_3_3->pplib_pptable_id;
	}

	return 0;
}

static int smu_v11_0_get_clk_info_from_vbios(struct smu_context *smu)
{
	int ret, index;
	struct amdgpu_device *adev = smu->adev;
	struct atom_get_smu_clock_info_parameters_v3_1 input = {0};
	struct atom_get_smu_clock_info_output_parameters_v3_1 *output;

	input.clk_id = SMU11_SYSPLL0_SOCCLK_ID;
	input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
	index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
					    getsmuclockinfo);

	ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
					(uint32_t *)&input);
	if (ret)
		return -EINVAL;

	output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input;
	smu->smu_table.boot_values.socclk = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;

	memset(&input, 0, sizeof(input));
	input.clk_id = SMU11_SYSPLL0_DCEFCLK_ID;
	input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
	index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
					    getsmuclockinfo);

	ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
					(uint32_t *)&input);
	if (ret)
		return -EINVAL;

	output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input;
	smu->smu_table.boot_values.dcefclk = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;

	return 0;
}

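/*
 * Hand the driver-allocated memory pool over to the SMU: the system virtual
 * address, the GPU MC address and the pool size are passed via the
 * SetSystemVirtualDramAddr and DramLogSetDram messages below.
 */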
static int smu_v11_0_notify_memory_pool_location(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *memory_pool = &smu_table->memory_pool;
	int ret = 0;
	uint64_t address;
	uint32_t address_low, address_high;

	if (memory_pool->size == 0 || memory_pool->cpu_addr == NULL)
		return ret;

	address = (uint64_t)memory_pool->cpu_addr;
	address_high = (uint32_t)upper_32_bits(address);
	address_low = (uint32_t)lower_32_bits(address);

	ret = smu_send_smc_msg_with_param(smu,
					  SMU_MSG_SetSystemVirtualDramAddrHigh,
					  address_high);
	if (ret)
		return ret;
	ret = smu_send_smc_msg_with_param(smu,
					  SMU_MSG_SetSystemVirtualDramAddrLow,
					  address_low);
	if (ret)
		return ret;

	address = memory_pool->mc_address;
	address_high = (uint32_t)upper_32_bits(address);
	address_low = (uint32_t)lower_32_bits(address);

	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrHigh,
					  address_high);
	if (ret)
		return ret;
	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrLow,
					  address_low);
	if (ret)
		return ret;
	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramSize,
					  (uint32_t)memory_pool->size);
	if (ret)
		return ret;

	return ret;
}

static int smu_v11_0_check_pptable(struct smu_context *smu)
{
	int ret;

	ret = smu_check_powerplay_table(smu);
	return ret;
}

static int smu_v11_0_parse_pptable(struct smu_context *smu)
{
	int ret;
	struct smu_table_context *table_context = &smu->smu_table;

	if (table_context->driver_pptable)
		return -EINVAL;

	table_context->driver_pptable = kzalloc(sizeof(PPTable_t), GFP_KERNEL);
	if (!table_context->driver_pptable)
		return -ENOMEM;

	ret = smu_store_powerplay_table(smu);
	if (ret)
		return -EINVAL;

	ret = smu_append_powerplay_table(smu);

	return ret;
}

static int smu_v11_0_populate_smc_pptable(struct smu_context *smu)
{
	int ret;

	ret = smu_set_default_dpm_table(smu);

	return ret;
}

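/*
 * Table upload handshake: copy the driver-side table into its VRAM staging
 * buffer, pass the buffer's MC address with SetDriverDramAddrHigh/Low, then
 * ask the SMU to pull the data in with TransferTableDram2Smu.
 */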
static int smu_v11_0_copy_table_to_smc(struct smu_context *smu,
				       uint32_t table_id)
{
	struct smu_table_context *table_context = &smu->smu_table;
	struct smu_table *driver_pptable = &smu->smu_table.tables[table_id];
	int ret = 0;

	if (table_id >= TABLE_COUNT) {
		pr_err("Invalid SMU Table ID for smu11!");
		return -EINVAL;
	}

	if (!driver_pptable->cpu_addr) {
		pr_err("Invalid virtual address for smu11!");
		return -EINVAL;
	}
	if (!driver_pptable->mc_address) {
		pr_err("Invalid MC address for smu11!");
		return -EINVAL;
	}
	if (!driver_pptable->size) {
		pr_err("Invalid SMU Table size for smu11!");
		return -EINVAL;
	}

	memcpy(driver_pptable->cpu_addr, table_context->driver_pptable,
	       driver_pptable->size);

	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetDriverDramAddrHigh,
					  upper_32_bits(driver_pptable->mc_address));
	if (ret) {
		pr_err("[CopyTableToSMC] Attempt to Set Dram Addr High Failed!");
		return ret;
	}
	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetDriverDramAddrLow,
					  lower_32_bits(driver_pptable->mc_address));
	if (ret) {
		pr_err("[CopyTableToSMC] Attempt to Set Dram Addr Low Failed!");
		return ret;
	}
	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_TransferTableDram2Smu,
					  table_id);
	if (ret) {
		pr_err("[CopyTableToSMC] Attempt to Transfer Table To SMU Failed!");
		return ret;
	}

	return 0;
}

static int smu_v11_0_write_pptable(struct smu_context *smu)
{
	int ret = 0;

	ret = smu_v11_0_copy_table_to_smc(smu, TABLE_PPTABLE);

	return ret;
}

static int smu_v11_0_set_min_dcef_deep_sleep(struct smu_context *smu)
{
	int ret = 0;
	struct smu_table_context *table_context = &smu->smu_table;

	if (!table_context)
		return -EINVAL;

	ret = smu_send_smc_msg_with_param(smu,
					  SMU_MSG_SetMinDeepSleepDcefclk,
					  table_context->boot_values.dcefclk / 100);
	if (ret)
		pr_err("SMU11 attempt to set divider for DCEFCLK Failed!");

	return ret;
}

static int smu_v11_0_set_tool_table_location(struct smu_context *smu)
{
	int ret = 0;
	struct smu_table *tool_table = &smu->smu_table.tables[TABLE_PMSTATUSLOG];

	if (tool_table->mc_address) {
		ret = smu_send_smc_msg_with_param(smu,
						  SMU_MSG_SetToolsDramAddrHigh,
						  upper_32_bits(tool_table->mc_address));
		if (!ret)
			ret = smu_send_smc_msg_with_param(smu,
							  SMU_MSG_SetToolsDramAddrLow,
							  lower_32_bits(tool_table->mc_address));
	}

	return ret;
}

static int smu_v11_0_init_display(struct smu_context *smu)
{
	int ret = 0;

	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, 0);

	return ret;
}

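/*
 * SMU feature masks are 64 bits wide while message parameters are 32 bits,
 * so the allowed/enabled masks are split into low and high halves and
 * transferred with two messages each.
 */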
static int smu_v11_0_set_allowed_mask(struct smu_context *smu)
{
	struct smu_feature *feature = &smu->smu_feature;
	int ret = 0;
	uint32_t feature_mask[2];

	if (bitmap_empty(feature->allowed, SMU_FEATURE_MAX) || feature->feature_num < 64)
		return -EINVAL;

	bitmap_copy((unsigned long *)feature_mask, feature->allowed, 64);

	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskHigh,
					  feature_mask[1]);
	if (ret)
		return ret;

	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskLow,
					  feature_mask[0]);
	if (ret)
		return ret;

	return ret;
}

static int smu_v11_0_get_enabled_mask(struct smu_context *smu,
				      uint32_t *feature_mask, uint32_t num)
{
	uint32_t feature_mask_high = 0, feature_mask_low = 0;
	int ret = 0;

	if (!feature_mask || num < 2)
		return -EINVAL;

	ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesHigh);
	if (ret)
		return ret;
	ret = smu_read_smc_arg(smu, &feature_mask_high);
	if (ret)
		return ret;

	ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesLow);
	if (ret)
		return ret;
	ret = smu_read_smc_arg(smu, &feature_mask_low);
	if (ret)
		return ret;

	feature_mask[0] = feature_mask_low;
	feature_mask[1] = feature_mask_high;

	return ret;
}

static int smu_v11_0_enable_all_mask(struct smu_context *smu)
{
	struct smu_feature *feature = &smu->smu_feature;
	uint32_t feature_mask[2];
	int ret = 0;

	ret = smu_send_smc_msg(smu, SMU_MSG_EnableAllSmuFeatures);
	if (ret)
		return ret;
	ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
	if (ret)
		return ret;

	bitmap_copy(feature->enabled, (unsigned long *)&feature_mask,
		    feature->feature_num);
	bitmap_copy(feature->supported, (unsigned long *)&feature_mask,
		    feature->feature_num);

	return ret;
}

static int smu_v11_0_disable_all_mask(struct smu_context *smu)
{
	struct smu_feature *feature = &smu->smu_feature;
	uint32_t feature_mask[2];
	int ret = 0;

	ret = smu_send_smc_msg(smu, SMU_MSG_DisableAllSmuFeatures);
	if (ret)
		return ret;
	ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
	if (ret)
		return ret;

	bitmap_copy(feature->enabled, (unsigned long *)&feature_mask,
		    feature->feature_num);
	bitmap_copy(feature->supported, (unsigned long *)&feature_mask,
		    feature->feature_num);

	return ret;
}

static int smu_v11_0_notify_display_change(struct smu_context *smu)
{
	int ret = 0;

	if (smu_feature_is_enabled(smu, FEATURE_DPM_UCLK_BIT))
		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetUclkFastSwitch, 1);

	return ret;
}

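/*
 * Query the maximum sustainable frequency of a clock domain. The DC-mode
 * limit is preferred; if the SMU reports zero, the AC-mode limit is used
 * instead.
 */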
static int
smu_v11_0_get_max_sustainable_clock(struct smu_context *smu, uint32_t *clock,
				    PPCLK_e clock_select)
{
	int ret = 0;

	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDcModeMaxDpmFreq,
					  clock_select << 16);
	if (ret) {
		pr_err("[GetMaxSustainableClock] Failed to get max DC clock from SMC!");
		return ret;
	}

	ret = smu_read_smc_arg(smu, clock);
	if (ret)
		return ret;

	if (*clock != 0)
		return 0;

	/* if DC limit is zero, return AC limit */
	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq,
					  clock_select << 16);
	if (ret) {
		pr_err("[GetMaxSustainableClock] failed to get max AC clock from SMC!");
		return ret;
	}

	ret = smu_read_smc_arg(smu, clock);

	return ret;
}

static int smu_v11_0_init_max_sustainable_clocks(struct smu_context *smu)
{
	struct smu_11_0_max_sustainable_clocks *max_sustainable_clocks;
	int ret = 0;

	max_sustainable_clocks = kzalloc(sizeof(struct smu_11_0_max_sustainable_clocks),
					 GFP_KERNEL);
	if (!max_sustainable_clocks)
		return -ENOMEM;
	smu->smu_table.max_sustainable_clocks = (void *)max_sustainable_clocks;

	max_sustainable_clocks->uclock = smu->smu_table.boot_values.uclk / 100;
	max_sustainable_clocks->soc_clock = smu->smu_table.boot_values.socclk / 100;
	max_sustainable_clocks->dcef_clock = smu->smu_table.boot_values.dcefclk / 100;
	max_sustainable_clocks->display_clock = 0xFFFFFFFF;
	max_sustainable_clocks->phy_clock = 0xFFFFFFFF;
	max_sustainable_clocks->pixel_clock = 0xFFFFFFFF;

	if (smu_feature_is_enabled(smu, FEATURE_DPM_UCLK_BIT)) {
		ret = smu_v11_0_get_max_sustainable_clock(smu,
							  &(max_sustainable_clocks->uclock),
							  PPCLK_UCLK);
		if (ret) {
			pr_err("[%s] failed to get max UCLK from SMC!",
			       __func__);
			return ret;
		}
	}

	if (smu_feature_is_enabled(smu, FEATURE_DPM_SOCCLK_BIT)) {
		ret = smu_v11_0_get_max_sustainable_clock(smu,
							  &(max_sustainable_clocks->soc_clock),
							  PPCLK_SOCCLK);
		if (ret) {
			pr_err("[%s] failed to get max SOCCLK from SMC!",
			       __func__);
			return ret;
		}
	}

	if (smu_feature_is_enabled(smu, FEATURE_DPM_DCEFCLK_BIT)) {
		ret = smu_v11_0_get_max_sustainable_clock(smu,
							  &(max_sustainable_clocks->dcef_clock),
							  PPCLK_DCEFCLK);
		if (ret) {
			pr_err("[%s] failed to get max DCEFCLK from SMC!",
			       __func__);
			return ret;
		}

		ret = smu_v11_0_get_max_sustainable_clock(smu,
							  &(max_sustainable_clocks->display_clock),
							  PPCLK_DISPCLK);
		if (ret) {
			pr_err("[%s] failed to get max DISPCLK from SMC!",
			       __func__);
			return ret;
		}
		ret = smu_v11_0_get_max_sustainable_clock(smu,
							  &(max_sustainable_clocks->phy_clock),
							  PPCLK_PHYCLK);
		if (ret) {
			pr_err("[%s] failed to get max PHYCLK from SMC!",
			       __func__);
			return ret;
		}
		ret = smu_v11_0_get_max_sustainable_clock(smu,
							  &(max_sustainable_clocks->pixel_clock),
							  PPCLK_PIXCLK);
		if (ret) {
			pr_err("[%s] failed to get max PIXCLK from SMC!",
			       __func__);
			return ret;
		}
	}

	if (max_sustainable_clocks->soc_clock < max_sustainable_clocks->uclock)
		max_sustainable_clocks->uclock = max_sustainable_clocks->soc_clock;

	return 0;
}

static int smu_v11_0_get_power_limit(struct smu_context *smu)
{
	int ret;
	uint32_t power_limit_value;

	ret = smu_send_smc_msg_with_param(smu,
					  SMU_MSG_GetPptLimit,
					  POWER_SOURCE_AC << 16);
	if (ret) {
		pr_err("[GetPptLimit] get default PPT limit failed!");
		return ret;
	}

	smu_read_smc_arg(smu, &power_limit_value);
	smu->power_limit = smu->default_power_limit = power_limit_value;

	return 0;
}

static int smu_v11_0_get_current_clk_freq(struct smu_context *smu, uint32_t clk_id, uint32_t *value)
{
	int ret = 0;
	uint32_t freq;

	if (clk_id >= PPCLK_COUNT || !value)
		return -EINVAL;

	ret = smu_send_smc_msg_with_param(smu,
					  SMU_MSG_GetDpmClockFreq, (clk_id << 16));
	if (ret)
		return ret;

	ret = smu_read_smc_arg(smu, &freq);
	if (ret)
		return ret;

	freq *= 100;
	*value = freq;

	return ret;
}

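/*
 * Thermal setup. The ranges appear to be kept in
 * PP_TEMPERATURE_UNITS_PER_CENTIGRADES units, while the THM interrupt
 * threshold fields take whole degrees, hence the conversions below.
 */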
static int smu_v11_0_get_thermal_range(struct smu_context *smu,
				       struct PP_TemperatureRange *range)
{
	memcpy(range, &SMU7ThermalWithDelayPolicy[0], sizeof(struct PP_TemperatureRange));

	range->max = smu->smu_table.software_shutdown_temp *
		PP_TEMPERATURE_UNITS_PER_CENTIGRADES;

	return 0;
}

static int smu_v11_0_set_thermal_range(struct smu_context *smu,
				       struct PP_TemperatureRange *range)
{
	struct amdgpu_device *adev = smu->adev;
	int low = SMU11_THERMAL_MINIMUM_ALERT_TEMP *
		PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
	int high = SMU11_THERMAL_MAXIMUM_ALERT_TEMP *
		PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
	uint32_t val;

	if (low < range->min)
		low = range->min;
	if (high > range->max)
		high = range->max;

	if (low > high)
		return -EINVAL;

	val = RREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL);
	val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, MAX_IH_CREDIT, 5);
	val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1);
	val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
	val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
	val = val & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK);

	WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL, val);

	return 0;
}

static int smu_v11_0_enable_thermal_alert(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t val = 0;

	val |= (1 << THM_THERMAL_INT_ENA__THERM_INTH_CLR__SHIFT);
	val |= (1 << THM_THERMAL_INT_ENA__THERM_INTL_CLR__SHIFT);
	val |= (1 << THM_THERMAL_INT_ENA__THERM_TRIGGER_CLR__SHIFT);

	WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_ENA, val);

	return 0;
}

static int smu_v11_0_set_thermal_fan_table(struct smu_context *smu)
{
	int ret;
	struct smu_table_context *table_context = &smu->smu_table;
	PPTable_t *pptable = table_context->driver_pptable;

	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetFanTemperatureTarget,
					  (uint32_t)pptable->FanTargetTemperature);

	return ret;
}

static int smu_v11_0_start_thermal_control(struct smu_context *smu)
{
	int ret = 0;
	struct PP_TemperatureRange range;
	struct amdgpu_device *adev = smu->adev;

	smu_v11_0_get_thermal_range(smu, &range);

	if (smu->smu_table.thermal_controller_type) {
		ret = smu_v11_0_set_thermal_range(smu, &range);
		if (ret)
			return ret;

		ret = smu_v11_0_enable_thermal_alert(smu);
		if (ret)
			return ret;
		ret = smu_v11_0_set_thermal_fan_table(smu);
		if (ret)
			return ret;
	}

	adev->pm.dpm.thermal.min_temp = range.min;
	adev->pm.dpm.thermal.max_temp = range.max;

	return ret;
}

static int smu_v11_0_get_current_activity_percent(struct smu_context *smu,
						  uint32_t *value)
{
	int ret = 0;
	SmuMetrics_t metrics;

	if (!value)
		return -EINVAL;

	ret = smu_update_table(smu, TABLE_SMU_METRICS, (void *)&metrics, false);
	if (ret)
		return ret;

	*value = metrics.AverageGfxActivity;

	return 0;
}

static int smu_v11_0_read_sensor(struct smu_context *smu,
				 enum amd_pp_sensors sensor,
				 void *data, uint32_t *size)
{
	int ret = 0;

	switch (sensor) {
	case AMDGPU_PP_SENSOR_GPU_LOAD:
		ret = smu_v11_0_get_current_activity_percent(smu,
							     (uint32_t *)data);
		*size = 4;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	if (ret)
		*size = 0;

	return ret;
}

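/*
 * smu_v11_0 implementation of the common smu_funcs table. ASIC-specific
 * ppt_funcs (currently vega20 only) are hooked up separately in
 * smu_v11_0_set_smu_funcs() below.
 */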
static const struct smu_funcs smu_v11_0_funcs = {
	.init_microcode = smu_v11_0_init_microcode,
	.load_microcode = smu_v11_0_load_microcode,
	.check_fw_status = smu_v11_0_check_fw_status,
	.check_fw_version = smu_v11_0_check_fw_version,
	.send_smc_msg = smu_v11_0_send_msg,
	.send_smc_msg_with_param = smu_v11_0_send_msg_with_param,
	.read_smc_arg = smu_v11_0_read_arg,
	.read_pptable_from_vbios = smu_v11_0_read_pptable_from_vbios,
	.init_smc_tables = smu_v11_0_init_smc_tables,
	.fini_smc_tables = smu_v11_0_fini_smc_tables,
	.init_power = smu_v11_0_init_power,
	.fini_power = smu_v11_0_fini_power,
	.get_vbios_bootup_values = smu_v11_0_get_vbios_bootup_values,
	.get_clk_info_from_vbios = smu_v11_0_get_clk_info_from_vbios,
	.notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,
	.check_pptable = smu_v11_0_check_pptable,
	.parse_pptable = smu_v11_0_parse_pptable,
	.populate_smc_pptable = smu_v11_0_populate_smc_pptable,
	.write_pptable = smu_v11_0_write_pptable,
	.set_min_dcef_deep_sleep = smu_v11_0_set_min_dcef_deep_sleep,
	.set_tool_table_location = smu_v11_0_set_tool_table_location,
	.init_display = smu_v11_0_init_display,
	.set_allowed_mask = smu_v11_0_set_allowed_mask,
	.get_enabled_mask = smu_v11_0_get_enabled_mask,
	.enable_all_mask = smu_v11_0_enable_all_mask,
	.disable_all_mask = smu_v11_0_disable_all_mask,
	.notify_display_change = smu_v11_0_notify_display_change,
	.get_power_limit = smu_v11_0_get_power_limit,
	.get_current_clk_freq = smu_v11_0_get_current_clk_freq,
	.init_max_sustainable_clocks = smu_v11_0_init_max_sustainable_clocks,
	.start_thermal_control = smu_v11_0_start_thermal_control,
	.read_sensor = smu_v11_0_read_sensor,
};

void smu_v11_0_set_smu_funcs(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;

	smu->funcs = &smu_v11_0_funcs;

	switch (adev->asic_type) {
	case CHIP_VEGA20:
		vega20_set_ppt_funcs(smu);
		break;
	default:
		pr_warn("Unknown asic for smu11\n");
	}
}