mirror of https://gitee.com/openkylin/linux.git
Merge tag 'drm-next-5.5-2019-11-15' of git://people.freedesktop.org/~agd5f/linux into drm-next
drm-next-5.5-2019-11-15:

amdgpu:
- Fix AVFS handling on SMU7 parts with custom power tables
- Enable Overdrive sysfs interface for Navi parts
- Fix power limit handling on smu11 parts
- Fix pcie link sysfs output for Navi
- Properly cancel MM worker threads on shutdown

radeon:
- Cleanup for ppc change

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexdeucher@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191115163516.3714-1-alexander.deucher@amd.com
commit c22fe762ba
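For context on the "Enable Overdrive sysfs interface for Navi parts" item: once the patches below land, the new navi10_od_edit_dpm_table() path is reached through the existing pp_od_clk_voltage sysfs file. A minimal user-space sketch follows; the card index and the clock values are illustrative assumptions, not part of this series.

/*
 * Hedged sketch: drive the Overdrive interface that this series wires up
 * for Navi. Assumes the GPU is card0 and that the requested clocks fall
 * inside the board's OD limits; adjust both for a real system.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int od_write(const char *cmd)
{
	int fd = open("/sys/class/drm/card0/device/pp_od_clk_voltage", O_WRONLY);
	ssize_t n;

	if (fd < 0) {
		perror("pp_od_clk_voltage");
		return -1;
	}
	n = write(fd, cmd, strlen(cmd));	/* parsed by the od_edit_dpm_table hook */
	close(fd);
	return n < 0 ? -1 : 0;
}

int main(void)
{
	od_write("s 1 2000\n");	/* sclk limit, index 1 = max (PP_OD_EDIT_SCLK_VDDC_TABLE) */
	od_write("m 1 900\n");	/* mclk limit, index 1 = max (PP_OD_EDIT_MCLK_VDDC_TABLE) */
	od_write("c\n");	/* commit (PP_OD_COMMIT_DPM_TABLE) */
	return 0;
}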
@@ -3110,6 +3110,9 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
DRM_INFO("amdgpu: finishing device.\n");
adev->shutdown = true;

flush_delayed_work(&adev->delayed_init_work);

/* disable all interrupts */
amdgpu_irq_disable_all(adev);
if (adev->mode_info.mode_config_initialized){
@@ -567,7 +567,9 @@ static int psp_xgmi_initialize(struct psp_context *psp)
struct ta_xgmi_shared_memory *xgmi_cmd;
int ret;

if (!psp->adev->psp.ta_fw)
if (!psp->adev->psp.ta_fw ||
!psp->adev->psp.ta_xgmi_ucode_size ||
!psp->adev->psp.ta_xgmi_start_addr)
return -ENOENT;

if (!psp->xgmi_context.initialized) {

@@ -777,6 +779,12 @@ static int psp_ras_initialize(struct psp_context *psp)
{
int ret;

if (!psp->adev->psp.ta_ras_ucode_size ||
!psp->adev->psp.ta_ras_start_addr) {
dev_warn(psp->adev->dev, "RAS: ras ta ucode is not available\n");
return 0;
}

if (!psp->ras.ras_initialized) {
ret = psp_ras_init_shared_buf(psp);
if (ret)

@@ -866,6 +874,12 @@ static int psp_hdcp_initialize(struct psp_context *psp)
{
int ret;

if (!psp->adev->psp.ta_hdcp_ucode_size ||
!psp->adev->psp.ta_hdcp_start_addr) {
dev_warn(psp->adev->dev, "HDCP: hdcp ta ucode is not available\n");
return 0;
}

if (!psp->hdcp_context.hdcp_initialized) {
ret = psp_hdcp_init_shared_buf(psp);
if (ret)

@@ -1039,6 +1053,12 @@ static int psp_dtm_initialize(struct psp_context *psp)
{
int ret;

if (!psp->adev->psp.ta_dtm_ucode_size ||
!psp->adev->psp.ta_dtm_start_addr) {
dev_warn(psp->adev->dev, "DTM: dtm ta ucode is not available\n");
return 0;
}

if (!psp->dtm_context.dtm_initialized) {
ret = psp_dtm_init_shared_buf(psp);
if (ret)
@@ -299,6 +299,7 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
{
int i, j;

cancel_delayed_work_sync(&adev->uvd.idle_work);
drm_sched_entity_destroy(&adev->uvd.entity);

for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
@@ -216,6 +216,7 @@ int amdgpu_vce_sw_fini(struct amdgpu_device *adev)
if (adev->vce.vcpu_bo == NULL)
return 0;

cancel_delayed_work_sync(&adev->vce.idle_work);
drm_sched_entity_destroy(&adev->vce.entity);

amdgpu_bo_free_kernel(&adev->vce.vcpu_bo, &adev->vce.gpu_addr,
@@ -193,6 +193,8 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
{
int i, j;

cancel_delayed_work_sync(&adev->vcn.idle_work);

if (adev->vcn.indirect_sram) {
amdgpu_bo_free_kernel(&adev->vcn.dpg_sram_bo,
&adev->vcn.dpg_sram_gpu_addr,
@@ -1068,10 +1068,6 @@ static int smu_smc_table_hw_init(struct smu_context *smu,
return ret;

if (adev->asic_type != CHIP_ARCTURUS) {
ret = smu_override_pcie_parameters(smu);
if (ret)
return ret;

ret = smu_notify_display_change(smu);
if (ret)
return ret;

@@ -1100,6 +1096,12 @@ static int smu_smc_table_hw_init(struct smu_context *smu,
return ret;
}

if (adev->asic_type != CHIP_ARCTURUS) {
ret = smu_override_pcie_parameters(smu);
if (ret)
return ret;
}

ret = smu_set_default_od_settings(smu, initialize);
if (ret)
return ret;

@@ -1109,7 +1111,7 @@ static int smu_smc_table_hw_init(struct smu_context *smu,
if (ret)
return ret;

ret = smu_get_power_limit(smu, &smu->default_power_limit, true, false);
ret = smu_get_power_limit(smu, &smu->default_power_limit, false, false);
if (ret)
return ret;
}

@@ -2511,3 +2513,13 @@ int smu_get_dpm_clock_table(struct smu_context *smu,

return ret;
}

uint32_t smu_get_pptable_power_limit(struct smu_context *smu)
{
uint32_t ret = 0;

if (smu->ppt_funcs->get_pptable_power_limit)
ret = smu->ppt_funcs->get_pptable_power_limit(smu);

return ret;
}
@@ -1261,15 +1261,14 @@ arcturus_get_profiling_clk_mask(struct smu_context *smu,

static int arcturus_get_power_limit(struct smu_context *smu,
uint32_t *limit,
bool asic_default)
bool cap)
{
PPTable_t *pptable = smu->smu_table.driver_pptable;
uint32_t asic_default_power_limit = 0;
int ret = 0;
int power_src;

if (!smu->default_power_limit ||
!smu->power_limit) {
if (!smu->power_limit) {
if (smu_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) {
power_src = smu_power_get_index(smu, SMU_POWER_SOURCE_AC);
if (power_src < 0)

@@ -1292,17 +1291,11 @@ static int arcturus_get_power_limit(struct smu_context *smu,
pptable->SocketPowerLimitAc[PPT_THROTTLER_PPT0];
}

if (smu->od_enabled) {
asic_default_power_limit *= (100 + smu->smu_table.TDPODLimit);
asic_default_power_limit /= 100;
}

smu->default_power_limit = asic_default_power_limit;
smu->power_limit = asic_default_power_limit;
}

if (asic_default)
*limit = smu->default_power_limit;
if (cap)
*limit = smu_v11_0_get_max_power_limit(smu);
else
*limit = smu->power_limit;

@@ -2070,6 +2063,13 @@ static void arcturus_i2c_eeprom_control_fini(struct i2c_adapter *control)
i2c_del_adapter(control);
}

static uint32_t arcturus_get_pptable_power_limit(struct smu_context *smu)
{
PPTable_t *pptable = smu->smu_table.driver_pptable;

return pptable->SocketPowerLimitAc[PPT_THROTTLER_PPT0];
}

static const struct pptable_funcs arcturus_ppt_funcs = {
/* translate smu index into arcturus specific index */
.get_smu_msg_index = arcturus_get_smu_msg_index,

@@ -2160,6 +2160,7 @@ static const struct pptable_funcs arcturus_ppt_funcs = {
.get_dpm_ultimate_freq = smu_v11_0_get_dpm_ultimate_freq,
.set_soft_freq_limited_range = smu_v11_0_set_soft_freq_limited_range,
.override_pcie_parameters = smu_v11_0_override_pcie_parameters,
.get_pptable_power_limit = arcturus_get_pptable_power_limit,
};

void arcturus_set_ppt_funcs(struct smu_context *smu)
@@ -3969,6 +3969,13 @@ static int smu7_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input)
"Failed to populate and upload SCLK MCLK DPM levels!",
result = tmp_result);

/*
* If a custom pp table is loaded, set DPMTABLE_OD_UPDATE_VDDC flag.
* That effectively disables AVFS feature.
*/
if (hwmgr->hardcode_pp_table != NULL)
data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC;

tmp_result = smu7_update_avfs(hwmgr);
PP_ASSERT_WITH_CODE((0 == tmp_result),
"Failed to update avfs voltages!",
@@ -261,7 +261,6 @@ struct smu_table_context
struct smu_table *tables;
struct smu_table memory_pool;
uint8_t thermal_controller_type;
uint16_t TDPODLimit;

void *overdrive_table;
};

@@ -548,6 +547,7 @@ struct pptable_funcs {
int (*get_dpm_ultimate_freq)(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t *min, uint32_t *max);
int (*set_soft_freq_limited_range)(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t min, uint32_t max);
int (*override_pcie_parameters)(struct smu_context *smu);
uint32_t (*get_pptable_power_limit)(struct smu_context *smu);
};

int smu_load_microcode(struct smu_context *smu);

@@ -717,4 +717,6 @@ int smu_get_uclk_dpm_states(struct smu_context *smu,
int smu_get_dpm_clock_table(struct smu_context *smu,
struct dpm_clocks *clock_table);

uint32_t smu_get_pptable_power_limit(struct smu_context *smu);

#endif
@@ -48,6 +48,8 @@

#define SMU11_TOOL_SIZE 0x19000

#define MAX_PCIE_CONF 2

#define CLK_MAP(clk, index) \
[SMU_##clk] = {1, (index)}

@@ -88,6 +90,11 @@ struct smu_11_0_dpm_table {
uint32_t max; /* MHz */
};

struct smu_11_0_pcie_table {
uint8_t pcie_gen[MAX_PCIE_CONF];
uint8_t pcie_lane[MAX_PCIE_CONF];
};

struct smu_11_0_dpm_tables {
struct smu_11_0_dpm_table soc_table;
struct smu_11_0_dpm_table gfx_table;

@@ -100,6 +107,7 @@ struct smu_11_0_dpm_tables {
struct smu_11_0_dpm_table display_table;
struct smu_11_0_dpm_table phy_table;
struct smu_11_0_dpm_table fclk_table;
struct smu_11_0_pcie_table pcie_table;
};

struct smu_11_0_dpm_context {

@@ -250,4 +258,8 @@ int smu_v11_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_

int smu_v11_0_override_pcie_parameters(struct smu_context *smu);

int smu_v11_0_set_default_od_settings(struct smu_context *smu, bool initialize, size_t overdrive_table_size);

uint32_t smu_v11_0_get_max_power_limit(struct smu_context *smu);

#endif
@@ -141,7 +141,9 @@ struct smu_11_0_powerplay_table
struct smu_11_0_power_saving_clock_table power_saving_clock;
struct smu_11_0_overdrive_table overdrive_table;

#ifndef SMU_11_0_PARTIAL_PPTABLE
PPTable_t smc_pptable; //PPTable_t in smu11_driver_if.h
#endif
} __attribute__((packed));

#endif
@@ -36,6 +36,7 @@
#include "navi10_ppt.h"
#include "smu_v11_0_pptable.h"
#include "smu_v11_0_ppsmc.h"
#include "nbio/nbio_7_4_sh_mask.h"

#include "asic_reg/mp/mp_11_0_sh_mask.h"

@@ -599,6 +600,7 @@ static int navi10_set_default_dpm_table(struct smu_context *smu)
struct smu_table_context *table_context = &smu->smu_table;
struct smu_11_0_dpm_context *dpm_context = smu_dpm->dpm_context;
PPTable_t *driver_ppt = NULL;
int i;

driver_ppt = table_context->driver_pptable;

@@ -629,6 +631,11 @@ static int navi10_set_default_dpm_table(struct smu_context *smu)
dpm_context->dpm_tables.phy_table.min = driver_ppt->FreqTablePhyclk[0];
dpm_context->dpm_tables.phy_table.max = driver_ppt->FreqTablePhyclk[NUM_PHYCLK_DPM_LEVELS - 1];

for (i = 0; i < MAX_PCIE_CONF; i++) {
dpm_context->dpm_tables.pcie_table.pcie_gen[i] = driver_ppt->PcieGenSpeed[i];
dpm_context->dpm_tables.pcie_table.pcie_lane[i] = driver_ppt->PcieLaneCount[i];
}

return 0;
}

@@ -691,13 +698,29 @@ static bool navi10_is_support_fine_grained_dpm(struct smu_context *smu, enum smu
return dpm_desc->SnapToDiscrete == 0 ? true : false;
}

static inline bool navi10_od_feature_is_supported(struct smu_11_0_overdrive_table *od_table, enum SMU_11_0_ODFEATURE_ID feature)
{
return od_table->cap[feature];
}

static int navi10_print_clk_levels(struct smu_context *smu,
enum smu_clk_type clk_type, char *buf)
{
uint16_t *curve_settings;
int i, size = 0, ret = 0;
uint32_t cur_value = 0, value = 0, count = 0;
uint32_t freq_values[3] = {0};
uint32_t mark_index = 0;
struct smu_table_context *table_context = &smu->smu_table;
uint32_t gen_speed, lane_width;
struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
struct smu_11_0_dpm_context *dpm_context = smu_dpm->dpm_context;
struct amdgpu_device *adev = smu->adev;
PPTable_t *pptable = (PPTable_t *)table_context->driver_pptable;
OverDriveTable_t *od_table =
(OverDriveTable_t *)table_context->overdrive_table;
struct smu_11_0_overdrive_table *od_settings = smu->od_settings;

switch (clk_type) {
case SMU_GFXCLK:
@@ -748,6 +771,69 @@ static int navi10_print_clk_levels(struct smu_context *smu,

}
break;
case SMU_PCIE:
gen_speed = (RREG32_PCIE(smnPCIE_LC_SPEED_CNTL) &
PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK)
>> PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;
lane_width = (RREG32_PCIE(smnPCIE_LC_LINK_WIDTH_CNTL) &
PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK)
>> PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT;
for (i = 0; i < NUM_LINK_LEVELS; i++)
size += sprintf(buf + size, "%d: %s %s %dMhz %s\n", i,
(dpm_context->dpm_tables.pcie_table.pcie_gen[i] == 0) ? "2.5GT/s," :
(dpm_context->dpm_tables.pcie_table.pcie_gen[i] == 1) ? "5.0GT/s," :
(dpm_context->dpm_tables.pcie_table.pcie_gen[i] == 2) ? "8.0GT/s," :
(dpm_context->dpm_tables.pcie_table.pcie_gen[i] == 3) ? "16.0GT/s," : "",
(dpm_context->dpm_tables.pcie_table.pcie_lane[i] == 1) ? "x1" :
(dpm_context->dpm_tables.pcie_table.pcie_lane[i] == 2) ? "x2" :
(dpm_context->dpm_tables.pcie_table.pcie_lane[i] == 3) ? "x4" :
(dpm_context->dpm_tables.pcie_table.pcie_lane[i] == 4) ? "x8" :
(dpm_context->dpm_tables.pcie_table.pcie_lane[i] == 5) ? "x12" :
(dpm_context->dpm_tables.pcie_table.pcie_lane[i] == 6) ? "x16" : "",
pptable->LclkFreq[i],
(gen_speed == dpm_context->dpm_tables.pcie_table.pcie_gen[i]) &&
(lane_width == dpm_context->dpm_tables.pcie_table.pcie_lane[i]) ?
"*" : "");
break;
case SMU_OD_SCLK:
if (!smu->od_enabled || !od_table || !od_settings)
break;
if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_GFXCLK_LIMITS))
break;
size += sprintf(buf + size, "OD_SCLK:\n");
size += sprintf(buf + size, "0: %uMhz\n1: %uMhz\n", od_table->GfxclkFmin, od_table->GfxclkFmax);
break;
case SMU_OD_MCLK:
if (!smu->od_enabled || !od_table || !od_settings)
break;
if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_UCLK_MAX))
break;
size += sprintf(buf + size, "OD_MCLK:\n");
size += sprintf(buf + size, "0: %uMHz\n", od_table->UclkFmax);
break;
case SMU_OD_VDDC_CURVE:
if (!smu->od_enabled || !od_table || !od_settings)
break;
if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_GFXCLK_CURVE))
break;
size += sprintf(buf + size, "OD_VDDC_CURVE:\n");
for (i = 0; i < 3; i++) {
switch (i) {
case 0:
curve_settings = &od_table->GfxclkFreq1;
break;
case 1:
curve_settings = &od_table->GfxclkFreq2;
break;
case 2:
curve_settings = &od_table->GfxclkFreq3;
break;
default:
break;
}
size += sprintf(buf + size, "%d: %uMHz @ %umV\n", i, curve_settings[0], curve_settings[1] / NAVI10_VOLTAGE_SCALE);
}
break;
default:
break;
}
@@ -1582,17 +1668,22 @@ static int navi10_display_disable_memory_clock_switch(struct smu_context *smu,
return ret;
}

static uint32_t navi10_get_pptable_power_limit(struct smu_context *smu)
{
PPTable_t *pptable = smu->smu_table.driver_pptable;
return pptable->SocketPowerLimitAc[PPT_THROTTLER_PPT0];
}

static int navi10_get_power_limit(struct smu_context *smu,
uint32_t *limit,
bool asic_default)
bool cap)
{
PPTable_t *pptable = smu->smu_table.driver_pptable;
uint32_t asic_default_power_limit = 0;
int ret = 0;
int power_src;

if (!smu->default_power_limit ||
!smu->power_limit) {
if (!smu->power_limit) {
if (smu_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) {
power_src = smu_power_get_index(smu, SMU_POWER_SOURCE_AC);
if (power_src < 0)

@@ -1615,17 +1706,11 @@ static int navi10_get_power_limit(struct smu_context *smu,
pptable->SocketPowerLimitAc[PPT_THROTTLER_PPT0];
}

if (smu->od_enabled) {
asic_default_power_limit *= (100 + smu->smu_table.TDPODLimit);
asic_default_power_limit /= 100;
}

smu->default_power_limit = asic_default_power_limit;
smu->power_limit = asic_default_power_limit;
}

if (asic_default)
*limit = smu->default_power_limit;
if (cap)
*limit = smu_v11_0_get_max_power_limit(smu);
else
*limit = smu->power_limit;

@@ -1640,6 +1725,9 @@ static int navi10_update_pcie_parameters(struct smu_context *smu,
int ret, i;
uint32_t smu_pcie_arg;

struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
struct smu_11_0_dpm_context *dpm_context = smu_dpm->dpm_context;

for (i = 0; i < NUM_LINK_LEVELS; i++) {
smu_pcie_arg = (i << 16) |
((pptable->PcieGenSpeed[i] <= pcie_gen_cap) ? (pptable->PcieGenSpeed[i] << 8) :
@@ -1648,11 +1736,249 @@ static int navi10_update_pcie_parameters(struct smu_context *smu,
ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_OverridePcieParameters,
smu_pcie_arg);

if (ret)
return ret;

if (pptable->PcieGenSpeed[i] > pcie_gen_cap)
dpm_context->dpm_tables.pcie_table.pcie_gen[i] = pcie_gen_cap;
if (pptable->PcieLaneCount[i] > pcie_width_cap)
dpm_context->dpm_tables.pcie_table.pcie_lane[i] = pcie_width_cap;
}

return 0;
}

static inline void navi10_dump_od_table(OverDriveTable_t *od_table) {
pr_debug("OD: Gfxclk: (%d, %d)\n", od_table->GfxclkFmin, od_table->GfxclkFmax);
pr_debug("OD: Gfx1: (%d, %d)\n", od_table->GfxclkFreq1, od_table->GfxclkVolt1);
pr_debug("OD: Gfx2: (%d, %d)\n", od_table->GfxclkFreq2, od_table->GfxclkVolt2);
pr_debug("OD: Gfx3: (%d, %d)\n", od_table->GfxclkFreq3, od_table->GfxclkVolt3);
pr_debug("OD: UclkFmax: %d\n", od_table->UclkFmax);
pr_debug("OD: OverDrivePct: %d\n", od_table->OverDrivePct);
}

static int navi10_od_setting_check_range(struct smu_11_0_overdrive_table *od_table, enum SMU_11_0_ODSETTING_ID setting, uint32_t value)
{
if (value < od_table->min[setting]) {
pr_warn("OD setting (%d, %d) is less than the minimum allowed (%d)\n", setting, value, od_table->min[setting]);
return -EINVAL;
}
if (value > od_table->max[setting]) {
pr_warn("OD setting (%d, %d) is greater than the maximum allowed (%d)\n", setting, value, od_table->max[setting]);
return -EINVAL;
}
return 0;
}

static int navi10_setup_od_limits(struct smu_context *smu) {
struct smu_11_0_overdrive_table *overdrive_table = NULL;
struct smu_11_0_powerplay_table *powerplay_table = NULL;

if (!smu->smu_table.power_play_table) {
pr_err("powerplay table uninitialized!\n");
return -ENOENT;
}
powerplay_table = (struct smu_11_0_powerplay_table *)smu->smu_table.power_play_table;
overdrive_table = &powerplay_table->overdrive_table;
if (!smu->od_settings) {
smu->od_settings = kmemdup(overdrive_table, sizeof(struct smu_11_0_overdrive_table), GFP_KERNEL);
} else {
memcpy(smu->od_settings, overdrive_table, sizeof(struct smu_11_0_overdrive_table));
}
return 0;
}

static int navi10_set_default_od_settings(struct smu_context *smu, bool initialize) {
OverDriveTable_t *od_table;
int ret = 0;

ret = smu_v11_0_set_default_od_settings(smu, initialize, sizeof(OverDriveTable_t));
if (ret)
return ret;

if (initialize) {
ret = navi10_setup_od_limits(smu);
if (ret) {
pr_err("Failed to retrieve board OD limits\n");
return ret;
}

}

od_table = (OverDriveTable_t *)smu->smu_table.overdrive_table;
if (od_table) {
navi10_dump_od_table(od_table);
}

return ret;
}

static int navi10_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TABLE_COMMAND type, long input[], uint32_t size) {
int i;
int ret = 0;
struct smu_table_context *table_context = &smu->smu_table;
OverDriveTable_t *od_table;
struct smu_11_0_overdrive_table *od_settings;
enum SMU_11_0_ODSETTING_ID freq_setting, voltage_setting;
uint16_t *freq_ptr, *voltage_ptr;
od_table = (OverDriveTable_t *)table_context->overdrive_table;

if (!smu->od_enabled) {
pr_warn("OverDrive is not enabled!\n");
return -EINVAL;
}

if (!smu->od_settings) {
pr_err("OD board limits are not set!\n");
return -ENOENT;
}

od_settings = smu->od_settings;

switch (type) {
case PP_OD_EDIT_SCLK_VDDC_TABLE:
if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_GFXCLK_LIMITS)) {
pr_warn("GFXCLK_LIMITS not supported!\n");
return -ENOTSUPP;
}
if (!table_context->overdrive_table) {
pr_err("Overdrive is not initialized\n");
return -EINVAL;
}
for (i = 0; i < size; i += 2) {
if (i + 2 > size) {
pr_info("invalid number of input parameters %d\n", size);
return -EINVAL;
}
switch (input[i]) {
case 0:
freq_setting = SMU_11_0_ODSETTING_GFXCLKFMIN;
freq_ptr = &od_table->GfxclkFmin;
if (input[i + 1] > od_table->GfxclkFmax) {
pr_info("GfxclkFmin (%ld) must be <= GfxclkFmax (%u)!\n",
input[i + 1],
od_table->GfxclkFmin);
return -EINVAL;
}
break;
case 1:
freq_setting = SMU_11_0_ODSETTING_GFXCLKFMAX;
freq_ptr = &od_table->GfxclkFmax;
if (input[i + 1] < od_table->GfxclkFmin) {
pr_info("GfxclkFmax (%ld) must be >= GfxclkFmin (%u)!\n",
input[i + 1],
od_table->GfxclkFmax);
return -EINVAL;
}
break;
default:
pr_info("Invalid SCLK_VDDC_TABLE index: %ld\n", input[i]);
pr_info("Supported indices: [0:min,1:max]\n");
return -EINVAL;
}
ret = navi10_od_setting_check_range(od_settings, freq_setting, input[i + 1]);
if (ret)
return ret;
*freq_ptr = input[i + 1];
}
break;
case PP_OD_EDIT_MCLK_VDDC_TABLE:
if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_UCLK_MAX)) {
pr_warn("UCLK_MAX not supported!\n");
return -ENOTSUPP;
}
if (size < 2) {
pr_info("invalid number of parameters: %d\n", size);
return -EINVAL;
}
if (input[0] != 1) {
pr_info("Invalid MCLK_VDDC_TABLE index: %ld\n", input[0]);
pr_info("Supported indices: [1:max]\n");
return -EINVAL;
}
ret = navi10_od_setting_check_range(od_settings, SMU_11_0_ODSETTING_UCLKFMAX, input[1]);
if (ret)
return ret;
od_table->UclkFmax = input[1];
break;
case PP_OD_COMMIT_DPM_TABLE:
navi10_dump_od_table(od_table);
ret = smu_update_table(smu, SMU_TABLE_OVERDRIVE, 0, (void *)od_table, true);
if (ret) {
pr_err("Failed to import overdrive table!\n");
return ret;
}
// no lock needed because smu_od_edit_dpm_table has it
ret = smu_handle_task(smu, smu->smu_dpm.dpm_level,
AMD_PP_TASK_READJUST_POWER_STATE,
false);
if (ret) {
return ret;
}
break;
case PP_OD_EDIT_VDDC_CURVE:
if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_GFXCLK_CURVE)) {
pr_warn("GFXCLK_CURVE not supported!\n");
return -ENOTSUPP;
}
if (size < 3) {
pr_info("invalid number of parameters: %d\n", size);
return -EINVAL;
}
if (!od_table) {
pr_info("Overdrive is not initialized\n");
return -EINVAL;
}

switch (input[0]) {
case 0:
freq_setting = SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P1;
voltage_setting = SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P1;
freq_ptr = &od_table->GfxclkFreq1;
voltage_ptr = &od_table->GfxclkVolt1;
break;
case 1:
freq_setting = SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P2;
voltage_setting = SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P2;
freq_ptr = &od_table->GfxclkFreq2;
voltage_ptr = &od_table->GfxclkVolt2;
break;
case 2:
freq_setting = SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P3;
voltage_setting = SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P3;
freq_ptr = &od_table->GfxclkFreq3;
voltage_ptr = &od_table->GfxclkVolt3;
break;
default:
pr_info("Invalid VDDC_CURVE index: %ld\n", input[0]);
pr_info("Supported indices: [0, 1, 2]\n");
return -EINVAL;
}
ret = navi10_od_setting_check_range(od_settings, freq_setting, input[1]);
if (ret)
return ret;
// Allow setting zero to disable the OverDrive VDDC curve
if (input[2] != 0) {
ret = navi10_od_setting_check_range(od_settings, voltage_setting, input[2]);
if (ret)
return ret;
*freq_ptr = input[1];
*voltage_ptr = ((uint16_t)input[2]) * NAVI10_VOLTAGE_SCALE;
pr_debug("OD: set curve %ld: (%d, %d)\n", input[0], *freq_ptr, *voltage_ptr);
} else {
// If setting 0, disable all voltage curve settings
od_table->GfxclkVolt1 = 0;
od_table->GfxclkVolt2 = 0;
od_table->GfxclkVolt3 = 0;
}
navi10_dump_od_table(od_table);
break;
default:
return -ENOSYS;
}
return ret;
}

static const struct pptable_funcs navi10_ppt_funcs = {
.tables_init = navi10_tables_init,
@@ -1742,6 +2068,9 @@ static const struct pptable_funcs navi10_ppt_funcs = {
.get_dpm_ultimate_freq = smu_v11_0_get_dpm_ultimate_freq,
.set_soft_freq_limited_range = smu_v11_0_set_soft_freq_limited_range,
.override_pcie_parameters = smu_v11_0_override_pcie_parameters,
.set_default_od_settings = navi10_set_default_od_settings,
.od_edit_dpm_table = navi10_od_edit_dpm_table,
.get_pptable_power_limit = navi10_get_pptable_power_limit,
};

void navi10_set_ppt_funcs(struct smu_context *smu)
@@ -33,6 +33,11 @@
#define NAVI14_UMD_PSTATE_PEAK_XTX_GFXCLK (1717)
#define NAVI14_UMD_PSTATE_PEAK_XL_GFXCLK (1448)

#define NAVI10_VOLTAGE_SCALE (4)

#define smnPCIE_LC_SPEED_CNTL 0x11140290
#define smnPCIE_LC_LINK_WIDTH_CNTL 0x11140288

extern void navi10_set_ppt_funcs(struct smu_context *smu);

#endif
@@ -24,6 +24,8 @@
#include <linux/module.h>
#include <linux/pci.h>

#define SMU_11_0_PARTIAL_PPTABLE

#include "pp_debug.h"
#include "amdgpu.h"
#include "amdgpu_smu.h"

@@ -31,6 +33,7 @@
#include "atomfirmware.h"
#include "amdgpu_atomfirmware.h"
#include "smu_v11_0.h"
#include "smu_v11_0_pptable.h"
#include "soc15_common.h"
#include "atom.h"
#include "amd_pcie.h"
@@ -1045,13 +1048,44 @@ int smu_v11_0_init_max_sustainable_clocks(struct smu_context *smu)
return 0;
}

uint32_t smu_v11_0_get_max_power_limit(struct smu_context *smu) {
uint32_t od_limit, max_power_limit;
struct smu_11_0_powerplay_table *powerplay_table = NULL;
struct smu_table_context *table_context = &smu->smu_table;
powerplay_table = table_context->power_play_table;

max_power_limit = smu_get_pptable_power_limit(smu);

if (!max_power_limit) {
// If we couldn't get the table limit, fall back on first-read value
if (!smu->default_power_limit)
smu->default_power_limit = smu->power_limit;
max_power_limit = smu->default_power_limit;
}

if (smu->od_enabled) {
od_limit = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_ODSETTING_POWERPERCENTAGE]);

pr_debug("ODSETTING_POWERPERCENTAGE: %d (default: %d)\n", od_limit, smu->default_power_limit);

max_power_limit *= (100 + od_limit);
max_power_limit /= 100;
}

return max_power_limit;
}

int smu_v11_0_set_power_limit(struct smu_context *smu, uint32_t n)
{
int ret = 0;
uint32_t max_power_limit;

if (n > smu->default_power_limit) {
pr_err("New power limit is over the max allowed %d\n",
smu->default_power_limit);
max_power_limit = smu_v11_0_get_max_power_limit(smu);

if (n > max_power_limit) {
pr_err("New power limit (%d) is over the max allowed %d\n",
n,
max_power_limit);
return -EINVAL;
}
@@ -1779,3 +1813,30 @@ int smu_v11_0_override_pcie_parameters(struct smu_context *smu)
return ret;

}

int smu_v11_0_set_default_od_settings(struct smu_context *smu, bool initialize, size_t overdrive_table_size)
{
struct smu_table_context *table_context = &smu->smu_table;
int ret = 0;

if (initialize) {
if (table_context->overdrive_table) {
return -EINVAL;
}
table_context->overdrive_table = kzalloc(overdrive_table_size, GFP_KERNEL);
if (!table_context->overdrive_table) {
return -ENOMEM;
}
ret = smu_update_table(smu, SMU_TABLE_OVERDRIVE, 0, table_context->overdrive_table, false);
if (ret) {
pr_err("Failed to export overdrive table!\n");
return ret;
}
}
ret = smu_update_table(smu, SMU_TABLE_OVERDRIVE, 0, table_context->overdrive_table, true);
if (ret) {
pr_err("Failed to import overdrive table!\n");
return ret;
}
return ret;
}
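The smu_v11_0_set_power_limit() change just above stops rejecting requests against default_power_limit and instead caps them at smu_v11_0_get_max_power_limit(), which scales the pptable limit by the board's OverDrive power percentage. From user space such a request normally arrives through amdgpu's hwmon power1_cap attribute (in microwatts); a minimal sketch follows, where the hwmon path and the 230 W value are illustrative assumptions rather than anything taken from this series.

/*
 * Hedged sketch: ask the driver for a new power cap. Writing power1_cap
 * reaches the SMU set_power_limit path patched above; the exact hwmon
 * index varies per system, so the path below is an assumption.
 */
#include <stdio.h>

int main(void)
{
	const char *path = "/sys/class/drm/card0/device/hwmon/hwmon0/power1_cap";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return 1;
	}
	/* 230 W, expressed in microwatts; values above the OD-scaled
	 * pptable limit are now rejected with -EINVAL. */
	fprintf(f, "%llu\n", 230ULL * 1000 * 1000);
	fclose(f);
	return 0;
}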
@@ -466,7 +466,6 @@ static int vega20_store_powerplay_table(struct smu_context *smu)
sizeof(PPTable_t));

table_context->thermal_controller_type = powerplay_table->ucThermalControllerType;
table_context->TDPODLimit = le32_to_cpu(powerplay_table->OverDrive8Table.ODSettingsMax[ATOM_VEGA20_ODSETTING_POWERPERCENTAGE]);

return 0;
}
@@ -379,10 +379,6 @@ radeon_pci_remove(struct pci_dev *pdev)
static void
radeon_pci_shutdown(struct pci_dev *pdev)
{
#ifdef CONFIG_PPC64
struct drm_device *ddev = pci_get_drvdata(pdev);
#endif

/* if we are running in a VM, make sure the device
* torn down properly on reboot/shutdown
*/

@@ -390,13 +386,14 @@ radeon_pci_shutdown(struct pci_dev *pdev)
radeon_pci_remove(pdev);

#ifdef CONFIG_PPC64
/* Some adapters need to be suspended before a
/*
* Some adapters need to be suspended before a
* shutdown occurs in order to prevent an error
* during kexec.
* Make this power specific becauase it breaks
* some non-power boards.
*/
radeon_suspend_kms(ddev, true, true, false);
radeon_suspend_kms(pci_get_drvdata(pdev), true, true, false);
#endif
}