Merge tag 'amd-drm-fixes-5.12-2021-03-24' of https://gitlab.freedesktop.org/agd5f/linux into drm-fixes

amd-drm-fixes-5.12-2021-03-24:

amdgpu:
- S0ix fixes
- Add PCI ID
- Polaris PCIe DPM fix
- Display fix for high refresh rate monitors

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexander.deucher@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210324210630.3949-1-alexander.deucher@amd.com
Dave Airlie 2021-03-26 06:28:17 +10:00
commit 4e8d123fca
17 changed files with 364 additions and 141 deletions
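Note on the S0ix rework in the diffs below: the suspend entry points now latch the platform suspend type into explicit flags (adev->in_s0ix, in_s3, in_s4, replacing in_hibernate and the old in_poweroff_reboot_com combination flag), and the shared suspend/resume paths branch on those flags instead of re-querying ACPI at each decision point. A condensed sketch of the pattern, simplified from the amdgpu_pmops_suspend() hunk in amdgpu_drv.c below (the sketch_pmops_suspend name is illustrative only, not from this commit):

	/* Sketch: latch the suspend type once at the pm_ops entry point;
	 * lower layers (IP-block suspend, VRAM eviction, KFD, SMU) then
	 * test adev->in_s0ix / in_s3 / in_s4 rather than probing ACPI.
	 */
	static int sketch_pmops_suspend(struct device *dev)
	{
		struct drm_device *drm_dev = dev_get_drvdata(dev);
		struct amdgpu_device *adev = drm_to_adev(drm_dev);
		int r;

		if (amdgpu_acpi_is_s0ix_supported(adev))
			adev->in_s0ix = true;	/* modern standby: GFX/PSP stay powered */
		adev->in_s3 = true;
		r = amdgpu_device_suspend(drm_dev, true);
		adev->in_s3 = false;
		return r;
	}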

drivers/gpu/drm/amd/amdgpu/amdgpu.h

@@ -1007,13 +1007,9 @@ struct amdgpu_device {
 	/* s3/s4 mask */
 	bool in_suspend;
-	bool in_hibernate;
-
-	/*
-	 * The combination flag in_poweroff_reboot_com used to identify the poweroff
-	 * and reboot opt in the s0i3 system-wide suspend.
-	 */
-	bool in_poweroff_reboot_com;
+	bool in_s3;
+	bool in_s4;
+	bool in_s0ix;
 
 	atomic_t in_gpu_reset;
 	enum pp_mp1_state mp1_state;

drivers/gpu/drm/amd/amdgpu/amdgpu_device.c

@@ -2371,6 +2371,10 @@ static int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
 		i = state == AMD_CG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
 		if (!adev->ip_blocks[i].status.late_initialized)
 			continue;
+		/* skip CG for GFX on S0ix */
+		if (adev->in_s0ix &&
+		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX)
+			continue;
 		/* skip CG for VCE/UVD, it's handled specially */
 		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
 		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&

@@ -2402,6 +2406,10 @@ static int amdgpu_device_set_pg_state(struct amdgpu_device *adev, enum amd_power
 		i = state == AMD_PG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
 		if (!adev->ip_blocks[i].status.late_initialized)
 			continue;
+		/* skip PG for GFX on S0ix */
+		if (adev->in_s0ix &&
+		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX)
+			continue;
 		/* skip CG for VCE/UVD, it's handled specially */
 		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
 		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&

@@ -2678,11 +2686,8 @@ static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
 {
 	int i, r;
 
-	if (adev->in_poweroff_reboot_com ||
-	    !amdgpu_acpi_is_s0ix_supported(adev) || amdgpu_in_reset(adev)) {
-		amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
-		amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
-	}
+	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
+	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
 
 	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
 		if (!adev->ip_blocks[i].status.valid)

@@ -2722,6 +2727,9 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
 {
 	int i, r;
 
+	if (adev->in_s0ix)
+		amdgpu_gfx_state_change_set(adev, sGpuChangeState_D3Entry);
+
 	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
 		if (!adev->ip_blocks[i].status.valid)
 			continue;

@@ -2734,6 +2742,17 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
 			adev->ip_blocks[i].status.hw = false;
 			continue;
 		}
+
+		/* skip suspend of gfx and psp for S0ix
+		 * gfx is in gfxoff state, so on resume it will exit gfxoff just
+		 * like at runtime. PSP is also part of the always on hardware
+		 * so no need to suspend it.
+		 */
+		if (adev->in_s0ix &&
+		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP ||
+		     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX))
+			continue;
+
 		/* XXX handle errors */
 		r = adev->ip_blocks[i].version->funcs->suspend(adev);
 		/* XXX handle errors */
@@ -3673,14 +3692,9 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
  */
 int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
 {
-	struct amdgpu_device *adev;
-	struct drm_crtc *crtc;
-	struct drm_connector *connector;
-	struct drm_connector_list_iter iter;
+	struct amdgpu_device *adev = drm_to_adev(dev);
 	int r;
 
-	adev = drm_to_adev(dev);
-
 	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
 		return 0;

@@ -3692,61 +3706,19 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
 
 	cancel_delayed_work_sync(&adev->delayed_init_work);
 
-	if (!amdgpu_device_has_dc_support(adev)) {
-		/* turn off display hw */
-		drm_modeset_lock_all(dev);
-		drm_connector_list_iter_begin(dev, &iter);
-		drm_for_each_connector_iter(connector, &iter)
-			drm_helper_connector_dpms(connector,
-						  DRM_MODE_DPMS_OFF);
-		drm_connector_list_iter_end(&iter);
-		drm_modeset_unlock_all(dev);
-		/* unpin the front buffers and cursors */
-		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-			struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-			struct drm_framebuffer *fb = crtc->primary->fb;
-			struct amdgpu_bo *robj;
-
-			if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) {
-				struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
-				r = amdgpu_bo_reserve(aobj, true);
-				if (r == 0) {
-					amdgpu_bo_unpin(aobj);
-					amdgpu_bo_unreserve(aobj);
-				}
-			}
-
-			if (fb == NULL || fb->obj[0] == NULL) {
-				continue;
-			}
-			robj = gem_to_amdgpu_bo(fb->obj[0]);
-			/* don't unpin kernel fb objects */
-			if (!amdgpu_fbdev_robj_is_fb(adev, robj)) {
-				r = amdgpu_bo_reserve(robj, true);
-				if (r == 0) {
-					amdgpu_bo_unpin(robj);
-					amdgpu_bo_unreserve(robj);
-				}
-			}
-		}
-	}
-
 	amdgpu_ras_suspend(adev);
 
 	r = amdgpu_device_ip_suspend_phase1(adev);
 
-	amdgpu_amdkfd_suspend(adev, adev->in_runpm);
+	if (!adev->in_s0ix)
+		amdgpu_amdkfd_suspend(adev, adev->in_runpm);
 
 	/* evict vram memory */
 	amdgpu_bo_evict_vram(adev);
 
 	amdgpu_fence_driver_suspend(adev);
 
-	if (adev->in_poweroff_reboot_com ||
-	    !amdgpu_acpi_is_s0ix_supported(adev) || amdgpu_in_reset(adev))
-		r = amdgpu_device_ip_suspend_phase2(adev);
-	else
-		amdgpu_gfx_state_change_set(adev, sGpuChangeState_D3Entry);
+	r = amdgpu_device_ip_suspend_phase2(adev);
 
 	/* evict remaining vram memory
 	 * This second call to evict vram is to evict the gart page table
 	 * using the CPU.
@@ -3768,16 +3740,13 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
  */
 int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
 {
-	struct drm_connector *connector;
-	struct drm_connector_list_iter iter;
 	struct amdgpu_device *adev = drm_to_adev(dev);
-	struct drm_crtc *crtc;
 	int r = 0;
 
 	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
 		return 0;
 
-	if (amdgpu_acpi_is_s0ix_supported(adev))
+	if (adev->in_s0ix)
 		amdgpu_gfx_state_change_set(adev, sGpuChangeState_D0Entry);

@@ -3802,50 +3771,17 @@ int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
 	queue_delayed_work(system_wq, &adev->delayed_init_work,
 			   msecs_to_jiffies(AMDGPU_RESUME_MS));
 
-	if (!amdgpu_device_has_dc_support(adev)) {
-		/* pin cursors */
-		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-			struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-
-			if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) {
-				struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
-				r = amdgpu_bo_reserve(aobj, true);
-				if (r == 0) {
-					r = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
-					if (r != 0)
-						dev_err(adev->dev, "Failed to pin cursor BO (%d)\n", r);
-					amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
-					amdgpu_bo_unreserve(aobj);
-				}
-			}
-		}
+	if (!adev->in_s0ix) {
+		r = amdgpu_amdkfd_resume(adev, adev->in_runpm);
+		if (r)
+			return r;
 	}
-	r = amdgpu_amdkfd_resume(adev, adev->in_runpm);
-	if (r)
-		return r;
 
 	/* Make sure IB tests flushed */
 	flush_delayed_work(&adev->delayed_init_work);
 
-	/* blat the mode back in */
-	if (fbcon) {
-		if (!amdgpu_device_has_dc_support(adev)) {
-			/* pre DCE11 */
-			drm_helper_resume_force_mode(dev);
-
-			/* turn on display hw */
-			drm_modeset_lock_all(dev);
-			drm_connector_list_iter_begin(dev, &iter);
-			drm_for_each_connector_iter(connector, &iter)
-				drm_helper_connector_dpms(connector,
-							  DRM_MODE_DPMS_ON);
-			drm_connector_list_iter_end(&iter);
-			drm_modeset_unlock_all(dev);
-		}
+	if (fbcon)
 		amdgpu_fbdev_set_suspend(adev, 0);
-	}
 
 	drm_kms_helper_poll_enable(dev);

drivers/gpu/drm/amd/amdgpu/amdgpu_display.c

@@ -1310,3 +1310,92 @@ bool amdgpu_crtc_get_scanout_position(struct drm_crtc *crtc,
 	return amdgpu_display_get_crtc_scanoutpos(dev, pipe, 0, vpos, hpos,
 						  stime, etime, mode);
 }
+
+int amdgpu_display_suspend_helper(struct amdgpu_device *adev)
+{
+	struct drm_device *dev = adev_to_drm(adev);
+	struct drm_crtc *crtc;
+	struct drm_connector *connector;
+	struct drm_connector_list_iter iter;
+	int r;
+
+	/* turn off display hw */
+	drm_modeset_lock_all(dev);
+	drm_connector_list_iter_begin(dev, &iter);
+	drm_for_each_connector_iter(connector, &iter)
+		drm_helper_connector_dpms(connector,
+					  DRM_MODE_DPMS_OFF);
+	drm_connector_list_iter_end(&iter);
+	drm_modeset_unlock_all(dev);
+
+	/* unpin the front buffers and cursors */
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+		struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+		struct drm_framebuffer *fb = crtc->primary->fb;
+		struct amdgpu_bo *robj;
+
+		if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) {
+			struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
+			r = amdgpu_bo_reserve(aobj, true);
+			if (r == 0) {
+				amdgpu_bo_unpin(aobj);
+				amdgpu_bo_unreserve(aobj);
+			}
+		}
+
+		if (fb == NULL || fb->obj[0] == NULL) {
+			continue;
+		}
+		robj = gem_to_amdgpu_bo(fb->obj[0]);
+		/* don't unpin kernel fb objects */
+		if (!amdgpu_fbdev_robj_is_fb(adev, robj)) {
+			r = amdgpu_bo_reserve(robj, true);
+			if (r == 0) {
+				amdgpu_bo_unpin(robj);
+				amdgpu_bo_unreserve(robj);
+			}
+		}
+	}
+	return r;
+}
+
+int amdgpu_display_resume_helper(struct amdgpu_device *adev)
+{
+	struct drm_device *dev = adev_to_drm(adev);
+	struct drm_connector *connector;
+	struct drm_connector_list_iter iter;
+	struct drm_crtc *crtc;
+	int r;
+
+	/* pin cursors */
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+		struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+
+		if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) {
+			struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
+			r = amdgpu_bo_reserve(aobj, true);
+			if (r == 0) {
+				r = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
+				if (r != 0)
+					dev_err(adev->dev, "Failed to pin cursor BO (%d)\n", r);
+				amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
+				amdgpu_bo_unreserve(aobj);
+			}
+		}
+	}
+
+	drm_helper_resume_force_mode(dev);
+
+	/* turn on display hw */
+	drm_modeset_lock_all(dev);
+	drm_connector_list_iter_begin(dev, &iter);
+	drm_for_each_connector_iter(connector, &iter)
+		drm_helper_connector_dpms(connector,
+					  DRM_MODE_DPMS_ON);
+	drm_connector_list_iter_end(&iter);
+	drm_modeset_unlock_all(dev);
+
+	return 0;
+}
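These two helpers factor the pre-DC (non-atomic) display shutdown and bring-up sequences out of amdgpu_device_suspend()/amdgpu_device_resume(), so each display IP block can invoke them from its own suspend/resume callbacks, as the dce_v6/v8/v10/v11 and dce_virtual hunks below do. A minimal sketch of the intended call pattern (the sketch_dce_* names are illustrative, not from this commit):

	static int sketch_dce_suspend(void *handle)
	{
		struct amdgpu_device *adev = (struct amdgpu_device *)handle;
		int r;

		/* DPMS off, unpin framebuffers and cursors */
		r = amdgpu_display_suspend_helper(adev);
		if (r)
			return r;
		/* ... save backlight state, then hw_fini ... */
		return 0;
	}

	static int sketch_dce_resume(void *handle)
	{
		struct amdgpu_device *adev = (struct amdgpu_device *)handle;

		/* ... hw_init and backlight restore run first ... */
		/* then pin cursors, force the mode back in, DPMS on */
		return amdgpu_display_resume_helper(adev);
	}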

drivers/gpu/drm/amd/amdgpu/amdgpu_display.h

@@ -47,4 +47,7 @@ amdgpu_display_user_framebuffer_create(struct drm_device *dev,
 const struct drm_format_info *
 amdgpu_lookup_format_info(u32 format, uint64_t modifier);
 
+int amdgpu_display_suspend_helper(struct amdgpu_device *adev);
+int amdgpu_display_resume_helper(struct amdgpu_device *adev);
+
 #endif

drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c

@@ -1107,6 +1107,7 @@ static const struct pci_device_id pciidlist[] = {
 	{0x1002, 0x73A3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
 	{0x1002, 0x73AB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
 	{0x1002, 0x73AE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
+	{0x1002, 0x73AF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
 	{0x1002, 0x73BF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
 
 	/* Van Gogh */
@@ -1274,24 +1275,35 @@ amdgpu_pci_shutdown(struct pci_dev *pdev)
 	 */
 	if (!amdgpu_passthrough(adev))
 		adev->mp1_state = PP_MP1_STATE_UNLOAD;
-	adev->in_poweroff_reboot_com = true;
+
 	amdgpu_device_ip_suspend(adev);
-	adev->in_poweroff_reboot_com = false;
 	adev->mp1_state = PP_MP1_STATE_NONE;
 }
 
 static int amdgpu_pmops_suspend(struct device *dev)
 {
 	struct drm_device *drm_dev = dev_get_drvdata(dev);
+	struct amdgpu_device *adev = drm_to_adev(drm_dev);
+	int r;
 
-	return amdgpu_device_suspend(drm_dev, true);
+	if (amdgpu_acpi_is_s0ix_supported(adev))
+		adev->in_s0ix = true;
+	adev->in_s3 = true;
+	r = amdgpu_device_suspend(drm_dev, true);
+	adev->in_s3 = false;
+
+	return r;
 }
 
 static int amdgpu_pmops_resume(struct device *dev)
 {
 	struct drm_device *drm_dev = dev_get_drvdata(dev);
+	struct amdgpu_device *adev = drm_to_adev(drm_dev);
+	int r;
 
-	return amdgpu_device_resume(drm_dev, true);
+	r = amdgpu_device_resume(drm_dev, true);
+	if (amdgpu_acpi_is_s0ix_supported(adev))
+		adev->in_s0ix = false;
+	return r;
 }
 
 static int amdgpu_pmops_freeze(struct device *dev)
@@ -1300,9 +1312,9 @@ static int amdgpu_pmops_freeze(struct device *dev)
 	struct amdgpu_device *adev = drm_to_adev(drm_dev);
 	int r;
 
-	adev->in_hibernate = true;
+	adev->in_s4 = true;
 	r = amdgpu_device_suspend(drm_dev, true);
-	adev->in_hibernate = false;
+	adev->in_s4 = false;
 	if (r)
 		return r;
 	return amdgpu_asic_reset(adev);
@@ -1318,13 +1330,8 @@ static int amdgpu_pmops_thaw(struct device *dev)
 static int amdgpu_pmops_poweroff(struct device *dev)
 {
 	struct drm_device *drm_dev = dev_get_drvdata(dev);
-	struct amdgpu_device *adev = drm_to_adev(drm_dev);
-	int r;
 
-	adev->in_poweroff_reboot_com = true;
-	r = amdgpu_device_suspend(drm_dev, true);
-	adev->in_poweroff_reboot_com = false;
-
-	return r;
+	return amdgpu_device_suspend(drm_dev, true);
 }
 
 static int amdgpu_pmops_restore(struct device *dev)

drivers/gpu/drm/amd/amdgpu/amdgpu_object.c

@@ -1028,13 +1028,10 @@ int amdgpu_bo_evict_vram(struct amdgpu_device *adev)
 {
 	struct ttm_resource_manager *man;
 
-	/* late 2.6.33 fix IGP hibernate - we need pm ops to do this correct */
-#ifndef CONFIG_HIBERNATION
-	if (adev->flags & AMD_IS_APU) {
-		/* Useless to evict on IGP chips */
+	if (adev->in_s3 && (adev->flags & AMD_IS_APU)) {
+		/* No need to evict vram on APUs for suspend to ram */
 		return 0;
 	}
-#endif
+
 	man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
 	return ttm_resource_manager_evict_all(&adev->mman.bdev, man);

drivers/gpu/drm/amd/amdgpu/dce_v10_0.c

@@ -2897,6 +2897,11 @@ static int dce_v10_0_hw_fini(void *handle)
 static int dce_v10_0_suspend(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	int r;
+
+	r = amdgpu_display_suspend_helper(adev);
+	if (r)
+		return r;
 
 	adev->mode_info.bl_level =
 		amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);

@@ -2921,8 +2926,10 @@ static int dce_v10_0_resume(void *handle)
 		amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder,
 						   bl_level);
 	}
+	if (ret)
+		return ret;
 
-	return ret;
+	return amdgpu_display_resume_helper(adev);
 }
 
 static bool dce_v10_0_is_idle(void *handle)

drivers/gpu/drm/amd/amdgpu/dce_v11_0.c

@@ -3027,6 +3027,11 @@ static int dce_v11_0_hw_fini(void *handle)
 static int dce_v11_0_suspend(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	int r;
+
+	r = amdgpu_display_suspend_helper(adev);
+	if (r)
+		return r;
 
 	adev->mode_info.bl_level =
 		amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);

@@ -3051,8 +3056,10 @@ static int dce_v11_0_resume(void *handle)
 		amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder,
 						   bl_level);
 	}
+	if (ret)
+		return ret;
 
-	return ret;
+	return amdgpu_display_resume_helper(adev);
 }
 
 static bool dce_v11_0_is_idle(void *handle)

drivers/gpu/drm/amd/amdgpu/dce_v6_0.c

@@ -2770,7 +2770,11 @@ static int dce_v6_0_hw_fini(void *handle)
 static int dce_v6_0_suspend(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	int r;
 
+	r = amdgpu_display_suspend_helper(adev);
+	if (r)
+		return r;
 	adev->mode_info.bl_level =
 		amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);

@@ -2794,8 +2798,10 @@ static int dce_v6_0_resume(void *handle)
 		amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder,
 						   bl_level);
 	}
+	if (ret)
+		return ret;
 
-	return ret;
+	return amdgpu_display_resume_helper(adev);
 }
 
 static bool dce_v6_0_is_idle(void *handle)

drivers/gpu/drm/amd/amdgpu/dce_v8_0.c

@@ -2796,6 +2796,11 @@ static int dce_v8_0_hw_fini(void *handle)
 static int dce_v8_0_suspend(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	int r;
+
+	r = amdgpu_display_suspend_helper(adev);
+	if (r)
+		return r;
 
 	adev->mode_info.bl_level =
 		amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);

@@ -2820,8 +2825,10 @@ static int dce_v8_0_resume(void *handle)
 		amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder,
 						   bl_level);
 	}
+	if (ret)
+		return ret;
 
-	return ret;
+	return amdgpu_display_resume_helper(adev);
 }
 
 static bool dce_v8_0_is_idle(void *handle)

drivers/gpu/drm/amd/amdgpu/dce_virtual.c

@@ -39,6 +39,7 @@
 #include "dce_v11_0.h"
 #include "dce_virtual.h"
 #include "ivsrcid/ivsrcid_vislands30.h"
+#include "amdgpu_display.h"
 
 #define DCE_VIRTUAL_VBLANK_PERIOD 16666666

@@ -491,12 +492,24 @@ static int dce_virtual_hw_fini(void *handle)
 
 static int dce_virtual_suspend(void *handle)
 {
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	int r;
+
+	r = amdgpu_display_suspend_helper(adev);
+	if (r)
+		return r;
+
 	return dce_virtual_hw_fini(handle);
 }
 
 static int dce_virtual_resume(void *handle)
 {
-	return dce_virtual_hw_init(handle);
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	int r;
+
+	r = dce_virtual_hw_init(handle);
+	if (r)
+		return r;
+
+	return amdgpu_display_resume_helper(adev);
 }
 
 static bool dce_virtual_is_idle(void *handle)

drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c

@@ -341,8 +341,7 @@ void enc2_hw_init(struct link_encoder *enc)
 	} else {
 		AUX_REG_WRITE(AUX_DPHY_RX_CONTROL0, 0x103d1110);
 
-		AUX_REG_WRITE(AUX_DPHY_TX_CONTROL, 0x21c4d);
-
+		AUX_REG_WRITE(AUX_DPHY_TX_CONTROL, 0x21c7a);
 	}
 
 	//AUX_DPHY_TX_REF_CONTROL'AUX_TX_REF_DIV HW default is 0x32;

drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c

@@ -587,6 +587,48 @@ static int smu7_force_switch_to_arbf0(struct pp_hwmgr *hwmgr)
 			tmp, MC_CG_ARB_FREQ_F0);
 }
 
+static uint16_t smu7_override_pcie_speed(struct pp_hwmgr *hwmgr)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
+	uint16_t pcie_gen = 0;
+
+	if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4 &&
+	    adev->pm.pcie_gen_mask & CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4)
+		pcie_gen = 3;
+	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 &&
+		 adev->pm.pcie_gen_mask & CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3)
+		pcie_gen = 2;
+	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 &&
+		 adev->pm.pcie_gen_mask & CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2)
+		pcie_gen = 1;
+	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 &&
+		 adev->pm.pcie_gen_mask & CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1)
+		pcie_gen = 0;
+
+	return pcie_gen;
+}
+
+static uint16_t smu7_override_pcie_width(struct pp_hwmgr *hwmgr)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
+	uint16_t pcie_width = 0;
+
+	if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
+		pcie_width = 16;
+	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12)
+		pcie_width = 12;
+	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8)
+		pcie_width = 8;
+	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4)
+		pcie_width = 4;
+	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2)
+		pcie_width = 2;
+	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1)
+		pcie_width = 1;
+
+	return pcie_width;
+}
+
 static int smu7_setup_default_pcie_table(struct pp_hwmgr *hwmgr)
 {
 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
@@ -683,6 +725,11 @@ static int smu7_setup_default_pcie_table(struct pp_hwmgr *hwmgr)
 					PP_Min_PCIEGen),
 			get_pcie_lane_support(data->pcie_lane_cap,
 					PP_Max_PCIELane));
+
+		if (data->pcie_dpm_key_disabled)
+			phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table,
+				data->dpm_table.pcie_speed_table.count,
+				smu7_override_pcie_speed(hwmgr), smu7_override_pcie_width(hwmgr));
 	}
 	return 0;
 }
@@ -1248,6 +1295,13 @@ static int smu7_start_dpm(struct pp_hwmgr *hwmgr)
 				NULL)),
 				"Failed to enable pcie DPM during DPM Start Function!",
 				return -EINVAL);
+	} else {
+		PP_ASSERT_WITH_CODE(
+				(0 == smum_send_msg_to_smc(hwmgr,
+						PPSMC_MSG_PCIeDPM_Disable,
+						NULL)),
+				"Failed to disable pcie DPM during DPM Start Function!",
+				return -EINVAL);
 	}
 
 	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
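The new smu7 override path only runs when data->pcie_dpm_key_disabled is set. As the vega12/vega20 hunks below show explicitly for their registry_data equivalents, that knob is derived from the PP_PCIE_DPM_MASK bit of hwmgr->feature_mask (populated from the amdgpu.ppfeaturemask module parameter); the matching smu7 assignment is not part of this diff. A sketch of the gating, assuming the same derivation applies on smu7 (fragment shows the vega form verbatim):

	/* When the PCIe DPM bit is cleared in ppfeaturemask, the driver
	 * disables link DPM in the SMC and instead pins a single PCIe
	 * table entry at the highest gen/width both the ASIC and the
	 * platform support, so the link speed never switches at runtime.
	 */
	data->registry_data.pcie_dpm_key_disabled =
		!(hwmgr->feature_mask & PP_PCIE_DPM_MASK);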

drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c

@@ -54,6 +54,9 @@
 #include "smuio/smuio_9_0_offset.h"
 #include "smuio/smuio_9_0_sh_mask.h"
 
+#define smnPCIE_LC_SPEED_CNTL		0x11140290
+#define smnPCIE_LC_LINK_WIDTH_CNTL	0x11140288
+
 #define HBM_MEMORY_CHANNEL_WIDTH	128
 
 static const uint32_t channel_number[] = {1, 2, 0, 4, 0, 8, 0, 16, 2};

@@ -443,8 +446,7 @@ static void vega10_init_dpm_defaults(struct pp_hwmgr *hwmgr)
 	if (PP_CAP(PHM_PlatformCaps_VCEDPM))
 		data->smu_features[GNLD_DPM_VCE].supported = true;
 
-	if (!data->registry_data.pcie_dpm_key_disabled)
-		data->smu_features[GNLD_DPM_LINK].supported = true;
+	data->smu_features[GNLD_DPM_LINK].supported = true;
 
 	if (!data->registry_data.dcefclk_dpm_key_disabled)
 		data->smu_features[GNLD_DPM_DCEFCLK].supported = true;

@@ -1544,6 +1546,13 @@ static int vega10_override_pcie_parameters(struct pp_hwmgr *hwmgr)
 		pp_table->PcieLaneCount[i] = pcie_width;
 	}
 
+	if (data->registry_data.pcie_dpm_key_disabled) {
+		for (i = 0; i < NUM_LINK_LEVELS; i++) {
+			pp_table->PcieGenSpeed[i] = pcie_gen;
+			pp_table->PcieLaneCount[i] = pcie_width;
+		}
+	}
+
 	return 0;
 }

@@ -2966,6 +2975,14 @@ static int vega10_start_dpm(struct pp_hwmgr *hwmgr, uint32_t bitmap)
 		}
 	}
 
+	if (data->registry_data.pcie_dpm_key_disabled) {
+		PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
+				false, data->smu_features[GNLD_DPM_LINK].smu_feature_bitmap),
+				"Attempt to Disable Link DPM feature Failed!", return -EINVAL);
+		data->smu_features[GNLD_DPM_LINK].enabled = false;
+		data->smu_features[GNLD_DPM_LINK].supported = false;
+	}
+
 	return 0;
 }
@@ -4584,6 +4601,24 @@ static int vega10_set_ppfeature_status(struct pp_hwmgr *hwmgr, uint64_t new_ppfe
 	return 0;
 }
 
+static int vega10_get_current_pcie_link_width_level(struct pp_hwmgr *hwmgr)
+{
+	struct amdgpu_device *adev = hwmgr->adev;
+
+	return (RREG32_PCIE(smnPCIE_LC_LINK_WIDTH_CNTL) &
+		PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK)
+		>> PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT;
+}
+
+static int vega10_get_current_pcie_link_speed_level(struct pp_hwmgr *hwmgr)
+{
+	struct amdgpu_device *adev = hwmgr->adev;
+
+	return (RREG32_PCIE(smnPCIE_LC_SPEED_CNTL) &
+		PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK)
+		>> PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;
+}
+
 static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
 		enum pp_clock_type type, char *buf)
 {

@@ -4592,8 +4627,9 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
 	struct vega10_single_dpm_table *mclk_table = &(data->dpm_table.mem_table);
 	struct vega10_single_dpm_table *soc_table = &(data->dpm_table.soc_table);
 	struct vega10_single_dpm_table *dcef_table = &(data->dpm_table.dcef_table);
-	struct vega10_pcie_table *pcie_table = &(data->dpm_table.pcie_table);
 	struct vega10_odn_clock_voltage_dependency_table *podn_vdd_dep = NULL;
+	uint32_t gen_speed, lane_width, current_gen_speed, current_lane_width;
+	PPTable_t *pptable = &(data->smc_state_table.pp_table);
 
 	int i, now, size = 0, count = 0;

@@ -4650,15 +4686,31 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
 				"*" : "");
 		break;
 	case PP_PCIE:
-		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentLinkIndex, &now);
-
-		for (i = 0; i < pcie_table->count; i++)
-			size += sprintf(buf + size, "%d: %s %s\n", i,
-					(pcie_table->pcie_gen[i] == 0) ? "2.5GT/s, x1" :
-					(pcie_table->pcie_gen[i] == 1) ? "5.0GT/s, x16" :
-					(pcie_table->pcie_gen[i] == 2) ? "8.0GT/s, x16" : "",
-					(i == now) ? "*" : "");
+		current_gen_speed =
+			vega10_get_current_pcie_link_speed_level(hwmgr);
+		current_lane_width =
+			vega10_get_current_pcie_link_width_level(hwmgr);
+		for (i = 0; i < NUM_LINK_LEVELS; i++) {
+			gen_speed = pptable->PcieGenSpeed[i];
+			lane_width = pptable->PcieLaneCount[i];
+
+			size += sprintf(buf + size, "%d: %s %s %s\n", i,
+					(gen_speed == 0) ? "2.5GT/s," :
+					(gen_speed == 1) ? "5.0GT/s," :
+					(gen_speed == 2) ? "8.0GT/s," :
+					(gen_speed == 3) ? "16.0GT/s," : "",
+					(lane_width == 1) ? "x1" :
+					(lane_width == 2) ? "x2" :
+					(lane_width == 3) ? "x4" :
+					(lane_width == 4) ? "x8" :
+					(lane_width == 5) ? "x12" :
+					(lane_width == 6) ? "x16" : "",
+					(current_gen_speed == gen_speed) &&
+					(current_lane_width == lane_width) ?
+					"*" : "");
+		}
 		break;
 	case OD_SCLK:
 		if (hwmgr->od_enabled) {
 			size = sprintf(buf, "%s:\n", "OD_SCLK");
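With the reworked PP_PCIE case above, the pp_dpm_pcie sysfs file lists every configured link level with its gen speed and lane width, and stars the level matching the current link state read back from the PCIE_LC registers, rather than assuming fixed speed/width pairings and trusting the SMC's reported index. Illustrative output only; the actual levels depend on the board (format follows the sprintf above):

	0: 2.5GT/s, x8
	1: 8.0GT/s, x16 *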

drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c

@@ -133,6 +133,7 @@ static void vega12_set_default_registry_data(struct pp_hwmgr *hwmgr)
 	data->registry_data.auto_wattman_debug = 0;
 	data->registry_data.auto_wattman_sample_period = 100;
 	data->registry_data.auto_wattman_threshold = 50;
+	data->registry_data.pcie_dpm_key_disabled = !(hwmgr->feature_mask & PP_PCIE_DPM_MASK);
 }
 
 static int vega12_set_features_platform_caps(struct pp_hwmgr *hwmgr)
@@ -539,6 +540,29 @@ static int vega12_override_pcie_parameters(struct pp_hwmgr *hwmgr)
 		pp_table->PcieLaneCount[i] = pcie_width_arg;
 	}
 
+	/* override to the highest if it's disabled from ppfeaturemask */
+	if (data->registry_data.pcie_dpm_key_disabled) {
+		for (i = 0; i < NUM_LINK_LEVELS; i++) {
+			smu_pcie_arg = (i << 16) | (pcie_gen << 8) | pcie_width;
+			ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+				PPSMC_MSG_OverridePcieParameters, smu_pcie_arg,
+				NULL);
+			PP_ASSERT_WITH_CODE(!ret,
+				"[OverridePcieParameters] Attempt to override pcie params failed!",
+				return ret);
+
+			pp_table->PcieGenSpeed[i] = pcie_gen;
+			pp_table->PcieLaneCount[i] = pcie_width;
+		}
+		ret = vega12_enable_smc_features(hwmgr,
+				false,
+				data->smu_features[GNLD_DPM_LINK].smu_feature_bitmap);
+		PP_ASSERT_WITH_CODE(!ret,
+				"Attempt to Disable DPM LINK Failed!",
+				return ret);
+		data->smu_features[GNLD_DPM_LINK].enabled = false;
+		data->smu_features[GNLD_DPM_LINK].supported = false;
+	}
+
 	return 0;
 }

drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c

@@ -171,6 +171,7 @@ static void vega20_set_default_registry_data(struct pp_hwmgr *hwmgr)
 	data->registry_data.gfxoff_controlled_by_driver = 1;
 	data->gfxoff_allowed = false;
 	data->counter_gfxoff = 0;
+	data->registry_data.pcie_dpm_key_disabled = !(hwmgr->feature_mask & PP_PCIE_DPM_MASK);
 }
 
 static int vega20_set_features_platform_caps(struct pp_hwmgr *hwmgr)
@@ -884,6 +885,30 @@ static int vega20_override_pcie_parameters(struct pp_hwmgr *hwmgr)
 		pp_table->PcieLaneCount[i] = pcie_width_arg;
 	}
 
+	/* override to the highest if it's disabled from ppfeaturemask */
+	if (data->registry_data.pcie_dpm_key_disabled) {
+		for (i = 0; i < NUM_LINK_LEVELS; i++) {
+			smu_pcie_arg = (i << 16) | (pcie_gen << 8) | pcie_width;
+			ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+				PPSMC_MSG_OverridePcieParameters, smu_pcie_arg,
+				NULL);
+			PP_ASSERT_WITH_CODE(!ret,
+				"[OverridePcieParameters] Attempt to override pcie params failed!",
+				return ret);
+
+			pp_table->PcieGenSpeed[i] = pcie_gen;
+			pp_table->PcieLaneCount[i] = pcie_width;
+		}
+		ret = vega20_enable_smc_features(hwmgr,
+				false,
+				data->smu_features[GNLD_DPM_LINK].smu_feature_bitmap);
+		PP_ASSERT_WITH_CODE(!ret,
+				"Attempt to Disable DPM LINK Failed!",
+				return ret);
+		data->smu_features[GNLD_DPM_LINK].enabled = false;
+		data->smu_features[GNLD_DPM_LINK].supported = false;
+	}
+
 	return 0;
 }

drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c

@@ -1294,7 +1294,7 @@ static int smu_disable_dpms(struct smu_context *smu)
 	bool use_baco = !smu->is_apu &&
 		((amdgpu_in_reset(adev) &&
 		  (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)) ||
-		 ((adev->in_runpm || adev->in_hibernate) && amdgpu_asic_supports_baco(adev)));
+		 ((adev->in_runpm || adev->in_s4) && amdgpu_asic_supports_baco(adev)));
 
 	/*
 	 * For custom pptable uploading, skip the DPM features

@@ -1431,7 +1431,8 @@ static int smu_suspend(void *handle)
 
 	smu->watermarks_bitmap &= ~(WATERMARKS_LOADED);
 
-	if (smu->is_apu)
+	/* skip CGPG when in S0ix */
+	if (smu->is_apu && !adev->in_s0ix)
 		smu_set_gfx_cgpg(&adev->smu, false);
 
 	return 0;