Merge branch 'drm-fixes-4.12' of git://people.freedesktop.org/~agd5f/linux into drm-fixes
A bunch of bug fixes:
- Fix display flickering on some chips at high refresh rates
- suspend/resume fix
- hotplug fix
- a couple of segfault fixes for certain cases

* 'drm-fixes-4.12' of git://people.freedesktop.org/~agd5f/linux:
  drm/amdgpu: fix null point error when rmmod amdgpu.
  drm/amd/powerplay: fix a signedness bugs
  drm/amdgpu: fix NULL pointer panic of emit_gds_switch
  drm/radeon: Unbreak HPD handling for r600+
  drm/amd/powerplay/smu7: disable mclk switching for high refresh rates
  drm/amd/powerplay/smu7: add vblank check for mclk switching (v2)
  drm/radeon/ci: disable mclk switching for high refresh rates (v2)
  drm/amdgpu/ci: disable mclk switching for high refresh rates (v2)
  drm/amdgpu: fix fundamental suspend/resume issue
commit bc1f0e04da
drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
@@ -425,10 +425,15 @@ bool amdgpu_fbdev_robj_is_fb(struct amdgpu_device *adev, struct amdgpu_bo *robj)
 
 void amdgpu_fbdev_restore_mode(struct amdgpu_device *adev)
 {
-        struct amdgpu_fbdev *afbdev = adev->mode_info.rfbdev;
+        struct amdgpu_fbdev *afbdev;
         struct drm_fb_helper *fb_helper;
         int ret;
 
+        if (!adev)
+                return;
+
+        afbdev = adev->mode_info.rfbdev;
+
         if (!afbdev)
                 return;
 
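The rmmod fix above is a defensive-ordering change: amdgpu_fbdev_restore_mode() can be reached while the module is being torn down, so adev has to be checked before anything hanging off it is dereferenced (the old code read adev->mode_info.rfbdev in the initializer, before any check could run). A minimal standalone sketch of the same guard-early-return pattern; the struct and function names here are invented for illustration, not the driver's:

#include <stddef.h>

struct fbdev_state { int in_use; };
struct mode_info { struct fbdev_state *rfbdev; };
struct device_state { struct mode_info mode_info; };

/* Bail out on every pointer that may be NULL during unload,
 * before it is dereferenced. */
static void restore_mode(struct device_state *dev)
{
        struct fbdev_state *fb;

        if (!dev)               /* device already torn down */
                return;

        fb = dev->mode_info.rfbdev;
        if (!fb)                /* fbdev emulation never set up */
                return;

        /* ... fb is safe to use from here on ... */
}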
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -634,7 +634,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job)
                 mutex_unlock(&id_mgr->lock);
         }
 
-        if (gds_switch_needed) {
+        if (ring->funcs->emit_gds_switch && gds_switch_needed) {
                 id->gds_base = job->gds_base;
                 id->gds_size = job->gds_size;
                 id->gws_base = job->gws_base;
@@ -672,6 +672,7 @@ void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vmhub,
         struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub];
         struct amdgpu_vm_id *id = &id_mgr->ids[vmid];
 
+        atomic64_set(&id->owner, 0);
         id->gds_base = 0;
         id->gds_size = 0;
         id->gws_base = 0;
@@ -680,6 +681,26 @@ void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vmhub,
         id->oa_size = 0;
 }
 
+/**
+ * amdgpu_vm_reset_all_id - reset VMID to zero
+ *
+ * @adev: amdgpu device structure
+ *
+ * Reset VMID to force flush on next use
+ */
+void amdgpu_vm_reset_all_ids(struct amdgpu_device *adev)
+{
+        unsigned i, j;
+
+        for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
+                struct amdgpu_vm_id_manager *id_mgr =
+                        &adev->vm_manager.id_mgr[i];
+
+                for (j = 1; j < id_mgr->num_ids; ++j)
+                        amdgpu_vm_reset_id(adev, i, j);
+        }
+}
+
 /**
  * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo
  *
@@ -2270,7 +2291,6 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev)
         for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
                 adev->vm_manager.seqno[i] = 0;
 
-
         atomic_set(&adev->vm_manager.vm_pte_next_ring, 0);
         atomic64_set(&adev->vm_manager.client_counter, 0);
         spin_lock_init(&adev->vm_manager.prt_lock);
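The emit_gds_switch guard in the first amdgpu_vm.c hunk above is the classic optional-callback pattern: not every ring backend implements a GDS switch, so its function pointer may legitimately be NULL and must be tested before the surrounding condition can lead to a call. A hedged sketch of the pattern, with types and names invented for illustration:

#include <stdbool.h>
#include <stddef.h>

struct ring;

struct ring_funcs {
        /* optional: backends without GDS leave this NULL */
        void (*emit_gds_switch)(struct ring *ring);
};

struct ring {
        const struct ring_funcs *funcs;
};

static void flush(struct ring *ring, bool gds_switch_needed)
{
        /* test the pointer first so && short-circuits and a
         * NULL callback is never invoked */
        if (ring->funcs->emit_gds_switch && gds_switch_needed)
                ring->funcs->emit_gds_switch(ring);
}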
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -204,6 +204,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job);
 void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vmhub,
                         unsigned vmid);
+void amdgpu_vm_reset_all_ids(struct amdgpu_device *adev);
 int amdgpu_vm_update_directories(struct amdgpu_device *adev,
                                  struct amdgpu_vm *vm);
 int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
drivers/gpu/drm/amd/amdgpu/ci_dpm.c
@@ -906,6 +906,12 @@ static bool ci_dpm_vblank_too_short(struct amdgpu_device *adev)
         u32 vblank_time = amdgpu_dpm_get_vblank_time(adev);
         u32 switch_limit = adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5 ? 450 : 300;
 
+        /* disable mclk switching if the refresh is >120Hz, even if the
+         * blanking period would allow it
+         */
+        if (amdgpu_dpm_get_vrefresh(adev) > 120)
+                return true;
+
         if (vblank_time < switch_limit)
                 return true;
         else
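The 450/300 limits are microseconds of vertical blanking: the memory clock can only be retrained while nothing is being scanned out, and GDDR5 needs a longer window than other VRAM types. A rough worked example (simplified CRTC arithmetic with made-up 144 Hz timings, not driver code) shows why high-refresh modes fail the check:

#include <stdio.h>

int main(void)
{
        /* hypothetical mode: 2080 total pixels per line,
         * 1111 total lines, 1080 visible, 144 Hz refresh */
        double htotal = 2080, vtotal = 1111, vdisplay = 1080, refresh = 144;

        double pixel_clock  = htotal * vtotal * refresh;          /* ~332.8 MHz */
        double line_time_us = htotal / pixel_clock * 1e6;         /* ~6.25 us   */
        double vblank_us    = (vtotal - vdisplay) * line_time_us;

        /* ~194 us of blanking: well below the 450 us a GDDR5 board
         * needs, so mclk switching stays disabled for this mode */
        printf("vblank = %.0f us\n", vblank_us);
        return 0;
}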
drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -950,10 +950,6 @@ static int gmc_v6_0_suspend(void *handle)
 {
         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-        if (adev->vm_manager.enabled) {
-                gmc_v6_0_vm_fini(adev);
-                adev->vm_manager.enabled = false;
-        }
         gmc_v6_0_hw_fini(adev);
 
         return 0;
@@ -968,16 +964,9 @@ static int gmc_v6_0_resume(void *handle)
         if (r)
                 return r;
 
-        if (!adev->vm_manager.enabled) {
-                r = gmc_v6_0_vm_init(adev);
-                if (r) {
-                        dev_err(adev->dev, "vm manager initialization failed (%d).\n", r);
-                        return r;
-                }
-                adev->vm_manager.enabled = true;
-        }
+        amdgpu_vm_reset_all_ids(adev);
 
-        return r;
+        return 0;
 }
 
 static bool gmc_v6_0_is_idle(void *handle)
drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -1117,10 +1117,6 @@ static int gmc_v7_0_suspend(void *handle)
 {
         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-        if (adev->vm_manager.enabled) {
-                gmc_v7_0_vm_fini(adev);
-                adev->vm_manager.enabled = false;
-        }
 gmc_v7_0_hw_fini(adev);
 
         return 0;
@@ -1135,16 +1131,9 @@ static int gmc_v7_0_resume(void *handle)
         if (r)
                 return r;
 
-        if (!adev->vm_manager.enabled) {
-                r = gmc_v7_0_vm_init(adev);
-                if (r) {
-                        dev_err(adev->dev, "vm manager initialization failed (%d).\n", r);
-                        return r;
-                }
-                adev->vm_manager.enabled = true;
-        }
+        amdgpu_vm_reset_all_ids(adev);
 
-        return r;
+        return 0;
 }
 
 static bool gmc_v7_0_is_idle(void *handle)
drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -1209,10 +1209,6 @@ static int gmc_v8_0_suspend(void *handle)
 {
         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-        if (adev->vm_manager.enabled) {
-                gmc_v8_0_vm_fini(adev);
-                adev->vm_manager.enabled = false;
-        }
         gmc_v8_0_hw_fini(adev);
 
         return 0;
@@ -1227,16 +1223,9 @@ static int gmc_v8_0_resume(void *handle)
         if (r)
                 return r;
 
-        if (!adev->vm_manager.enabled) {
-                r = gmc_v8_0_vm_init(adev);
-                if (r) {
-                        dev_err(adev->dev, "vm manager initialization failed (%d).\n", r);
-                        return r;
-                }
-                adev->vm_manager.enabled = true;
-        }
+        amdgpu_vm_reset_all_ids(adev);
 
-        return r;
+        return 0;
 }
 
 static bool gmc_v8_0_is_idle(void *handle)
drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -791,10 +791,6 @@ static int gmc_v9_0_suspend(void *handle)
 {
         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-        if (adev->vm_manager.enabled) {
-                gmc_v9_0_vm_fini(adev);
-                adev->vm_manager.enabled = false;
-        }
         gmc_v9_0_hw_fini(adev);
 
         return 0;
@@ -809,17 +805,9 @@ static int gmc_v9_0_resume(void *handle)
         if (r)
                 return r;
 
-        if (!adev->vm_manager.enabled) {
-                r = gmc_v9_0_vm_init(adev);
-                if (r) {
-                        dev_err(adev->dev,
-                                "vm manager initialization failed (%d).\n", r);
-                        return r;
-                }
-                adev->vm_manager.enabled = true;
-        }
+        amdgpu_vm_reset_all_ids(adev);
 
-        return r;
+        return 0;
 }
 
 static bool gmc_v9_0_is_idle(void *handle)
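All four gmc_v*_0 changes above are the same suspend/resume fix: the VM manager is no longer torn down on suspend and rebuilt on resume; it stays alive, and every VMID is instead reset on resume, so the first job that grabs an ID afterwards no longer matches a cached owner and is forced into a full flush. A schematic sketch of that idea (data layout invented for illustration; VMID 0 is skipped, as in the real loop):

#include <string.h>

#define MAX_HUBS 2
#define NUM_IDS  16

struct vm_id { unsigned long owner; unsigned gds_base, gds_size; };
struct id_manager { unsigned num_ids; struct vm_id ids[NUM_IDS]; };

/* zeroing the owner means no VM matches this ID anymore, so the
 * next lookup cannot reuse stale pre-suspend state */
static void reset_id(struct id_manager *mgr, unsigned id)
{
        memset(&mgr->ids[id], 0, sizeof(mgr->ids[id]));
}

static void reset_all_ids(struct id_manager mgr[MAX_HUBS])
{
        unsigned hub, id;

        for (hub = 0; hub < MAX_HUBS; ++hub)
                for (id = 1; id < mgr[hub].num_ids; ++id) /* ID 0 reserved */
                        reset_id(&mgr[hub], id);
}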
drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -2655,6 +2655,28 @@ static int smu7_get_power_state_size(struct pp_hwmgr *hwmgr)
         return sizeof(struct smu7_power_state);
 }
 
+static int smu7_vblank_too_short(struct pp_hwmgr *hwmgr,
+                                 uint32_t vblank_time_us)
+{
+        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+        uint32_t switch_limit_us;
+
+        switch (hwmgr->chip_id) {
+        case CHIP_POLARIS10:
+        case CHIP_POLARIS11:
+        case CHIP_POLARIS12:
+                switch_limit_us = data->is_memory_gddr5 ? 190 : 150;
+                break;
+        default:
+                switch_limit_us = data->is_memory_gddr5 ? 450 : 150;
+                break;
+        }
+
+        if (vblank_time_us < switch_limit_us)
+                return true;
+        else
+                return false;
+}
 
 static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
                                 struct pp_power_state *request_ps,
@@ -2669,6 +2691,7 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
         bool disable_mclk_switching;
         bool disable_mclk_switching_for_frame_lock;
         struct cgs_display_info info = {0};
+        struct cgs_mode_info mode_info = {0};
         const struct phm_clock_and_voltage_limits *max_limits;
         uint32_t i;
         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
@@ -2677,6 +2700,7 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
         int32_t count;
         int32_t stable_pstate_sclk = 0, stable_pstate_mclk = 0;
 
+        info.mode_info = &mode_info;
         data->battery_state = (PP_StateUILabel_Battery ==
                         request_ps->classification.ui_label);
 
@@ -2703,8 +2727,6 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
 
         cgs_get_active_displays_info(hwmgr->device, &info);
 
-        /*TO DO result = PHM_CheckVBlankTime(hwmgr, &vblankTooShort);*/
-
         minimum_clocks.engineClock = hwmgr->display_config.min_core_set_clock;
         minimum_clocks.memoryClock = hwmgr->display_config.min_mem_set_clock;
 
@@ -2769,8 +2791,10 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
                                     PHM_PlatformCaps_DisableMclkSwitchingForFrameLock);
 
 
-        disable_mclk_switching = (1 < info.display_count) ||
-                                  disable_mclk_switching_for_frame_lock;
+        disable_mclk_switching = ((1 < info.display_count) ||
+                                  disable_mclk_switching_for_frame_lock ||
+                                  smu7_vblank_too_short(hwmgr, mode_info.vblank_time_us) ||
+                                  (mode_info.refresh_rate > 120));
 
         sclk = smu7_ps->performance_levels[0].engine_clock;
         mclk = smu7_ps->performance_levels[0].memory_clock;
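On the powerplay side the same policy is folded into one predicate: mclk switching is disabled whenever more than one display is active, frame lock demands it, the blanking window is shorter than the chip's retraining time, or the refresh rate exceeds 120 Hz. Isolated as a function for clarity; this is a condensed restatement with the per-chip threshold lookup elided, not driver code:

#include <stdbool.h>
#include <stdint.h>

static bool should_disable_mclk_switching(unsigned display_count,
                                          bool frame_lock,
                                          uint32_t vblank_time_us,
                                          uint32_t switch_limit_us,
                                          uint32_t refresh_rate)
{
        return display_count > 1 ||                /* displays blank at different times */
               frame_lock ||                       /* platform pins the memory clock */
               vblank_time_us < switch_limit_us || /* blank too short to retrain */
               refresh_rate > 120;                 /* high refresh: never risk it */
}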
drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
@@ -4186,7 +4186,7 @@ static int vega10_force_clock_level(struct pp_hwmgr *hwmgr,
                 enum pp_clock_type type, uint32_t mask)
 {
         struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
-        uint32_t i;
+        int i;
 
         if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
                 return -EINVAL;
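This one-word vega10 change is the "signedness bugs" fix from the shortlog: with an unsigned index, a test like i >= 0 is always true (and i < 0 always false), so the compiler quietly removes the error path. A minimal reproduction of the bug class, not the driver's code:

#include <stdio.h>
#include <stdint.h>

static int find_highest_bit(uint32_t mask)
{
        int i;  /* must be signed: -1 is the "not found" result */

        /* with uint32_t i, the decrement at 0 wraps to 0xffffffff,
         * i >= 0 stays true forever, and the loop never exits */
        for (i = 31; i >= 0; i--)
                if (mask & (1u << i))
                        return i;
        return -1;
}

int main(void)
{
        printf("%d\n", find_highest_bit(0)); /* -1, only because i is signed */
        return 0;
}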
drivers/gpu/drm/radeon/ci_dpm.c
@@ -776,6 +776,12 @@ bool ci_dpm_vblank_too_short(struct radeon_device *rdev)
         u32 vblank_time = r600_dpm_get_vblank_time(rdev);
         u32 switch_limit = pi->mem_gddr5 ? 450 : 300;
 
+        /* disable mclk switching if the refresh is >120Hz, even if the
+         * blanking period would allow it
+         */
+        if (r600_dpm_get_vrefresh(rdev) > 120)
+                return true;
+
         if (vblank_time < switch_limit)
                 return true;
         else
drivers/gpu/drm/radeon/cik.c
@@ -7401,7 +7401,7 @@ static inline void cik_irq_ack(struct radeon_device *rdev)
                 WREG32(DC_HPD5_INT_CONTROL, tmp);
         }
         if (rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_INTERRUPT) {
-                tmp = RREG32(DC_HPD5_INT_CONTROL);
+                tmp = RREG32(DC_HPD6_INT_CONTROL);
                 tmp |= DC_HPDx_INT_ACK;
                 WREG32(DC_HPD6_INT_CONTROL, tmp);
         }
@@ -7431,7 +7431,7 @@ static inline void cik_irq_ack(struct radeon_device *rdev)
                 WREG32(DC_HPD5_INT_CONTROL, tmp);
         }
         if (rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) {
-                tmp = RREG32(DC_HPD5_INT_CONTROL);
+                tmp = RREG32(DC_HPD6_INT_CONTROL);
                 tmp |= DC_HPDx_RX_INT_ACK;
                 WREG32(DC_HPD6_INT_CONTROL, tmp);
         }
drivers/gpu/drm/radeon/evergreen.c
@@ -4927,7 +4927,7 @@ static void evergreen_irq_ack(struct radeon_device *rdev)
                 WREG32(DC_HPD5_INT_CONTROL, tmp);
         }
         if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
-                tmp = RREG32(DC_HPD5_INT_CONTROL);
+                tmp = RREG32(DC_HPD6_INT_CONTROL);
                 tmp |= DC_HPDx_INT_ACK;
                 WREG32(DC_HPD6_INT_CONTROL, tmp);
         }
@@ -4958,7 +4958,7 @@ static void evergreen_irq_ack(struct radeon_device *rdev)
                 WREG32(DC_HPD5_INT_CONTROL, tmp);
         }
         if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) {
-                tmp = RREG32(DC_HPD5_INT_CONTROL);
+                tmp = RREG32(DC_HPD6_INT_CONTROL);
                 tmp |= DC_HPDx_RX_INT_ACK;
                 WREG32(DC_HPD6_INT_CONTROL, tmp);
         }
drivers/gpu/drm/radeon/r600.c
@@ -3988,7 +3988,7 @@ static void r600_irq_ack(struct radeon_device *rdev)
                 WREG32(DC_HPD5_INT_CONTROL, tmp);
         }
         if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) {
-                tmp = RREG32(DC_HPD5_INT_CONTROL);
+                tmp = RREG32(DC_HPD6_INT_CONTROL);
                 tmp |= DC_HPDx_INT_ACK;
                 WREG32(DC_HPD6_INT_CONTROL, tmp);
         }
drivers/gpu/drm/radeon/si.c
@@ -6317,7 +6317,7 @@ static inline void si_irq_ack(struct radeon_device *rdev)
                 WREG32(DC_HPD5_INT_CONTROL, tmp);
         }
         if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
-                tmp = RREG32(DC_HPD5_INT_CONTROL);
+                tmp = RREG32(DC_HPD6_INT_CONTROL);
                 tmp |= DC_HPDx_INT_ACK;
                 WREG32(DC_HPD6_INT_CONTROL, tmp);
         }
@@ -6348,7 +6348,7 @@ static inline void si_irq_ack(struct radeon_device *rdev)
                 WREG32(DC_HPD5_INT_CONTROL, tmp);
         }
         if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) {
-                tmp = RREG32(DC_HPD5_INT_CONTROL);
+                tmp = RREG32(DC_HPD6_INT_CONTROL);
                 tmp |= DC_HPDx_RX_INT_ACK;
                 WREG32(DC_HPD6_INT_CONTROL, tmp);
         }
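Every hunk in cik.c, evergreen.c, r600.c and si.c fixes the same copy-paste slip: the read half of a read-modify-write on DC_HPD6_INT_CONTROL was reading DC_HPD5_INT_CONTROL, so the HPD6 ack bit was OR'd into a stale copy of the wrong register and hotplug interrupts were never properly acknowledged. Naming the register once in a helper rules the mismatch out by construction; a hedged sketch, where mmio_read/mmio_write are stand-ins rather than radeon's accessors:

#include <stdint.h>

extern uint32_t mmio_read(uint32_t reg);
extern void mmio_write(uint32_t reg, uint32_t val);

/* the read and the write share one 'reg' argument, so they
 * cannot silently target different registers */
static void mmio_set_bits(uint32_t reg, uint32_t bits)
{
        mmio_write(reg, mmio_read(reg) | bits);
}

/* usage: mmio_set_bits(DC_HPD6_INT_CONTROL, DC_HPDx_INT_ACK); */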