mirror of https://gitee.com/openkylin/linux.git
Merge branch 'drm-fixes-3.17' of git://people.freedesktop.org/~agd5f/linux into drm-fixes
- fix a resume hang on mullins
- fix an oops on module unload with vgaswitcheroo (radeon and nouveau)
- fix possible DMA engine hangs due to hw bugs

* 'drm-fixes-3.17' of git://people.freedesktop.org/~agd5f/linux:
  drm/nouveau/runpm: fix module unload
  drm/radeon/px: fix module unload
  vgaswitcheroo: add vga_switcheroo_fini_domain_pm_ops
  drm/radeon: don't reset dma on r6xx-evergreen init
  drm/radeon: don't reset sdma on CIK init
  drm/radeon: don't reset dma on NI/SI init
  drm/radeon/dpm: fix resume on mullins
  drm/radeon: Disable HDP flush before every CS again for < r600
  drm/radeon: delete unused PTE_* defines
commit b5591bd6a6
drivers/gpu/drm/nouveau/nouveau_vga.c

@@ -108,7 +108,16 @@ void
 nouveau_vga_fini(struct nouveau_drm *drm)
 {
         struct drm_device *dev = drm->dev;
+        bool runtime = false;
+
+        if (nouveau_runtime_pm == 1)
+                runtime = true;
+        if ((nouveau_runtime_pm == -1) && (nouveau_is_optimus() || nouveau_is_v1_dsm()))
+                runtime = true;
+
         vga_switcheroo_unregister_client(dev->pdev);
+        if (runtime && nouveau_is_v1_dsm() && !nouveau_is_optimus())
+                vga_switcheroo_fini_domain_pm_ops(drm->dev->dev);
         vga_client_register(dev->pdev, NULL, NULL, NULL);
 }
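The fini path now recomputes the same runtime-PM decision that was made at init time, so the PM domain is only torn down when it was actually installed. A minimal sketch of how such a setup/teardown pair fits together; "my_gpu" and "my_switcheroo_ops" are illustrative names, only the vga_switcheroo_* calls are the real API touched by this series:

#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/vga_switcheroo.h>

/* Illustrative driver state; not taken from this diff. */
struct my_gpu {
        struct pci_dev *pdev;
        struct dev_pm_domain pm_domain;
};

/* Real drivers fill in .set_gpu_state, .reprobe, .can_switch. */
static const struct vga_switcheroo_client_ops my_switcheroo_ops;

static int my_gpu_switcheroo_init(struct my_gpu *gpu, bool runtime)
{
        vga_switcheroo_register_client(gpu->pdev, &my_switcheroo_ops, runtime);
        if (runtime)
                return vga_switcheroo_init_domain_pm_ops(&gpu->pdev->dev,
                                                         &gpu->pm_domain);
        return 0;
}

static void my_gpu_switcheroo_fini(struct my_gpu *gpu, bool runtime)
{
        vga_switcheroo_unregister_client(gpu->pdev);
        if (runtime)    /* undo only what init installed */
                vga_switcheroo_fini_domain_pm_ops(&gpu->pdev->dev);
}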
drivers/gpu/drm/radeon/cik_sdma.c

@@ -489,13 +489,6 @@ int cik_sdma_resume(struct radeon_device *rdev)
 {
         int r;
 
-        /* Reset dma */
-        WREG32(SRBM_SOFT_RESET, SOFT_RESET_SDMA | SOFT_RESET_SDMA1);
-        RREG32(SRBM_SOFT_RESET);
-        udelay(50);
-        WREG32(SRBM_SOFT_RESET, 0);
-        RREG32(SRBM_SOFT_RESET);
-
         r = cik_sdma_load_microcode(rdev);
         if (r)
                 return r;
drivers/gpu/drm/radeon/kv_dpm.c

@@ -33,6 +33,8 @@
 #define KV_MINIMUM_ENGINE_CLOCK         800
 #define SMC_RAM_END                     0x40000
 
+static int kv_enable_nb_dpm(struct radeon_device *rdev,
+                            bool enable);
 static void kv_init_graphics_levels(struct radeon_device *rdev);
 static int kv_calculate_ds_divider(struct radeon_device *rdev);
 static int kv_calculate_nbps_level_settings(struct radeon_device *rdev);
@@ -1295,6 +1297,9 @@ void kv_dpm_disable(struct radeon_device *rdev)
 {
         kv_smc_bapm_enable(rdev, false);
 
+        if (rdev->family == CHIP_MULLINS)
+                kv_enable_nb_dpm(rdev, false);
+
         /* powerup blocks */
         kv_dpm_powergate_acp(rdev, false);
         kv_dpm_powergate_samu(rdev, false);
@@ -1769,15 +1774,24 @@ static int kv_update_dfs_bypass_settings(struct radeon_device *rdev,
         return ret;
 }
 
-static int kv_enable_nb_dpm(struct radeon_device *rdev)
+static int kv_enable_nb_dpm(struct radeon_device *rdev,
+                            bool enable)
 {
         struct kv_power_info *pi = kv_get_pi(rdev);
         int ret = 0;
 
-        if (pi->enable_nb_dpm && !pi->nb_dpm_enabled) {
-                ret = kv_notify_message_to_smu(rdev, PPSMC_MSG_NBDPM_Enable);
-                if (ret == 0)
-                        pi->nb_dpm_enabled = true;
+        if (enable) {
+                if (pi->enable_nb_dpm && !pi->nb_dpm_enabled) {
+                        ret = kv_notify_message_to_smu(rdev, PPSMC_MSG_NBDPM_Enable);
+                        if (ret == 0)
+                                pi->nb_dpm_enabled = true;
+                }
+        } else {
+                if (pi->enable_nb_dpm && pi->nb_dpm_enabled) {
+                        ret = kv_notify_message_to_smu(rdev, PPSMC_MSG_NBDPM_Disable);
+                        if (ret == 0)
+                                pi->nb_dpm_enabled = false;
+                }
         }
 
         return ret;
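The reworked helper folds enable and disable into one entry point selected by the new bool, which is what lets kv_dpm_disable() tear NB DPM down on Mullins above. The core of that pattern, reduced to a sketch (kv_notify_message_to_smu() and the PPSMC_MSG_* IDs are the existing radeon KV SMU interface; the wrapper name is made up, and the real function additionally tracks pi->nb_dpm_enabled):

/* Sketch only: one helper, direction chosen by the caller. */
static int example_set_nb_dpm(struct radeon_device *rdev, bool enable)
{
        return kv_notify_message_to_smu(rdev,
                                        enable ? PPSMC_MSG_NBDPM_Enable
                                               : PPSMC_MSG_NBDPM_Disable);
}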
@@ -1864,7 +1878,7 @@ int kv_dpm_set_power_state(struct radeon_device *rdev)
                         }
                         kv_update_sclk_t(rdev);
                         if (rdev->family == CHIP_MULLINS)
-                                kv_enable_nb_dpm(rdev);
+                                kv_enable_nb_dpm(rdev, true);
                 }
         } else {
                 if (pi->enable_dpm) {
@@ -1889,7 +1903,7 @@ int kv_dpm_set_power_state(struct radeon_device *rdev)
                         }
                         kv_update_acp_boot_level(rdev);
                         kv_update_sclk_t(rdev);
-                        kv_enable_nb_dpm(rdev);
+                        kv_enable_nb_dpm(rdev, true);
                 }
         }
 
drivers/gpu/drm/radeon/ni_dma.c

@@ -191,12 +191,6 @@ int cayman_dma_resume(struct radeon_device *rdev)
         u32 reg_offset, wb_offset;
         int i, r;
 
-        /* Reset dma */
-        WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA | SOFT_RESET_DMA1);
-        RREG32(SRBM_SOFT_RESET);
-        udelay(50);
-        WREG32(SRBM_SOFT_RESET, 0);
-
         for (i = 0; i < 2; i++) {
                 if (i == 0) {
                         ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
drivers/gpu/drm/radeon/r100.c

@@ -821,6 +821,20 @@ u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc)
                 return RREG32(RADEON_CRTC2_CRNT_FRAME);
 }
 
+/**
+ * r100_ring_hdp_flush - flush Host Data Path via the ring buffer
+ * rdev: radeon device structure
+ * ring: ring buffer struct for emitting packets
+ */
+static void r100_ring_hdp_flush(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+        radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
+        radeon_ring_write(ring, rdev->config.r100.hdp_cntl |
+                                RADEON_HDP_READ_BUFFER_INVALIDATE);
+        radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
+        radeon_ring_write(ring, rdev->config.r100.hdp_cntl);
+}
+
 /* Who ever call radeon_fence_emit should call ring_lock and ask
  * for enough space (today caller are ib schedule and buffer move) */
 void r100_fence_ring_emit(struct radeon_device *rdev,
@@ -1056,20 +1070,6 @@ void r100_gfx_set_wptr(struct radeon_device *rdev,
         (void)RREG32(RADEON_CP_RB_WPTR);
 }
 
-/**
- * r100_ring_hdp_flush - flush Host Data Path via the ring buffer
- * rdev: radeon device structure
- * ring: ring buffer struct for emitting packets
- */
-void r100_ring_hdp_flush(struct radeon_device *rdev, struct radeon_ring *ring)
-{
-        radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
-        radeon_ring_write(ring, rdev->config.r100.hdp_cntl |
-                                RADEON_HDP_READ_BUFFER_INVALIDATE);
-        radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
-        radeon_ring_write(ring, rdev->config.r100.hdp_cntl);
-}
-
 static void r100_cp_load_microcode(struct radeon_device *rdev)
 {
         const __be32 *fw_data;
drivers/gpu/drm/radeon/r600_dma.c

@@ -124,15 +124,6 @@ int r600_dma_resume(struct radeon_device *rdev)
         u32 rb_bufsz;
         int r;
 
-        /* Reset dma */
-        if (rdev->family >= CHIP_RV770)
-                WREG32(SRBM_SOFT_RESET, RV770_SOFT_RESET_DMA);
-        else
-                WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA);
-        RREG32(SRBM_SOFT_RESET);
-        udelay(50);
-        WREG32(SRBM_SOFT_RESET, 0);
-
         WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL, 0);
         WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL, 0);
 
drivers/gpu/drm/radeon/r600d.h

@@ -44,13 +44,6 @@
 #define R6XX_MAX_PIPES                          8
 #define R6XX_MAX_PIPES_MASK                     0xff
 
-/* PTE flags */
-#define PTE_VALID                               (1 << 0)
-#define PTE_SYSTEM                              (1 << 1)
-#define PTE_SNOOPED                             (1 << 2)
-#define PTE_READABLE                            (1 << 5)
-#define PTE_WRITEABLE                           (1 << 6)
-
 /* tiling bits */
 #define ARRAY_LINEAR_GENERAL                    0x00000000
 #define ARRAY_LINEAR_ALIGNED                    0x00000001
drivers/gpu/drm/radeon/radeon_asic.c

@@ -185,7 +185,6 @@ static struct radeon_asic_ring r100_gfx_ring = {
         .get_rptr = &r100_gfx_get_rptr,
         .get_wptr = &r100_gfx_get_wptr,
         .set_wptr = &r100_gfx_set_wptr,
-        .hdp_flush = &r100_ring_hdp_flush,
 };
 
 static struct radeon_asic r100_asic = {
@@ -332,7 +331,6 @@ static struct radeon_asic_ring r300_gfx_ring = {
         .get_rptr = &r100_gfx_get_rptr,
         .get_wptr = &r100_gfx_get_wptr,
         .set_wptr = &r100_gfx_set_wptr,
-        .hdp_flush = &r100_ring_hdp_flush,
 };
 
 static struct radeon_asic r300_asic = {
drivers/gpu/drm/radeon/radeon_asic.h

@@ -148,8 +148,7 @@ u32 r100_gfx_get_wptr(struct radeon_device *rdev,
                       struct radeon_ring *ring);
 void r100_gfx_set_wptr(struct radeon_device *rdev,
                        struct radeon_ring *ring);
-void r100_ring_hdp_flush(struct radeon_device *rdev,
-                         struct radeon_ring *ring);
+
 /*
  * r200,rv250,rs300,rv280
  */
drivers/gpu/drm/radeon/radeon_device.c

@@ -1393,7 +1393,7 @@ int radeon_device_init(struct radeon_device *rdev,
 
         r = radeon_init(rdev);
         if (r)
-                return r;
+                goto failed;
 
         r = radeon_ib_ring_tests(rdev);
         if (r)
@@ -1413,7 +1413,7 @@ int radeon_device_init(struct radeon_device *rdev,
                 radeon_agp_disable(rdev);
                 r = radeon_init(rdev);
                 if (r)
-                        return r;
+                        goto failed;
         }
 
         if ((radeon_testing & 1)) {
@@ -1435,6 +1435,11 @@ int radeon_device_init(struct radeon_device *rdev,
                         DRM_INFO("radeon: acceleration disabled, skipping benchmarks\n");
         }
         return 0;
+
+failed:
+        if (runtime)
+                vga_switcheroo_fini_domain_pm_ops(rdev->dev);
+        return r;
 }
 
 static void radeon_debugfs_remove_files(struct radeon_device *rdev);
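The new failed: label matters because radeon_device_init() installs the vga_switcheroo PM domain earlier in the same function when runtime PM is used (PX), and previously returned straight out of later failures, leaving dev->pm_domain pointing into a module that is about to be unloaded. A stripped-down sketch of the idiom, with hypothetical helper names standing in for the real calls shown in these hunks:

#include <linux/device.h>
#include <linux/types.h>

/* Hypothetical helpers:
 * my_register_pm_domain()   ~ vga_switcheroo_init_domain_pm_ops()
 * my_unregister_pm_domain() ~ vga_switcheroo_fini_domain_pm_ops() */
void my_register_pm_domain(struct device *dev);
void my_unregister_pm_domain(struct device *dev);
int my_hw_init(struct device *dev);

int my_device_init(struct device *dev, bool runtime)
{
        int r;

        if (runtime)
                my_register_pm_domain(dev);

        r = my_hw_init(dev);
        if (r)
                goto failed;    /* don't just return: the PM domain is already registered */

        return 0;

failed:
        if (runtime)
                my_unregister_pm_domain(dev);
        return r;
}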
@@ -1455,6 +1460,8 @@ void radeon_device_fini(struct radeon_device *rdev)
         radeon_bo_evict_vram(rdev);
         radeon_fini(rdev);
         vga_switcheroo_unregister_client(rdev->pdev);
+        if (rdev->flags & RADEON_IS_PX)
+                vga_switcheroo_fini_domain_pm_ops(rdev->dev);
         vga_client_register(rdev->pdev, NULL, NULL, NULL);
         if (rdev->rio_mem)
                 pci_iounmap(rdev->pdev, rdev->rio_mem);
drivers/gpu/drm/radeon/radeon_drv.c

@@ -83,7 +83,7 @@
  *            CIK: 1D and linear tiling modes contain valid PIPE_CONFIG
  *   2.39.0 - Add INFO query for number of active CUs
  *   2.40.0 - Add RADEON_GEM_GTT_WC/UC, flush HDP cache before submitting
- *            CS to GPU
+ *            CS to GPU on >= r600
  */
 #define KMS_DRIVER_MAJOR        2
 #define KMS_DRIVER_MINOR        40
drivers/gpu/vga/vga_switcheroo.c

@@ -660,6 +660,12 @@ int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *
 }
 EXPORT_SYMBOL(vga_switcheroo_init_domain_pm_ops);
 
+void vga_switcheroo_fini_domain_pm_ops(struct device *dev)
+{
+        dev->pm_domain = NULL;
+}
+EXPORT_SYMBOL(vga_switcheroo_fini_domain_pm_ops);
+
 static int vga_switcheroo_runtime_resume_hdmi_audio(struct device *dev)
 {
         struct pci_dev *pdev = to_pci_dev(dev);
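The new export is deliberately tiny: it only clears dev->pm_domain, yet that is enough because the runtime-PM core checks the PM domain before any other set of callbacks. Once the pointer is NULL again, dispatch falls back to the type/class/bus ops, so nothing can land in a vga_pm_domain that lived inside the just-unloaded GPU driver. A rough, paraphrased sketch of that selection order (the real logic is in the runtime-PM core in drivers/base/power/runtime.c; the function and typedef names here are illustrative, not kernel API):

#include <linux/device.h>
#include <linux/pm.h>

typedef int (*example_pm_cb_t)(struct device *dev);

/* Roughly how the runtime-PM core picks a runtime_suspend callback. */
static example_pm_cb_t example_pick_runtime_suspend(struct device *dev)
{
        if (dev->pm_domain)
                return dev->pm_domain->ops.runtime_suspend;
        if (dev->type && dev->type->pm)
                return dev->type->pm->runtime_suspend;
        if (dev->class && dev->class->pm)
                return dev->class->pm->runtime_suspend;
        if (dev->bus && dev->bus->pm)
                return dev->bus->pm->runtime_suspend;
        return NULL;
}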
include/linux/vga_switcheroo.h

@@ -64,6 +64,7 @@ int vga_switcheroo_get_client_state(struct pci_dev *dev);
 void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic);
 
 int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain);
+void vga_switcheroo_fini_domain_pm_ops(struct device *dev);
 int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain);
 #else
 
@@ -82,6 +83,7 @@ static inline int vga_switcheroo_get_client_state(struct pci_dev *dev) { return
 static inline void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic) {}
 
 static inline int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL; }
+static inline void vga_switcheroo_fini_domain_pm_ops(struct device *dev) {}
 static inline int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL; }
 
 #endif