Merge tag 'amd-drm-next-5.10-2020-09-18' of git://people.freedesktop.org/~agd5f/linux into drm-next
amd-drm-next-5.10-2020-09-18:

amdgpu:
- Support for PCIe DPC recovery
- Sienna Cichlid updates
- Navy Flounder updates
- RAS fixes
- Refactor DC interrupt handling
- Display fixes
- Fix issues with OLED panels
- Mclk fixes for navi1x
- Watermark fixes for renoir and raven2
- Misc code cleanups
- Misc bug fixes

amdkfd:
- Fix a memory leak
- Fix a crash in GPU reset
- Add process eviction counters

radeon:
- Expose sclk via sysfs hwmon interface
- Revert bad PLL fix

scheduler:
- Kernel doc fixes

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexdeucher@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200918204322.3931-1-alexander.deucher@amd.com
commit fc88fef916
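The headline item in this pull is PCIe DPC (Downstream Port Containment) recovery support. The hunks below wire amdgpu into the kernel's generic PCI error-recovery flow, in which the PCI core invokes a driver's handlers in the order error_detected -> (mmio_enabled | slot_reset) -> resume. What follows is a minimal, hypothetical sketch of that contract, not code from this commit; the demo_* names are placeholders, while the pci_* types, constants, and struct fields are the real kernel API the amdgpu hunks use.

    #include <linux/pci.h>
    #include <linux/aer.h>

    /* Step 1: the core reports the error; the driver picks a recovery path. */
    static pci_ers_result_t demo_error_detected(struct pci_dev *pdev,
                                                pci_channel_state_t state)
    {
        if (state == pci_channel_io_frozen)
            /* MMIO is cut off; ask the core to reset the slot. */
            return PCI_ERS_RESULT_NEED_RESET;
        if (state == pci_channel_io_perm_failure)
            /* Unrecoverable; prepare for device removal. */
            return PCI_ERS_RESULT_DISCONNECT;
        return PCI_ERS_RESULT_CAN_RECOVER;
    }

    /* Step 2 (after a slot reset): restore config space, re-init the HW. */
    static pci_ers_result_t demo_slot_reset(struct pci_dev *pdev)
    {
        pci_restore_state(pdev); /* amdgpu restores its own cached copy */
        return PCI_ERS_RESULT_RECOVERED;
    }

    /* Step 3: recovery finished; restart any work that was quiesced. */
    static void demo_resume(struct pci_dev *pdev)
    {
    }

    static const struct pci_error_handlers demo_err_handler = {
        .error_detected = demo_error_detected,
        .slot_reset     = demo_slot_reset,
        .resume         = demo_resume,
    };

    static struct pci_driver demo_pci_driver = {
        .name        = "demo",
        .err_handler = &demo_err_handler,
        /* .id_table, .probe, .remove omitted in this sketch */
    };

In the patches below this shape appears as amdgpu_pci_err_handler wired into amdgpu_kms_pci_driver, with the amdgpu-specific extras of caching/restoring PCI config space (amdgpu_device_cache_pci_state / amdgpu_device_load_pci_state) and stopping/restarting the GPU schedulers around the ASIC reset.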
@@ -49,6 +49,8 @@
 #include <linux/rbtree.h>
 #include <linux/hashtable.h>
 #include <linux/dma-fence.h>
+#include <linux/pci.h>
+#include <linux/aer.h>

 #include <drm/ttm/ttm_bo_api.h>
 #include <drm/ttm/ttm_bo_driver.h>
@@ -987,6 +989,9 @@ struct amdgpu_device {
     atomic_t throttling_logging_enabled;
     struct ratelimit_state throttling_logging_rs;
     uint32_t ras_features;
+
+    bool in_pci_err_recovery;
+    struct pci_saved_state *pci_state;
 };

 static inline struct amdgpu_device *drm_to_adev(struct drm_device *ddev)
@@ -1260,6 +1265,15 @@ static inline int amdgpu_dm_display_resume(struct amdgpu_device *adev) { return
 void amdgpu_register_gpu_instance(struct amdgpu_device *adev);
 void amdgpu_unregister_gpu_instance(struct amdgpu_device *adev);

+pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev,
+                                           pci_channel_state_t state);
+pci_ers_result_t amdgpu_pci_mmio_enabled(struct pci_dev *pdev);
+pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev);
+void amdgpu_pci_resume(struct pci_dev *pdev);
+
+bool amdgpu_device_cache_pci_state(struct pci_dev *pdev);
+bool amdgpu_device_load_pci_state(struct pci_dev *pdev);
+
 #include "amdgpu_object.h"

 /* used by df_v3_6.c and amdgpu_pmu.c */
@@ -616,7 +616,7 @@ static bool amdgpu_atpx_detect(void)
     while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
         vga_count++;

-        has_atpx |= (amdgpu_atpx_pci_probe_handle(pdev) == true);
+        has_atpx |= amdgpu_atpx_pci_probe_handle(pdev);

         parent_pdev = pci_upstream_bridge(pdev);
         d3_supported |= parent_pdev && parent_pdev->bridge_d3;
@@ -626,7 +626,7 @@ static bool amdgpu_atpx_detect(void)
     while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_OTHER << 8, pdev)) != NULL) {
         vga_count++;

-        has_atpx |= (amdgpu_atpx_pci_probe_handle(pdev) == true);
+        has_atpx |= amdgpu_atpx_pci_probe_handle(pdev);

         parent_pdev = pci_upstream_bridge(pdev);
         d3_supported |= parent_pdev && parent_pdev->bridge_d3;
@@ -319,6 +319,9 @@ uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
 {
     uint32_t ret;

+    if (adev->in_pci_err_recovery)
+        return 0;
+
     if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev) &&
         down_read_trylock(&adev->reset_sem)) {
         ret = amdgpu_kiq_rreg(adev, reg);
@@ -355,7 +358,11 @@ uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
  *
  * Returns the 8 bit value from the offset specified.
  */
-uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset) {
+uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset)
+{
+    if (adev->in_pci_err_recovery)
+        return 0;
+
     if (offset < adev->rmmio_size)
         return (readb(adev->rmmio + offset));
     BUG();
@@ -376,7 +383,11 @@ uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset) {
  *
  * Writes the value specified to the offset specified.
  */
-void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value) {
+void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value)
+{
+    if (adev->in_pci_err_recovery)
+        return;
+
     if (offset < adev->rmmio_size)
         writeb(value, adev->rmmio + offset);
     else
@@ -387,6 +398,9 @@ static inline void amdgpu_mm_wreg_mmio(struct amdgpu_device *adev,
                                        uint32_t reg, uint32_t v,
                                        uint32_t acc_flags)
 {
+    if (adev->in_pci_err_recovery)
+        return;
+
     trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);

     if ((reg * 4) < adev->rmmio_size)
@@ -414,6 +428,9 @@ static inline void amdgpu_mm_wreg_mmio(struct amdgpu_device *adev,
 void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
                     uint32_t acc_flags)
 {
+    if (adev->in_pci_err_recovery)
+        return;
+
     if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev) &&
         down_read_trylock(&adev->reset_sem)) {
         amdgpu_kiq_wreg(adev, reg, v);
@@ -432,6 +449,9 @@ void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
 void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
                              uint32_t acc_flags)
 {
+    if (adev->in_pci_err_recovery)
+        return;
+
     if (amdgpu_sriov_fullaccess(adev) &&
         adev->gfx.rlc.funcs &&
         adev->gfx.rlc.funcs->is_rlcg_access_range) {
@@ -453,6 +473,9 @@ void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev, uint32_t reg, uint32_t
  */
 u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg)
 {
+    if (adev->in_pci_err_recovery)
+        return 0;
+
     if ((reg * 4) < adev->rio_mem_size)
         return ioread32(adev->rio_mem + (reg * 4));
     else {
@@ -472,6 +495,9 @@ u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg)
  */
 void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
 {
+    if (adev->in_pci_err_recovery)
+        return;
+
     if ((reg * 4) < adev->rio_mem_size)
         iowrite32(v, adev->rio_mem + (reg * 4));
     else {
@@ -491,6 +517,9 @@ void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
  */
 u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
 {
+    if (adev->in_pci_err_recovery)
+        return 0;
+
     if (index < adev->doorbell.num_doorbells) {
         return readl(adev->doorbell.ptr + index);
     } else {
@@ -511,6 +540,9 @@ u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
  */
 void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
 {
+    if (adev->in_pci_err_recovery)
+        return;
+
     if (index < adev->doorbell.num_doorbells) {
         writel(v, adev->doorbell.ptr + index);
     } else {
@@ -529,6 +561,9 @@ void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
  */
 u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
 {
+    if (adev->in_pci_err_recovery)
+        return 0;
+
     if (index < adev->doorbell.num_doorbells) {
         return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index));
     } else {
@@ -549,6 +584,9 @@ u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
  */
 void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
 {
+    if (adev->in_pci_err_recovery)
+        return;
+
     if (index < adev->doorbell.num_doorbells) {
         atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v);
     } else {
@@ -1256,7 +1294,7 @@ static void amdgpu_switcheroo_set_state(struct pci_dev *pdev,
         dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;

         pci_set_power_state(dev->pdev, PCI_D0);
-        pci_restore_state(dev->pdev);
+        amdgpu_device_load_pci_state(dev->pdev);
         r = pci_enable_device(dev->pdev);
         if (r)
             DRM_WARN("pci_enable_device failed (%d)\n", r);
@@ -1269,7 +1307,7 @@ static void amdgpu_switcheroo_set_state(struct pci_dev *pdev,
         drm_kms_helper_poll_disable(dev);
         dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
         amdgpu_device_suspend(dev, true);
-        pci_save_state(dev->pdev);
+        amdgpu_device_cache_pci_state(dev->pdev);
         /* Shut down the device */
         pci_disable_device(dev->pdev);
         pci_set_power_state(dev->pdev, PCI_D3cold);
@@ -2999,6 +3037,7 @@ static const struct attribute *amdgpu_dev_attributes[] = {
     NULL
 };

+
 /**
  * amdgpu_device_init - initialize the driver
  *
@@ -3170,13 +3209,13 @@ int amdgpu_device_init(struct amdgpu_device *adev,
     r = amdgpu_device_get_job_timeout_settings(adev);
     if (r) {
         dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n");
-        return r;
+        goto failed_unmap;
     }

     /* early init functions */
     r = amdgpu_device_ip_early_init(adev);
     if (r)
-        return r;
+        goto failed_unmap;

     /* doorbell bar mapping and doorbell index init*/
     amdgpu_device_doorbell_init(adev);
@@ -3217,6 +3256,8 @@ int amdgpu_device_init(struct amdgpu_device *adev,
         }
     }

+    pci_enable_pcie_error_reporting(adev->ddev.pdev);
+
     /* Post card if necessary */
     if (amdgpu_device_need_post(adev)) {
         if (!adev->bios) {
@@ -3359,16 +3400,18 @@ int amdgpu_device_init(struct amdgpu_device *adev,
     flush_delayed_work(&adev->delayed_init_work);

     r = sysfs_create_files(&adev->dev->kobj, amdgpu_dev_attributes);
-    if (r) {
+    if (r)
         dev_err(adev->dev, "Could not create amdgpu device attr\n");
-        return r;
-    }

     if (IS_ENABLED(CONFIG_PERF_EVENTS))
         r = amdgpu_pmu_init(adev);
     if (r)
         dev_err(adev->dev, "amdgpu_pmu_init failed\n");

+    /* Have stored pci confspace at hand for restore in sudden PCI error */
+    if (amdgpu_device_cache_pci_state(adev->pdev))
+        pci_restore_state(pdev);
+
     return 0;

 failed:
@@ -3376,6 +3419,10 @@ int amdgpu_device_init(struct amdgpu_device *adev,
     if (boco)
         vga_switcheroo_fini_domain_pm_ops(adev->dev);

+failed_unmap:
+    iounmap(adev->rmmio);
+    adev->rmmio = NULL;
+
     return r;
 }

@@ -3393,6 +3440,8 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
     flush_delayed_work(&adev->delayed_init_work);
     adev->shutdown = true;

+    kfree(adev->pci_state);
+
     /* make sure IB test finished before entering exclusive mode
      * to avoid preemption on IB test
      * */
@@ -4072,7 +4121,8 @@ static int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,

 static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
                                 struct list_head *device_list_handle,
-                                bool *need_full_reset_arg)
+                                bool *need_full_reset_arg,
+                                bool skip_hw_reset)
 {
     struct amdgpu_device *tmp_adev = NULL;
     bool need_full_reset = *need_full_reset_arg, vram_lost = false;
@@ -4082,7 +4132,7 @@ static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
      * ASIC reset has to be done on all HGMI hive nodes ASAP
      * to allow proper links negotiation in FW (within 1 sec)
      */
-    if (need_full_reset) {
+    if (!skip_hw_reset && need_full_reset) {
         list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
             /* For XGMI run all resets in parallel to speed up the process */
             if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
@@ -4477,7 +4527,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
         if (r)
             adev->asic_reset_res = r;
     } else {
-        r = amdgpu_do_asic_reset(hive, device_list_handle, &need_full_reset);
+        r = amdgpu_do_asic_reset(hive, device_list_handle, &need_full_reset, false);
         if (r && r == -EAGAIN)
             goto retry;
     }
@@ -4705,3 +4755,235 @@ int amdgpu_device_baco_exit(struct drm_device *dev)

     return 0;
 }
+
+static void amdgpu_cancel_all_tdr(struct amdgpu_device *adev)
+{
+    int i;
+
+    for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
+        struct amdgpu_ring *ring = adev->rings[i];
+
+        if (!ring || !ring->sched.thread)
+            continue;
+
+        cancel_delayed_work_sync(&ring->sched.work_tdr);
+    }
+}
+
+/**
+ * amdgpu_pci_error_detected - Called when a PCI error is detected.
+ * @pdev: PCI device struct
+ * @state: PCI channel state
+ *
+ * Description: Called when a PCI error is detected.
+ *
+ * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT.
+ */
+pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
+{
+    struct drm_device *dev = pci_get_drvdata(pdev);
+    struct amdgpu_device *adev = drm_to_adev(dev);
+    int i;
+
+    DRM_INFO("PCI error: detected callback, state(%d)!!\n", state);
+
+    if (adev->gmc.xgmi.num_physical_nodes > 1) {
+        DRM_WARN("No support for XGMI hive yet...");
+        return PCI_ERS_RESULT_DISCONNECT;
+    }
+
+    switch (state) {
+    case pci_channel_io_normal:
+        return PCI_ERS_RESULT_CAN_RECOVER;
+    /* Fatal error, prepare for slot reset */
+    case pci_channel_io_frozen:
+        /*
+         * Cancel and wait for all TDRs in progress if failing to
+         * set adev->in_gpu_reset in amdgpu_device_lock_adev
+         *
+         * Locking adev->reset_sem will prevent any external access
+         * to GPU during PCI error recovery
+         */
+        while (!amdgpu_device_lock_adev(adev, NULL))
+            amdgpu_cancel_all_tdr(adev);
+
+        /*
+         * Block any work scheduling as we do for regular GPU reset
+         * for the duration of the recovery
+         */
+        for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
+            struct amdgpu_ring *ring = adev->rings[i];
+
+            if (!ring || !ring->sched.thread)
+                continue;
+
+            drm_sched_stop(&ring->sched, NULL);
+        }
+        return PCI_ERS_RESULT_NEED_RESET;
+    case pci_channel_io_perm_failure:
+        /* Permanent error, prepare for device removal */
+        return PCI_ERS_RESULT_DISCONNECT;
+    }
+
+    return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/**
+ * amdgpu_pci_mmio_enabled - Enable MMIO and dump debug registers
+ * @pdev: pointer to PCI device
+ */
+pci_ers_result_t amdgpu_pci_mmio_enabled(struct pci_dev *pdev)
+{
+
+    DRM_INFO("PCI error: mmio enabled callback!!\n");
+
+    /* TODO - dump whatever for debugging purposes */
+
+    /* This called only if amdgpu_pci_error_detected returns
+     * PCI_ERS_RESULT_CAN_RECOVER. Read/write to the device still
+     * works, no need to reset slot.
+     */
+
+    return PCI_ERS_RESULT_RECOVERED;
+}
+
+/**
+ * amdgpu_pci_slot_reset - Called when PCI slot has been reset.
+ * @pdev: PCI device struct
+ *
+ * Description: This routine is called by the pci error recovery
+ * code after the PCI slot has been reset, just before we
+ * should resume normal operations.
+ */
+pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
+{
+    struct drm_device *dev = pci_get_drvdata(pdev);
+    struct amdgpu_device *adev = drm_to_adev(dev);
+    int r, i;
+    bool need_full_reset = true;
+    u32 memsize;
+    struct list_head device_list;
+
+    DRM_INFO("PCI error: slot reset callback!!\n");
+
+    INIT_LIST_HEAD(&device_list);
+    list_add_tail(&adev->gmc.xgmi.head, &device_list);
+
+    /* wait for asic to come out of reset */
+    msleep(500);
+
+    /* Restore PCI confspace */
+    amdgpu_device_load_pci_state(pdev);
+
+    /* confirm ASIC came out of reset */
+    for (i = 0; i < adev->usec_timeout; i++) {
+        memsize = amdgpu_asic_get_config_memsize(adev);
+
+        if (memsize != 0xffffffff)
+            break;
+        udelay(1);
+    }
+    if (memsize == 0xffffffff) {
+        r = -ETIME;
+        goto out;
+    }
+
+    adev->in_pci_err_recovery = true;
+    r = amdgpu_device_pre_asic_reset(adev, NULL, &need_full_reset);
+    adev->in_pci_err_recovery = false;
+    if (r)
+        goto out;
+
+    r = amdgpu_do_asic_reset(NULL, &device_list, &need_full_reset, true);
+
+out:
+    if (!r) {
+        if (amdgpu_device_cache_pci_state(adev->pdev))
+            pci_restore_state(adev->pdev);
+
+        DRM_INFO("PCIe error recovery succeeded\n");
+    } else {
+        DRM_ERROR("PCIe error recovery failed, err:%d", r);
+        amdgpu_device_unlock_adev(adev);
+    }
+
+    return r ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
+}
+
+/**
+ * amdgpu_pci_resume() - resume normal ops after PCI reset
+ * @pdev: pointer to PCI device
+ *
+ * Called when the error recovery driver tells us that its
+ * OK to resume normal operation. Use completion to allow
+ * halted scsi ops to resume.
+ */
+void amdgpu_pci_resume(struct pci_dev *pdev)
+{
+    struct drm_device *dev = pci_get_drvdata(pdev);
+    struct amdgpu_device *adev = drm_to_adev(dev);
+    int i;
+
+
+    DRM_INFO("PCI error: resume callback!!\n");
+
+    for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
+        struct amdgpu_ring *ring = adev->rings[i];
+
+        if (!ring || !ring->sched.thread)
+            continue;
+
+
+        drm_sched_resubmit_jobs(&ring->sched);
+        drm_sched_start(&ring->sched, true);
+    }
+
+    amdgpu_device_unlock_adev(adev);
+}
+
+bool amdgpu_device_cache_pci_state(struct pci_dev *pdev)
+{
+    struct drm_device *dev = pci_get_drvdata(pdev);
+    struct amdgpu_device *adev = drm_to_adev(dev);
+    int r;
+
+    r = pci_save_state(pdev);
+    if (!r) {
+        kfree(adev->pci_state);
+
+        adev->pci_state = pci_store_saved_state(pdev);
+
+        if (!adev->pci_state) {
+            DRM_ERROR("Failed to store PCI saved state");
+            return false;
+        }
+    } else {
+        DRM_WARN("Failed to save PCI state, err:%d\n", r);
+        return false;
+    }
+
+    return true;
+}
+
+bool amdgpu_device_load_pci_state(struct pci_dev *pdev)
+{
+    struct drm_device *dev = pci_get_drvdata(pdev);
+    struct amdgpu_device *adev = drm_to_adev(dev);
+    int r;
+
+    if (!adev->pci_state)
+        return false;
+
+    r = pci_load_saved_state(pdev, adev->pci_state);
+
+    if (!r) {
+        pci_restore_state(pdev);
+    } else {
+        DRM_WARN("Failed to load PCI state, err:%d\n", r);
+        return false;
+    }
+
+    return true;
+}
@@ -44,9 +44,9 @@ struct amdgpu_df_funcs {
     void (*enable_ecc_force_par_wr_rmw)(struct amdgpu_device *adev,
                                         bool enable);
     int (*pmc_start)(struct amdgpu_device *adev, uint64_t config,
-                     int is_enable);
+                     int is_add);
     int (*pmc_stop)(struct amdgpu_device *adev, uint64_t config,
-                    int is_disable);
+                    int is_remove);
     void (*pmc_get_count)(struct amdgpu_device *adev, uint64_t config,
                           uint64_t *count);
     uint64_t (*get_fica)(struct amdgpu_device *adev, uint32_t ficaa_val);
@@ -32,7 +32,6 @@
 #include <drm/drm_pciids.h>
 #include <linux/console.h>
 #include <linux/module.h>
-#include <linux/pci.h>
 #include <linux/pm_runtime.h>
 #include <linux/vga_switcheroo.h>
 #include <drm/drm_probe_helper.h>
@@ -1073,8 +1072,16 @@ static const struct pci_device_id pciidlist[] = {
     {0x1002, 0x1636, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU},

     /* Navi12 */
-    {0x1002, 0x7360, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI12|AMD_EXP_HW_SUPPORT},
-    {0x1002, 0x7362, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI12|AMD_EXP_HW_SUPPORT},
+    {0x1002, 0x7360, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI12},
+    {0x1002, 0x7362, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI12},

+    /* Sienna_Cichlid */
+    {0x1002, 0x73A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
+    {0x1002, 0x73A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
+    {0x1002, 0x73A3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
+    {0x1002, 0x73AB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
+    {0x1002, 0x73AE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
+    {0x1002, 0x73BF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
+
     {0, 0, 0}
 };
@@ -1102,6 +1109,16 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
         return -ENODEV;
     }

+    /* Due to hardware bugs, S/G Display on raven requires a 1:1 IOMMU mapping,
+     * however, SME requires an indirect IOMMU mapping because the encryption
+     * bit is beyond the DMA mask of the chip.
+     */
+    if (mem_encrypt_active() && ((flags & AMD_ASIC_MASK) == CHIP_RAVEN)) {
+        dev_info(&pdev->dev,
+                 "SME is not compatible with RAVEN\n");
+        return -ENOTSUPP;
+    }
+
 #ifdef CONFIG_DRM_AMDGPU_SI
     if (!amdgpu_si_support) {
         switch (flags & AMD_ASIC_MASK) {
@@ -1316,7 +1333,7 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev)
         if (amdgpu_is_atpx_hybrid()) {
             pci_ignore_hotplug(pdev);
         } else {
-            pci_save_state(pdev);
+            amdgpu_device_cache_pci_state(pdev);
             pci_disable_device(pdev);
             pci_ignore_hotplug(pdev);
             pci_set_power_state(pdev, PCI_D3cold);
@@ -1349,7 +1366,7 @@ static int amdgpu_pmops_runtime_resume(struct device *dev)
             pci_set_master(pdev);
         } else {
             pci_set_power_state(pdev, PCI_D0);
-            pci_restore_state(pdev);
+            amdgpu_device_load_pci_state(pdev);
             ret = pci_enable_device(pdev);
             if (ret)
                 return ret;
@@ -1528,6 +1545,13 @@ static struct drm_driver kms_driver = {
     .patchlevel = KMS_DRIVER_PATCHLEVEL,
 };

+static struct pci_error_handlers amdgpu_pci_err_handler = {
+    .error_detected = amdgpu_pci_error_detected,
+    .mmio_enabled = amdgpu_pci_mmio_enabled,
+    .slot_reset = amdgpu_pci_slot_reset,
+    .resume = amdgpu_pci_resume,
+};
+
 static struct pci_driver amdgpu_kms_pci_driver = {
     .name = DRIVER_NAME,
     .id_table = pciidlist,
@@ -1535,6 +1559,7 @@ static struct pci_driver amdgpu_kms_pci_driver = {
     .remove = amdgpu_pci_remove,
     .shutdown = amdgpu_pci_shutdown,
     .driver.pm = &amdgpu_pm_ops,
+    .err_handler = &amdgpu_pci_err_handler,
 };

 static int __init amdgpu_init(void)
@@ -693,6 +693,9 @@ uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
     struct amdgpu_kiq *kiq = &adev->gfx.kiq;
     struct amdgpu_ring *ring = &kiq->ring;

+    if (adev->in_pci_err_recovery)
+        return 0;
+
     BUG_ON(!ring->funcs->emit_rreg);

     spin_lock_irqsave(&kiq->ring_lock, flags);
@@ -757,6 +760,9 @@ void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)

     BUG_ON(!ring->funcs->emit_wreg);

+    if (adev->in_pci_err_recovery)
+        return;
+
     spin_lock_irqsave(&kiq->ring_lock, flags);
     amdgpu_ring_alloc(ring, 32);
     amdgpu_ring_emit_wreg(ring, reg, v);
@@ -282,14 +282,25 @@ static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info,
         fw_info->feature = 0;
         break;
     case AMDGPU_INFO_FW_TA:
-        if (query_fw->index > 1)
-            return -EINVAL;
-        if (query_fw->index == 0) {
+        switch (query_fw->index) {
+        case 0:
             fw_info->ver = adev->psp.ta_fw_version;
             fw_info->feature = adev->psp.ta_xgmi_ucode_version;
-        } else {
+            break;
+        case 1:
             fw_info->ver = adev->psp.ta_fw_version;
             fw_info->feature = adev->psp.ta_ras_ucode_version;
+            break;
+        case 2:
+            fw_info->ver = adev->psp.ta_fw_version;
+            fw_info->feature = adev->psp.ta_hdcp_ucode_version;
+            break;
+        case 3:
+            fw_info->ver = adev->psp.ta_fw_version;
+            fw_info->feature = adev->psp.ta_dtm_ucode_version;
+            break;
+        default:
+            return -EINVAL;
         }
         break;
     case AMDGPU_INFO_FW_SDMA:
@@ -1385,13 +1396,31 @@ static int amdgpu_debugfs_firmware_info(struct seq_file *m, void *data)
            fw_info.feature, fw_info.ver);

     query_fw.fw_type = AMDGPU_INFO_FW_TA;
-    for (i = 0; i < 2; i++) {
+    for (i = 0; i < 4; i++) {
         query_fw.index = i;
         ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
         if (ret)
             continue;
-        seq_printf(m, "TA %s feature version: %u, firmware version: 0x%08x\n",
-               i ? "RAS" : "XGMI", fw_info.feature, fw_info.ver);
+        switch (query_fw.index) {
+        case 0:
+            seq_printf(m, "TA %s feature version: 0x%08x, firmware version: 0x%08x\n",
+                    "RAS", fw_info.feature, fw_info.ver);
+            break;
+        case 1:
+            seq_printf(m, "TA %s feature version: 0x%08x, firmware version: 0x%08x\n",
+                    "XGMI", fw_info.feature, fw_info.ver);
+            break;
+        case 2:
+            seq_printf(m, "TA %s feature version: 0x%08x, firmware version: 0x%08x\n",
+                    "HDCP", fw_info.feature, fw_info.ver);
+            break;
+        case 3:
+            seq_printf(m, "TA %s feature version: 0x%08x, firmware version: 0x%08x\n",
+                    "DTM", fw_info.feature, fw_info.ver);
+            break;
+        default:
+            return -EINVAL;
+        }
     }

     /* SMC */
@@ -46,6 +46,7 @@

 #include <drm/drm_dp_mst_helper.h>
 #include "modules/inc/mod_freesync.h"
+#include "amdgpu_dm_irq_params.h"

 struct amdgpu_bo;
 struct amdgpu_device;
@@ -404,7 +405,8 @@ struct amdgpu_crtc {
     struct amdgpu_flip_work *pflip_works;
     enum amdgpu_flip_status pflip_status;
     int deferred_flip_completion;
-    u32 last_flip_vblank;
+    /* parameters access from DM IRQ handler */
+    struct dm_irq_params dm_irq_params;
     /* pll sharing */
     struct amdgpu_atom_ss ss;
     bool ss_enabled;
@@ -178,7 +178,7 @@ static int psp_sw_init(void *handle)
         return ret;
     }

-    if (adev->asic_type == CHIP_NAVI10) {
+    if (adev->asic_type == CHIP_NAVI10 || adev->asic_type == CHIP_SIENNA_CICHLID) {
         ret= psp_sysfs_init(adev);
         if (ret) {
             return ret;
@@ -219,6 +219,9 @@ int psp_wait_for(struct psp_context *psp, uint32_t reg_index,
     int i;
     struct amdgpu_device *adev = psp->adev;

+    if (psp->adev->in_pci_err_recovery)
+        return 0;
+
     for (i = 0; i < adev->usec_timeout; i++) {
         val = RREG32(reg_index);
         if (check_changed) {
@@ -245,6 +248,9 @@ psp_cmd_submit_buf(struct psp_context *psp,
     bool ras_intr = false;
     bool skip_unsupport = false;

+    if (psp->adev->in_pci_err_recovery)
+        return 0;
+
     mutex_lock(&psp->mutex);

     memset(psp->cmd_buf_mem, 0, PSP_CMD_BUFFER_SIZE);
@@ -929,6 +935,7 @@ static int psp_ras_load(struct psp_context *psp)
 {
     int ret;
     struct psp_gfx_cmd_resp *cmd;
+    struct ta_ras_shared_memory *ras_cmd;

     /*
      * TODO: bypass the loading in sriov for now
@@ -952,11 +959,20 @@ static int psp_ras_load(struct psp_context *psp)
     ret = psp_cmd_submit_buf(psp, NULL, cmd,
             psp->fence_buf_mc_addr);

+    ras_cmd = (struct ta_ras_shared_memory*)psp->ras.ras_shared_buf;
+
     if (!ret) {
-        psp->ras.ras_initialized = true;
         psp->ras.session_id = cmd->resp.session_id;
+
+        if (!ras_cmd->ras_status)
+            psp->ras.ras_initialized = true;
+        else
+            dev_warn(psp->adev->dev, "RAS Init Status: 0x%X\n", ras_cmd->ras_status);
     }

+    if (ret || ras_cmd->ras_status)
+        amdgpu_ras_fini(psp->adev);
+
     kfree(cmd);

     return ret;
@@ -1021,6 +1021,7 @@ static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm)

 release_sg:
     kfree(ttm->sg);
+    ttm->sg = NULL;
     return r;
 }

@@ -1155,7 +1156,12 @@ static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm,
 }

 /**
- * amdgpu_ttm_alloc_gart - Allocate GART memory for buffer object
+ * amdgpu_ttm_alloc_gart - Make sure buffer object is accessible either
+ * through AGP or GART aperture.
+ *
+ * If bo is accessible through AGP aperture, then use AGP aperture
+ * to access bo; otherwise allocate logical space in GART aperture
+ * and map bo to GART aperture.
  */
 int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
 {
@@ -59,7 +59,7 @@ static int amdgpu_vm_cpu_prepare(struct amdgpu_vm_update_params *p,
  *
  * @p: see amdgpu_vm_update_params definition
  * @bo: PD/PT to update
- * @pe: kmap addr of the page entry
+ * @pe: byte offset of the PDE/PTE, relative to start of PDB/PTB
  * @addr: dst addr to write into pe
  * @count: number of page entries to update
  * @incr: increase next addr by incr bytes
@@ -155,7 +155,7 @@ static void amdgpu_vm_sdma_copy_ptes(struct amdgpu_vm_update_params *p,
  *
  * @p: see amdgpu_vm_update_params definition
  * @bo: PD/PT to update
- * @pe: addr of the page entry
+ * @pe: byte offset of the PDE/PTE, relative to start of PDB/PTB
  * @addr: dst addr to write into pe
  * @count: number of page entries to update
  * @incr: increase next addr by incr bytes
@@ -187,7 +187,7 @@ static void amdgpu_vm_sdma_set_ptes(struct amdgpu_vm_update_params *p,
  *
  * @p: see amdgpu_vm_update_params definition
  * @bo: PD/PT to update
- * @pe: addr of the page entry
+ * @pe: byte offset of the PDE/PTE, relative to start of PDB/PTB
  * @addr: dst addr to write into pe
  * @count: number of page entries to update
  * @incr: increase next addr by incr bytes
@@ -455,7 +455,8 @@ static int df_v3_6_pmc_get_ctrl_settings(struct amdgpu_device *adev,
                                          uint32_t *lo_base_addr,
                                          uint32_t *hi_base_addr,
                                          uint32_t *lo_val,
-                                         uint32_t *hi_val)
+                                         uint32_t *hi_val,
+                                         bool is_enable)
 {

     uint32_t eventsel, instance, unitmask;
@@ -477,7 +478,8 @@ static int df_v3_6_pmc_get_ctrl_settings(struct amdgpu_device *adev,
     instance_5432 = (instance >> 2) & 0xf;
     instance_76 = (instance >> 6) & 0x3;

-    *lo_val = (unitmask << 8) | (instance_10 << 6) | eventsel | (1 << 22);
+    *lo_val = (unitmask << 8) | (instance_10 << 6) | eventsel;
+    *lo_val = is_enable ? *lo_val | (1 << 22) : *lo_val & ~(1 << 22);
     *hi_val = (instance_76 << 29) | instance_5432;

     DRM_DEBUG_DRIVER("config=%llx addr=%08x:%08x val=%08x:%08x",
@@ -572,14 +574,14 @@ static void df_v3_6_reset_perfmon_cntr(struct amdgpu_device *adev,
 }

 static int df_v3_6_pmc_start(struct amdgpu_device *adev, uint64_t config,
-                             int is_enable)
+                             int is_add)
 {
     uint32_t lo_base_addr, hi_base_addr, lo_val, hi_val;
     int err = 0, ret = 0;

     switch (adev->asic_type) {
     case CHIP_VEGA20:
-        if (is_enable)
+        if (is_add)
             return df_v3_6_pmc_add_cntr(adev, config);

         df_v3_6_reset_perfmon_cntr(adev, config);
@@ -589,7 +591,8 @@ static int df_v3_6_pmc_start(struct amdgpu_device *adev, uint64_t config,
                                              &lo_base_addr,
                                              &hi_base_addr,
                                              &lo_val,
-                                             &hi_val);
+                                             &hi_val,
+                                             true);

         if (ret)
             return ret;
@@ -612,7 +615,7 @@ static int df_v3_6_pmc_start(struct amdgpu_device *adev, uint64_t config,
 }

 static int df_v3_6_pmc_stop(struct amdgpu_device *adev, uint64_t config,
-                            int is_disable)
+                            int is_remove)
 {
     uint32_t lo_base_addr, hi_base_addr, lo_val, hi_val;
     int ret = 0;
@@ -624,15 +627,17 @@ static int df_v3_6_pmc_stop(struct amdgpu_device *adev, uint64_t config,
                                          &lo_base_addr,
                                          &hi_base_addr,
                                          &lo_val,
-                                         &hi_val);
+                                         &hi_val,
+                                         false);

     if (ret)
         return ret;

-    df_v3_6_reset_perfmon_cntr(adev, config);
-
-    if (is_disable)
+    if (is_remove) {
+        df_v3_6_reset_perfmon_cntr(adev, config);
         df_v3_6_pmc_release_cntr(adev, config);
+    }

     break;
     default:
@@ -3560,7 +3560,7 @@ static void gfx_v10_0_check_fw_write_wait(struct amdgpu_device *adev)
         break;
     }

-    if (adev->gfx.cp_fw_write_wait == false)
+    if (!adev->gfx.cp_fw_write_wait)
         DRM_WARN_ONCE("CP firmware version too old, please update!");
 }

@@ -6980,15 +6980,19 @@ static int gfx_v10_0_hw_fini(void *handle)

     amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
     amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);

+    if (!adev->in_pci_err_recovery) {
 #ifndef BRING_UP_DEBUG
-    if (amdgpu_async_gfx_ring) {
-        r = gfx_v10_0_kiq_disable_kgq(adev);
-        if (r)
-            DRM_ERROR("KGQ disable failed\n");
-    }
+        if (amdgpu_async_gfx_ring) {
+            r = gfx_v10_0_kiq_disable_kgq(adev);
+            if (r)
+                DRM_ERROR("KGQ disable failed\n");
+        }
 #endif
-    if (amdgpu_gfx_disable_kcq(adev))
-        DRM_ERROR("KCQ disable failed\n");
+        if (amdgpu_gfx_disable_kcq(adev))
+            DRM_ERROR("KCQ disable failed\n");
+    }

     if (amdgpu_sriov_vf(adev)) {
         gfx_v10_0_cp_gfx_enable(adev, false);
         /* Program KIQ position of RLC_CP_SCHEDULERS during destroy */
@@ -2800,7 +2800,7 @@ static void pwr_10_0_gfxip_control_over_cgpg(struct amdgpu_device *adev,
     uint32_t default_data = 0;

     default_data = data = RREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS));
-    if (enable == true) {
+    if (enable) {
         /* enable GFXIP control over CGPG */
         data |= PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK;
         if(default_data != data)
@@ -269,7 +269,6 @@ static const char *mmhub_client_ids_arcturus[][2] = {
     [14][1] = "HDP",
     [15][1] = "SDMA0",
     [32+15][1] = "SDMA1",
-    [32+15][1] = "SDMA1",
     [64+15][1] = "SDMA2",
     [96+15][1] = "SDMA3",
     [128+15][1] = "SDMA4",
@@ -1546,8 +1545,11 @@ static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
  */
 void gmc_v9_0_restore_registers(struct amdgpu_device *adev)
 {
-    if (adev->asic_type == CHIP_RAVEN)
+    if (adev->asic_type == CHIP_RAVEN) {
         WREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0, adev->gmc.sdpif_register);
+        WARN_ON(adev->gmc.sdpif_register !=
+                RREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0));
+    }
 }

 /**
@@ -262,7 +262,8 @@ static void xgpu_ai_mailbox_flr_work(struct work_struct *work)

     /* Trigger recovery for world switch failure if no TDR */
     if (amdgpu_device_should_recover_gpu(adev)
-        && (amdgpu_device_has_job_running(adev) || adev->sdma_timeout == MAX_SCHEDULE_TIMEOUT))
+        && (!amdgpu_device_has_job_running(adev) ||
+        adev->sdma_timeout == MAX_SCHEDULE_TIMEOUT))
         amdgpu_device_gpu_recover(adev, NULL);
 }

@@ -283,7 +283,7 @@ static void xgpu_nv_mailbox_flr_work(struct work_struct *work)

     /* Trigger recovery for world switch failure if no TDR */
     if (amdgpu_device_should_recover_gpu(adev)
-        && (amdgpu_device_has_job_running(adev) ||
+        && (!amdgpu_device_has_job_running(adev) ||
         adev->sdma_timeout == MAX_SCHEDULE_TIMEOUT ||
         adev->gfx_timeout == MAX_SCHEDULE_TIMEOUT ||
         adev->compute_timeout == MAX_SCHEDULE_TIMEOUT ||
@@ -311,7 +311,7 @@ static int nv_asic_mode1_reset(struct amdgpu_device *adev)
     /* disable BM */
     pci_clear_master(adev->pdev);

-    pci_save_state(adev->pdev);
+    amdgpu_device_cache_pci_state(adev->pdev);

     if (amdgpu_dpm_is_mode1_reset_supported(adev)) {
         dev_info(adev->dev, "GPU smu mode1 reset\n");
@@ -323,7 +323,7 @@ static int nv_asic_mode1_reset(struct amdgpu_device *adev)

     if (ret)
         dev_err(adev->dev, "GPU mode1 reset failed\n");
-    pci_restore_state(adev->pdev);
+    amdgpu_device_load_pci_state(adev->pdev);

     /* wait for asic to come out of reset */
     for (i = 0; i < adev->usec_timeout; i++) {
@@ -58,7 +58,7 @@ MODULE_FIRMWARE("amdgpu/arcturus_ta.bin");
 MODULE_FIRMWARE("amdgpu/sienna_cichlid_sos.bin");
 MODULE_FIRMWARE("amdgpu/sienna_cichlid_ta.bin");
 MODULE_FIRMWARE("amdgpu/navy_flounder_sos.bin");
-MODULE_FIRMWARE("amdgpu/navy_flounder_asd.bin");
+MODULE_FIRMWARE("amdgpu/navy_flounder_ta.bin");

 /* address block */
 #define smnMP1_FIRMWARE_FLAGS 0x3010024
@@ -1000,7 +1000,7 @@ static void sdma_v4_0_page_stop(struct amdgpu_device *adev)
         sdma[i] = &adev->sdma.instance[i].page;

         if ((adev->mman.buffer_funcs_ring == sdma[i]) &&
-            (unset == false)) {
+            (!unset)) {
             amdgpu_ttm_set_buffer_funcs_status(adev, false);
             unset = true;
         }
@@ -1063,6 +1063,15 @@ static void sdma_v4_0_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
             WREG32_SDMA(i, mmSDMA0_PHASE2_QUANTUM, phase_quantum);
         }
         WREG32_SDMA(i, mmSDMA0_CNTL, f32_cntl);
+
+        /*
+         * Enable SDMA utilization. Its only supported on
+         * Arcturus for the moment and firmware version 14
+         * and above.
+         */
+        if (adev->asic_type == CHIP_ARCTURUS &&
+            adev->sdma.instance[i].fw_version >= 14)
+            WREG32_SDMA(i, mmSDMA0_PUB_DUMMY_REG2, enable);
     }

 }
@@ -1080,7 +1089,7 @@ static void sdma_v4_0_enable(struct amdgpu_device *adev, bool enable)
     u32 f32_cntl;
     int i;

-    if (enable == false) {
+    if (!enable) {
         sdma_v4_0_gfx_stop(adev);
         sdma_v4_0_rlc_stop(adev);
         if (adev->sdma.has_page_queue)
@@ -616,7 +616,7 @@ static void sdma_v5_0_enable(struct amdgpu_device *adev, bool enable)
     u32 f32_cntl;
     int i;

-    if (enable == false) {
+    if (!enable) {
         sdma_v5_0_gfx_stop(adev);
         sdma_v5_0_rlc_stop(adev);
     }
@@ -559,7 +559,7 @@ static void sdma_v5_2_enable(struct amdgpu_device *adev, bool enable)
     u32 f32_cntl;
     int i;

-    if (enable == false) {
+    if (!enable) {
         sdma_v5_2_gfx_stop(adev);
         sdma_v5_2_rlc_stop(adev);
     }
@@ -1339,7 +1339,7 @@ static void si_vga_set_state(struct amdgpu_device *adev, bool state)
     uint32_t temp;

     temp = RREG32(CONFIG_CNTL);
-    if (state == false) {
+    if (!state) {
         temp &= ~(1<<0);
         temp |= (1<<1);
     } else {
@@ -484,13 +484,13 @@ static int soc15_asic_mode1_reset(struct amdgpu_device *adev)
     /* disable BM */
     pci_clear_master(adev->pdev);

-    pci_save_state(adev->pdev);
+    amdgpu_device_cache_pci_state(adev->pdev);

     ret = psp_gpu_reset(adev);
     if (ret)
         dev_err(adev->dev, "GPU mode1 reset failed\n");

-    pci_restore_state(adev->pdev);
+    amdgpu_device_load_pci_state(adev->pdev);

     /* wait for asic to come out of reset */
     for (i = 0; i < adev->usec_timeout; i++) {
@@ -1240,8 +1240,8 @@ static int uvd_v6_0_process_interrupt(struct amdgpu_device *adev,
         break;
     }

-    if (false == int_handled)
-        DRM_ERROR("Unhandled interrupt: %d %d\n",
+    if (!int_handled)
+        DRM_ERROR("Unhandled interrupt: %d %d\n",
               entry->src_id, entry->src_data[0]);

     return 0;
@@ -746,18 +746,18 @@ static void vcn_v3_0_disable_clock_gating(struct amdgpu_device *adev, int inst)
         | UVD_SUVD_CGC_GATE__IME_HEVC_MASK
         | UVD_SUVD_CGC_GATE__EFC_MASK
         | UVD_SUVD_CGC_GATE__SAOE_MASK
-        | 0x08000000
+        | UVD_SUVD_CGC_GATE__SRE_AV1_MASK
         | UVD_SUVD_CGC_GATE__FBC_PCLK_MASK
         | UVD_SUVD_CGC_GATE__FBC_CCLK_MASK
-        | 0x40000000
+        | UVD_SUVD_CGC_GATE__SCM_AV1_MASK
         | UVD_SUVD_CGC_GATE__SMPA_MASK);
     WREG32_SOC15(VCN, inst, mmUVD_SUVD_CGC_GATE, data);

     data = RREG32_SOC15(VCN, inst, mmUVD_SUVD_CGC_GATE2);
     data |= (UVD_SUVD_CGC_GATE2__MPBE0_MASK
         | UVD_SUVD_CGC_GATE2__MPBE1_MASK
-        | 0x00000004
-        | 0x00000008
+        | UVD_SUVD_CGC_GATE2__SIT_AV1_MASK
+        | UVD_SUVD_CGC_GATE2__SDB_AV1_MASK
         | UVD_SUVD_CGC_GATE2__MPC1_MASK);
     WREG32_SOC15(VCN, inst, mmUVD_SUVD_CGC_GATE2, data);

@@ -776,8 +776,8 @@ static void vcn_v3_0_disable_clock_gating(struct amdgpu_device *adev, int inst)
         | UVD_SUVD_CGC_CTRL__SMPA_MODE_MASK
         | UVD_SUVD_CGC_CTRL__MPBE0_MODE_MASK
         | UVD_SUVD_CGC_CTRL__MPBE1_MODE_MASK
-        | 0x00008000
-        | 0x00010000
+        | UVD_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK
+        | UVD_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK
         | UVD_SUVD_CGC_CTRL__MPC1_MODE_MASK
         | UVD_SUVD_CGC_CTRL__FBC_PCLK_MASK
         | UVD_SUVD_CGC_CTRL__FBC_CCLK_MASK);
@@ -892,8 +892,8 @@ static void vcn_v3_0_enable_clock_gating(struct amdgpu_device *adev, int inst)
         | UVD_SUVD_CGC_CTRL__SMPA_MODE_MASK
         | UVD_SUVD_CGC_CTRL__MPBE0_MODE_MASK
         | UVD_SUVD_CGC_CTRL__MPBE1_MODE_MASK
-        | 0x00008000
-        | 0x00010000
+        | UVD_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK
+        | UVD_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK
         | UVD_SUVD_CGC_CTRL__MPC1_MODE_MASK
         | UVD_SUVD_CGC_CTRL__FBC_PCLK_MASK
         | UVD_SUVD_CGC_CTRL__FBC_CCLK_MASK);
@@ -503,8 +503,8 @@ static const struct kfd_device_info *kfd_supported_devices[][2] = {
 #ifdef KFD_SUPPORT_IOMMU_V2
     [CHIP_KAVERI] = {&kaveri_device_info, NULL},
     [CHIP_CARRIZO] = {&carrizo_device_info, NULL},
-    [CHIP_RAVEN] = {&raven_device_info, NULL},
 #endif
+    [CHIP_RAVEN] = {&raven_device_info, NULL},
     [CHIP_HAWAII] = {&hawaii_device_info, NULL},
     [CHIP_TONGA] = {&tonga_device_info, NULL},
     [CHIP_FIJI] = {&fiji_device_info, &fiji_vf_device_info},
@@ -650,9 +650,10 @@ static int evict_process_queues_nocpsch(struct device_queue_manager *dqm,
         goto out;

     pdd = qpd_to_pdd(qpd);
-    pr_info_ratelimited("Evicting PASID 0x%x queues\n",
+    pr_debug_ratelimited("Evicting PASID 0x%x queues\n",
                 pdd->process->pasid);

+    pdd->last_evict_timestamp = get_jiffies_64();
     /* Mark all queues as evicted. Deactivate all active queues on
      * the qpd.
      */
@@ -700,7 +701,7 @@ static int evict_process_queues_cpsch(struct device_queue_manager *dqm,
         goto out;

     pdd = qpd_to_pdd(qpd);
-    pr_info_ratelimited("Evicting PASID 0x%x queues\n",
+    pr_debug_ratelimited("Evicting PASID 0x%x queues\n",
                 pdd->process->pasid);

     /* Mark all queues as evicted. Deactivate all active queues on
@@ -714,6 +715,7 @@ static int evict_process_queues_cpsch(struct device_queue_manager *dqm,
         q->properties.is_active = false;
         decrement_queue_count(dqm, q->properties.type);
     }
+    pdd->last_evict_timestamp = get_jiffies_64();
     retval = execute_queues_cpsch(dqm,
                 qpd->is_debug ?
                 KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES :
@@ -732,6 +734,7 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
     struct mqd_manager *mqd_mgr;
     struct kfd_process_device *pdd;
     uint64_t pd_base;
+    uint64_t eviction_duration;
     int retval, ret = 0;

     pdd = qpd_to_pdd(qpd);
@@ -746,7 +749,7 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
         goto out;
     }

-    pr_info_ratelimited("Restoring PASID 0x%x queues\n",
+    pr_debug_ratelimited("Restoring PASID 0x%x queues\n",
                 pdd->process->pasid);

     /* Update PD Base in QPD */
@@ -799,6 +802,8 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
             ret = retval;
     }
     qpd->evicted = 0;
+    eviction_duration = get_jiffies_64() - pdd->last_evict_timestamp;
+    atomic64_add(eviction_duration, &pdd->evict_duration_counter);
 out:
     if (mm)
         mmput(mm);
@@ -812,6 +817,7 @@ static int restore_process_queues_cpsch(struct device_queue_manager *dqm,
     struct queue *q;
     struct kfd_process_device *pdd;
     uint64_t pd_base;
+    uint64_t eviction_duration;
     int retval = 0;

     pdd = qpd_to_pdd(qpd);
@@ -826,7 +832,7 @@ static int restore_process_queues_cpsch(struct device_queue_manager *dqm,
         goto out;
     }

-    pr_info_ratelimited("Restoring PASID 0x%x queues\n",
+    pr_debug_ratelimited("Restoring PASID 0x%x queues\n",
                 pdd->process->pasid);

     /* Update PD Base in QPD */
@@ -845,6 +851,8 @@ static int restore_process_queues_cpsch(struct device_queue_manager *dqm,
     retval = execute_queues_cpsch(dqm,
                 KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
     qpd->evicted = 0;
+    eviction_duration = get_jiffies_64() - pdd->last_evict_timestamp;
+    atomic64_add(eviction_duration, &pdd->evict_duration_counter);
 out:
     dqm_unlock(dqm);
     return retval;
@@ -1192,6 +1200,8 @@ static int stop_cpsch(struct device_queue_manager *dqm)
     dqm->sched_running = false;
     dqm_unlock(dqm);

+    pm_release_ib(&dqm->packets);
+
     kfd_gtt_sa_free(dqm->dev, dqm->fence_mem);
     pm_uninit(&dqm->packets, hanging);

@@ -1302,7 +1312,7 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
     if (q->properties.is_active) {
         increment_queue_count(dqm, q->properties.type);

-        retval = execute_queues_cpsch(dqm,
+        execute_queues_cpsch(dqm,
                 KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
     }

@@ -1964,6 +1974,7 @@ int kfd_process_vm_fault(struct device_queue_manager *dqm,

     if (!p)
         return -EINVAL;
+    WARN(debug_evictions, "Evicting pid %d", p->lead_thread->pid);
     pdd = kfd_get_process_device_data(dqm->dev, p);
     if (pdd)
         ret = dqm->ops.evict_process_queues(dqm, &pdd->qpd);
@@ -631,7 +631,7 @@ enum kfd_pdd_bound {
     PDD_BOUND_SUSPENDED,
 };

-#define MAX_SYSFS_FILENAME_LEN 11
+#define MAX_SYSFS_FILENAME_LEN 15

 /*
  * SDMA counter runs at 100MHz frequency.
@@ -692,6 +692,13 @@ struct kfd_process_device {
     uint64_t sdma_past_activity_counter;
     struct attribute attr_sdma;
     char sdma_filename[MAX_SYSFS_FILENAME_LEN];
+
+    /* Eviction activity tracking */
+    uint64_t last_evict_timestamp;
+    atomic64_t evict_duration_counter;
+    struct attribute attr_evict;
+
+    struct kobject *kobj_stats;
 };

 #define qpd_to_pdd(x) container_of(x, struct kfd_process_device, qpd)
@@ -344,6 +344,26 @@ static ssize_t kfd_procfs_queue_show(struct kobject *kobj,

     return 0;
 }
+static ssize_t kfd_procfs_stats_show(struct kobject *kobj,
+                     struct attribute *attr, char *buffer)
+{
+    if (strcmp(attr->name, "evicted_ms") == 0) {
+        struct kfd_process_device *pdd = container_of(attr,
+                struct kfd_process_device,
+                attr_evict);
+        uint64_t evict_jiffies;
+
+        evict_jiffies = atomic64_read(&pdd->evict_duration_counter);
+
+        return snprintf(buffer,
+                PAGE_SIZE,
+                "%llu\n",
+                jiffies64_to_msecs(evict_jiffies));
+    } else
+        pr_err("Invalid attribute");
+
+    return 0;
+}

 static struct attribute attr_queue_size = {
     .name = "size",
@@ -376,6 +396,19 @@ static struct kobj_type procfs_queue_type = {
     .default_attrs = procfs_queue_attrs,
 };

+static const struct sysfs_ops procfs_stats_ops = {
+    .show = kfd_procfs_stats_show,
+};
+
+static struct attribute *procfs_stats_attrs[] = {
+    NULL
+};
+
+static struct kobj_type procfs_stats_type = {
+    .sysfs_ops = &procfs_stats_ops,
+    .default_attrs = procfs_stats_attrs,
+};
+
 int kfd_procfs_add_queue(struct queue *q)
 {
     struct kfd_process *proc;
@@ -417,6 +450,58 @@ static int kfd_sysfs_create_file(struct kfd_process *p, struct attribute *attr,
     return ret;
 }

+static int kfd_procfs_add_sysfs_stats(struct kfd_process *p)
+{
+    int ret = 0;
+    struct kfd_process_device *pdd;
+    char stats_dir_filename[MAX_SYSFS_FILENAME_LEN];
+
+    if (!p)
+        return -EINVAL;
+
+    if (!p->kobj)
+        return -EFAULT;
+
+    /*
+     * Create sysfs files for each GPU:
+     * - proc/<pid>/stats_<gpuid>/
+     * - proc/<pid>/stats_<gpuid>/evicted_ms
+     */
+    list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
+        struct kobject *kobj_stats;
+
+        snprintf(stats_dir_filename, MAX_SYSFS_FILENAME_LEN,
+                "stats_%u", pdd->dev->id);
+        kobj_stats = kfd_alloc_struct(kobj_stats);
+        if (!kobj_stats)
+            return -ENOMEM;
+
+        ret = kobject_init_and_add(kobj_stats,
+                &procfs_stats_type,
+                p->kobj,
+                stats_dir_filename);
+
+        if (ret) {
+            pr_warn("Creating KFD proc/stats_%s folder failed",
+                stats_dir_filename);
+            kobject_put(kobj_stats);
+            goto err;
+        }
+
+        pdd->kobj_stats = kobj_stats;
+        pdd->attr_evict.name = "evicted_ms";
+        pdd->attr_evict.mode = KFD_SYSFS_FILE_MODE;
+        sysfs_attr_init(&pdd->attr_evict);
+        ret = sysfs_create_file(kobj_stats, &pdd->attr_evict);
+        if (ret)
+            pr_warn("Creating eviction stats for gpuid %d failed",
+                    (int)pdd->dev->id);
+    }
+err:
+    return ret;
+}
+
+
 static int kfd_procfs_add_sysfs_files(struct kfd_process *p)
 {
     int ret = 0;
@@ -660,6 +745,16 @@ struct kfd_process *kfd_create_process(struct file *filep)
     if (!process->kobj_queues)
         pr_warn("Creating KFD proc/queues folder failed");

+    ret = kfd_procfs_add_sysfs_stats(process);
+    if (ret)
+        pr_warn("Creating sysfs stats dir for pid %d failed",
+            (int)process->lead_thread->pid);
+
+    ret = kfd_procfs_add_sysfs_stats(process);
+    if (ret)
+        pr_warn("Creating sysfs stats dir for pid %d failed",
+            (int)process->lead_thread->pid);
+
     ret = kfd_procfs_add_sysfs_files(process);
     if (ret)
         pr_warn("Creating sysfs usage file for pid %d failed",
@@ -816,6 +911,10 @@ static void kfd_process_wq_release(struct work_struct *work)
     list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
         sysfs_remove_file(p->kobj, &pdd->attr_vram);
         sysfs_remove_file(p->kobj, &pdd->attr_sdma);
+        sysfs_remove_file(p->kobj, &pdd->attr_evict);
+        kobject_del(pdd->kobj_stats);
+        kobject_put(pdd->kobj_stats);
+        pdd->kobj_stats = NULL;
     }

     kobject_del(p->kobj);
@@ -1125,6 +1224,7 @@ struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
     pdd->runtime_inuse = false;
     pdd->vram_usage = 0;
     pdd->sdma_past_activity_counter = 0;
+    atomic64_set(&pdd->evict_duration_counter, 0);
     list_add(&pdd->per_device_list, &p->per_device_data);

     /* Init idr used for memory handle translation */
@@ -1488,6 +1588,7 @@ void kfd_suspend_all_processes(void)
     unsigned int temp;
     int idx = srcu_read_lock(&kfd_processes_srcu);

+    WARN(debug_evictions, "Evicting all processes");
     hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
         cancel_delayed_work_sync(&p->eviction_work);
         cancel_delayed_work_sync(&p->restore_work);
@@ -228,17 +228,14 @@ static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
         return 0;
     else {
         struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
-        struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
-                acrtc->base.state);
-

-        if (acrtc_state->stream == NULL) {
+        if (acrtc->dm_irq_params.stream == NULL) {
             DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
                   crtc);
             return 0;
         }

-        return dc_stream_get_vblank_counter(acrtc_state->stream);
+        return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
     }
 }
@@ -251,10 +248,8 @@ static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
         return -EINVAL;
     else {
         struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
-        struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
-                acrtc->base.state);

-        if (acrtc_state->stream == NULL) {
+        if (acrtc->dm_irq_params.stream == NULL) {
             DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
                   crtc);
             return 0;
@@ -264,7 +259,7 @@ static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
          * TODO rework base driver to use values directly.
          * for now parse it back into reg-format
          */
-        dc_stream_get_scanoutpos(acrtc_state->stream,
+        dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
                      &v_blank_start,
                      &v_blank_end,
                      &h_position,
@@ -323,6 +318,14 @@ get_crtc_by_otg_inst(struct amdgpu_device *adev,
     return NULL;
 }

+static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
+{
+    return acrtc->dm_irq_params.freesync_config.state ==
+           VRR_STATE_ACTIVE_VARIABLE ||
+           acrtc->dm_irq_params.freesync_config.state ==
+           VRR_STATE_ACTIVE_FIXED;
+}
+
 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
 {
     return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
@@ -343,7 +346,6 @@ static void dm_pflip_high_irq(void *interrupt_params)
     struct amdgpu_device *adev = irq_params->adev;
     unsigned long flags;
     struct drm_pending_vblank_event *e;
-    struct dm_crtc_state *acrtc_state;
     uint32_t vpos, hpos, v_blank_start, v_blank_end;
     bool vrr_active;

@@ -375,12 +377,11 @@ static void dm_pflip_high_irq(void *interrupt_params)
     if (!e)
         WARN_ON(1);

-    acrtc_state = to_dm_crtc_state(amdgpu_crtc->base.state);
-    vrr_active = amdgpu_dm_vrr_active(acrtc_state);
+    vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);

     /* Fixed refresh rate, or VRR scanout position outside front-porch? */
     if (!vrr_active ||
-        !dc_stream_get_scanoutpos(acrtc_state->stream, &v_blank_start,
+        !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
                       &v_blank_end, &hpos, &vpos) ||
         (vpos < v_blank_start)) {
         /* Update to correct count and vblank timestamp if racing with
@@ -425,7 +426,7 @@ static void dm_pflip_high_irq(void *interrupt_params)
      * of pageflip completion, so last_flip_vblank is the forbidden count
      * for queueing new pageflips if vsync + VRR is enabled.
      */
-    amdgpu_crtc->last_flip_vblank =
+    amdgpu_crtc->dm_irq_params.last_flip_vblank =
         amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

     amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
@@ -441,17 +442,17 @@ static void dm_vupdate_high_irq(void *interrupt_params)
     struct common_irq_params *irq_params = interrupt_params;
     struct amdgpu_device *adev = irq_params->adev;
     struct amdgpu_crtc *acrtc;
-    struct dm_crtc_state *acrtc_state;
     unsigned long flags;
+    int vrr_active;

     acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

     if (acrtc) {
-        acrtc_state = to_dm_crtc_state(acrtc->base.state);
+        vrr_active = amdgpu_dm_vrr_active_irq(acrtc);

         DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
                   acrtc->crtc_id,
-                  amdgpu_dm_vrr_active(acrtc_state));
+                  vrr_active);

         /* Core vblank handling is done here after end of front-porch in
          * vrr mode, as vblank timestamping will give valid results
@@ -459,22 +460,22 @@ static void dm_vupdate_high_irq(void *interrupt_params)
          * page-flip completion events that have been queued to us
          * if a pageflip happened inside front-porch.
          */
-        if (amdgpu_dm_vrr_active(acrtc_state)) {
+        if (vrr_active) {
             drm_crtc_handle_vblank(&acrtc->base);

             /* BTR processing for pre-DCE12 ASICs */
-            if (acrtc_state->stream &&
+            if (acrtc->dm_irq_params.stream &&
                 adev->family < AMDGPU_FAMILY_AI) {
                 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
                 mod_freesync_handle_v_update(
                     adev->dm.freesync_module,
-                    acrtc_state->stream,
-                    &acrtc_state->vrr_params);
+                    acrtc->dm_irq_params.stream,
+                    &acrtc->dm_irq_params.vrr_params);

                 dc_stream_adjust_vmin_vmax(
                     adev->dm.dc,
-                    acrtc_state->stream,
-                    &acrtc_state->vrr_params.adjust);
+                    acrtc->dm_irq_params.stream,
+                    &acrtc->dm_irq_params.vrr_params.adjust);
                 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
             }
         }
@@ -493,18 +494,17 @@ static void dm_crtc_high_irq(void *interrupt_params)
     struct common_irq_params *irq_params = interrupt_params;
     struct amdgpu_device *adev = irq_params->adev;
     struct amdgpu_crtc *acrtc;
-    struct dm_crtc_state *acrtc_state;
     unsigned long flags;
+    int vrr_active;

     acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
     if (!acrtc)
         return;

-    acrtc_state = to_dm_crtc_state(acrtc->base.state);
+    vrr_active = amdgpu_dm_vrr_active_irq(acrtc);

     DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
-              amdgpu_dm_vrr_active(acrtc_state),
-              acrtc_state->active_planes);
+              vrr_active, acrtc->dm_irq_params.active_planes);

     /**
      * Core vblank handling at start of front-porch is only possible
@@ -512,7 +512,7 @@ static void dm_crtc_high_irq(void *interrupt_params)
      * valid results while done in front-porch. Otherwise defer it
      * to dm_vupdate_high_irq after end of front-porch.
      */
-    if (!amdgpu_dm_vrr_active(acrtc_state))
+    if (!vrr_active)
         drm_crtc_handle_vblank(&acrtc->base);

     /**
@@ -527,14 +527,16 @@ static void dm_crtc_high_irq(void *interrupt_params)

     spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

-    if (acrtc_state->stream && acrtc_state->vrr_params.supported &&
-        acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
+    if (acrtc->dm_irq_params.stream &&
+        acrtc->dm_irq_params.vrr_params.supported &&
+        acrtc->dm_irq_params.freesync_config.state ==
+        VRR_STATE_ACTIVE_VARIABLE) {
         mod_freesync_handle_v_update(adev->dm.freesync_module,
-                         acrtc_state->stream,
-                         &acrtc_state->vrr_params);
+                         acrtc->dm_irq_params.stream,
+                         &acrtc->dm_irq_params.vrr_params);
|
||||
|
||||
dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc_state->stream,
|
||||
&acrtc_state->vrr_params.adjust);
|
||||
dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
|
||||
&acrtc->dm_irq_params.vrr_params.adjust);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -549,7 +551,7 @@ static void dm_crtc_high_irq(void *interrupt_params)
|
|||
*/
|
||||
if (adev->family >= AMDGPU_FAMILY_RV &&
|
||||
acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
|
||||
acrtc_state->active_planes == 0) {
|
||||
acrtc->dm_irq_params.active_planes == 0) {
|
||||
if (acrtc->event) {
|
||||
drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
|
||||
acrtc->event = NULL;
|
||||
|
@ -878,6 +880,45 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static void amdgpu_check_debugfs_connector_property_change(struct amdgpu_device *adev,
|
||||
struct drm_atomic_state *state)
|
||||
{
|
||||
struct drm_connector *connector;
|
||||
struct drm_crtc *crtc;
|
||||
struct amdgpu_dm_connector *amdgpu_dm_connector;
|
||||
struct drm_connector_state *conn_state;
|
||||
struct dm_crtc_state *acrtc_state;
|
||||
struct drm_crtc_state *crtc_state;
|
||||
struct dc_stream_state *stream;
|
||||
struct drm_device *dev = adev_to_drm(adev);
|
||||
|
||||
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
|
||||
|
||||
amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
|
||||
conn_state = connector->state;
|
||||
|
||||
if (!(conn_state && conn_state->crtc))
|
||||
continue;
|
||||
|
||||
crtc = conn_state->crtc;
|
||||
acrtc_state = to_dm_crtc_state(crtc->state);
|
||||
|
||||
if (!(acrtc_state && acrtc_state->stream))
|
||||
continue;
|
||||
|
||||
stream = acrtc_state->stream;
|
||||
|
||||
if (amdgpu_dm_connector->dsc_settings.dsc_force_enable ||
|
||||
amdgpu_dm_connector->dsc_settings.dsc_num_slices_v ||
|
||||
amdgpu_dm_connector->dsc_settings.dsc_num_slices_h ||
|
||||
amdgpu_dm_connector->dsc_settings.dsc_bits_per_pixel) {
|
||||
conn_state = drm_atomic_get_connector_state(state, connector);
|
||||
crtc_state = drm_atomic_get_crtc_state(state, crtc);
|
||||
crtc_state->mode_changed = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static int amdgpu_dm_init(struct amdgpu_device *adev)
|
||||
{
|
||||
struct dc_init_data init_data;
|
||||
|
@ -1425,9 +1466,6 @@ static int dm_late_init(void *handle)
|
|||
struct dmcu *dmcu = NULL;
|
||||
bool ret = true;
|
||||
|
||||
if (!adev->dm.fw_dmcu && !adev->dm.dmub_fw)
|
||||
return detect_mst_link_for_all_connectors(adev_to_drm(adev));
|
||||
|
||||
dmcu = adev->dm.dc->res_pool->dmcu;
|
||||
|
||||
for (i = 0; i < 16; i++)
|
||||
|
@ -3373,9 +3411,6 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
|
|||
goto fail;
|
||||
}
|
||||
|
||||
/* No userspace support. */
|
||||
dm->dc->debug.disable_tri_buf = true;
|
||||
|
||||
return 0;
|
||||
fail:
|
||||
kfree(aencoder);
|
||||
|
@ -4689,9 +4724,10 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
|
|||
dc_link_get_link_cap(aconnector->dc_link));
|
||||
|
||||
#if defined(CONFIG_DRM_AMD_DC_DCN)
|
||||
if (dsc_caps.is_dsc_supported) {
|
||||
if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported) {
|
||||
/* Set DSC policy according to dsc_clock_en */
|
||||
dc_dsc_policy_set_enable_dsc_when_not_needed(aconnector->dsc_settings.dsc_clock_en);
|
||||
dc_dsc_policy_set_enable_dsc_when_not_needed(
|
||||
aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
|
||||
|
||||
if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
|
||||
&dsc_caps,
|
||||
|
@ -4701,16 +4737,14 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
|
|||
&stream->timing.dsc_cfg))
|
||||
stream->timing.flags.DSC = 1;
|
||||
/* Overwrite the stream flag if DSC is enabled through debugfs */
|
||||
if (aconnector->dsc_settings.dsc_clock_en)
|
||||
if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
|
||||
stream->timing.flags.DSC = 1;
|
||||
|
||||
if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_slice_width)
|
||||
stream->timing.dsc_cfg.num_slices_h = DIV_ROUND_UP(stream->timing.h_addressable,
|
||||
aconnector->dsc_settings.dsc_slice_width);
|
||||
if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
|
||||
stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
|
||||
|
||||
if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_slice_height)
|
||||
stream->timing.dsc_cfg.num_slices_v = DIV_ROUND_UP(stream->timing.v_addressable,
|
||||
aconnector->dsc_settings.dsc_slice_height);
|
||||
if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
|
||||
stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
|
||||
|
||||
if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
|
||||
stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
|
||||
|
@ -4809,7 +4843,6 @@ dm_crtc_duplicate_state(struct drm_crtc *crtc)
|
|||
}
|
||||
|
||||
state->active_planes = cur->active_planes;
|
||||
state->vrr_params = cur->vrr_params;
|
||||
state->vrr_infopacket = cur->vrr_infopacket;
|
||||
state->abm_level = cur->abm_level;
|
||||
state->vrr_supported = cur->vrr_supported;
|
||||
|
@ -5427,19 +5460,6 @@ static void dm_crtc_helper_disable(struct drm_crtc *crtc)
|
|||
{
|
||||
}
|
||||
|
||||
static bool does_crtc_have_active_cursor(struct drm_crtc_state *new_crtc_state)
|
||||
{
|
||||
struct drm_device *dev = new_crtc_state->crtc->dev;
|
||||
struct drm_plane *plane;
|
||||
|
||||
drm_for_each_plane_mask(plane, dev, new_crtc_state->plane_mask) {
|
||||
if (plane->type == DRM_PLANE_TYPE_CURSOR)
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
|
||||
{
|
||||
struct drm_atomic_state *state = new_crtc_state->state;
|
||||
|
@ -5503,19 +5523,20 @@ static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
|
|||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* We require the primary plane to be enabled whenever the CRTC is, otherwise
|
||||
* drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
|
||||
* planes are disabled, which is not supported by the hardware. And there is legacy
|
||||
* userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
|
||||
*/
|
||||
if (state->enable &&
|
||||
!(state->plane_mask & drm_plane_mask(crtc->primary)))
|
||||
return -EINVAL;
|
||||
|
||||
/* In some use cases, like reset, no stream is attached */
|
||||
if (!dm_crtc_state->stream)
|
||||
return 0;
|
||||
|
||||
/*
|
||||
* We want at least one hardware plane enabled to use
|
||||
* the stream with a cursor enabled.
|
||||
*/
|
||||
if (state->enable && state->active &&
|
||||
does_crtc_have_active_cursor(state) &&
|
||||
dm_crtc_state->active_planes == 0)
|
||||
return -EINVAL;
|
||||
|
||||
if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
|
||||
return 0;
|
||||
|
||||
|
@ -6862,6 +6883,7 @@ static void update_freesync_state_on_stream(
|
|||
struct mod_vrr_params vrr_params;
|
||||
struct dc_info_packet vrr_infopacket = {0};
|
||||
struct amdgpu_device *adev = dm->adev;
|
||||
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
|
||||
unsigned long flags;
|
||||
|
||||
if (!new_stream)
|
||||
|
@ -6876,7 +6898,7 @@ static void update_freesync_state_on_stream(
|
|||
return;
|
||||
|
||||
spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
|
||||
vrr_params = new_crtc_state->vrr_params;
|
||||
vrr_params = acrtc->dm_irq_params.vrr_params;
|
||||
|
||||
if (surface) {
|
||||
mod_freesync_handle_preflip(
|
||||
|
@ -6907,7 +6929,7 @@ static void update_freesync_state_on_stream(
|
|||
&vrr_infopacket);
|
||||
|
||||
new_crtc_state->freesync_timing_changed |=
|
||||
(memcmp(&new_crtc_state->vrr_params.adjust,
|
||||
(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
|
||||
&vrr_params.adjust,
|
||||
sizeof(vrr_params.adjust)) != 0);
|
||||
|
||||
|
@ -6916,10 +6938,10 @@ static void update_freesync_state_on_stream(
|
|||
&vrr_infopacket,
|
||||
sizeof(vrr_infopacket)) != 0);
|
||||
|
||||
new_crtc_state->vrr_params = vrr_params;
|
||||
acrtc->dm_irq_params.vrr_params = vrr_params;
|
||||
new_crtc_state->vrr_infopacket = vrr_infopacket;
|
||||
|
||||
new_stream->adjust = new_crtc_state->vrr_params.adjust;
|
||||
new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
|
||||
new_stream->vrr_infopacket = vrr_infopacket;
|
||||
|
||||
if (new_crtc_state->freesync_vrr_info_changed)
|
||||
|
@ -6931,7 +6953,7 @@ static void update_freesync_state_on_stream(
|
|||
spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
|
||||
}
|
||||
|
||||
static void pre_update_freesync_state_on_stream(
|
||||
static void update_stream_irq_parameters(
|
||||
struct amdgpu_display_manager *dm,
|
||||
struct dm_crtc_state *new_crtc_state)
|
||||
{
|
||||
|
@ -6939,6 +6961,7 @@ static void pre_update_freesync_state_on_stream(
|
|||
struct mod_vrr_params vrr_params;
|
||||
struct mod_freesync_config config = new_crtc_state->freesync_config;
|
||||
struct amdgpu_device *adev = dm->adev;
|
||||
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
|
||||
unsigned long flags;
|
||||
|
||||
if (!new_stream)
|
||||
|
@ -6952,7 +6975,7 @@ static void pre_update_freesync_state_on_stream(
|
|||
return;
|
||||
|
||||
spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
|
||||
vrr_params = new_crtc_state->vrr_params;
|
||||
vrr_params = acrtc->dm_irq_params.vrr_params;
|
||||
|
||||
if (new_crtc_state->vrr_supported &&
|
||||
config.min_refresh_in_uhz &&
|
||||
|
@ -6969,11 +6992,14 @@ static void pre_update_freesync_state_on_stream(
|
|||
&config, &vrr_params);
|
||||
|
||||
new_crtc_state->freesync_timing_changed |=
|
||||
(memcmp(&new_crtc_state->vrr_params.adjust,
|
||||
&vrr_params.adjust,
|
||||
sizeof(vrr_params.adjust)) != 0);
|
||||
(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
|
||||
&vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
|
||||
|
||||
new_crtc_state->vrr_params = vrr_params;
|
||||
new_crtc_state->freesync_config = config;
|
||||
/* Copy state for access from DM IRQ handler */
|
||||
acrtc->dm_irq_params.freesync_config = config;
|
||||
acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
|
||||
acrtc->dm_irq_params.vrr_params = vrr_params;
|
||||
spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
|
||||
}
|
||||
|
||||
|
@ -7197,7 +7223,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
|
|||
* on late submission of flips.
|
||||
*/
|
||||
spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
|
||||
last_flip_vblank = acrtc_attach->last_flip_vblank;
|
||||
last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
|
||||
spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
|
||||
}
|
||||
|
||||
|
@ -7281,7 +7307,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
|
|||
spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
|
||||
dc_stream_adjust_vmin_vmax(
|
||||
dm->dc, acrtc_state->stream,
|
||||
&acrtc_state->vrr_params.adjust);
|
||||
&acrtc_attach->dm_irq_params.vrr_params.adjust);
|
||||
spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
|
||||
}
|
||||
mutex_lock(&dm->dc_lock);
|
||||
|
@ -7431,34 +7457,6 @@ static int amdgpu_dm_atomic_commit(struct drm_device *dev,
|
|||
struct drm_atomic_state *state,
|
||||
bool nonblock)
|
||||
{
|
||||
struct drm_crtc *crtc;
|
||||
struct drm_crtc_state *old_crtc_state, *new_crtc_state;
|
||||
struct amdgpu_device *adev = drm_to_adev(dev);
|
||||
int i;
|
||||
|
||||
/*
|
||||
* We evade vblank and pflip interrupts on CRTCs that are undergoing
|
||||
* a modeset, being disabled, or have no active planes.
|
||||
*
|
||||
* It's done in atomic commit rather than commit tail for now since
|
||||
* some of these interrupt handlers access the current CRTC state and
|
||||
* potentially the stream pointer itself.
|
||||
*
|
||||
* Since the atomic state is swapped within atomic commit and not within
|
||||
* commit tail this would leave to new state (that hasn't been committed yet)
|
||||
* being accesssed from within the handlers.
|
||||
*
|
||||
* TODO: Fix this so we can do this in commit tail and not have to block
|
||||
* in atomic check.
|
||||
*/
|
||||
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
|
||||
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
|
||||
|
||||
if (old_crtc_state->active &&
|
||||
(!new_crtc_state->active ||
|
||||
drm_atomic_crtc_needs_modeset(new_crtc_state)))
|
||||
manage_dm_interrupts(adev, acrtc, false);
|
||||
}
|
||||
/*
|
||||
* Add check here for SoC's that support hardware cursor plane, to
|
||||
* unset legacy_cursor_update
|
||||
|
@ -7508,6 +7506,20 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
|
|||
dc_resource_state_copy_construct_current(dm->dc, dc_state);
|
||||
}
|
||||
|
||||
for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
|
||||
new_crtc_state, i) {
|
||||
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
|
||||
|
||||
dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
|
||||
|
||||
if (old_crtc_state->active &&
|
||||
(!new_crtc_state->active ||
|
||||
drm_atomic_crtc_needs_modeset(new_crtc_state))) {
|
||||
manage_dm_interrupts(adev, acrtc, false);
|
||||
dc_stream_release(dm_old_crtc_state->stream);
|
||||
}
|
||||
}
|
||||
|
||||
/* update changed items */
|
||||
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
|
||||
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
|
||||
|
@ -7603,7 +7615,6 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
|
|||
if (!status)
|
||||
status = dc_stream_get_status_from_state(dc_state,
|
||||
dm_new_crtc_state->stream);
|
||||
|
||||
if (!status)
|
||||
DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
|
||||
else
|
||||
|
@ -7729,8 +7740,8 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
|
|||
dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
|
||||
dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
|
||||
|
||||
/* Update freesync active state. */
|
||||
pre_update_freesync_state_on_stream(dm, dm_new_crtc_state);
|
||||
/* For freesync config update on crtc state and params for irq */
|
||||
update_stream_irq_parameters(dm, dm_new_crtc_state);
|
||||
|
||||
/* Handle vrr on->off / off->on transitions */
|
||||
amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
|
||||
|
@ -7746,10 +7757,15 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
|
|||
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
|
||||
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
|
||||
|
||||
dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
|
||||
|
||||
if (new_crtc_state->active &&
|
||||
(!old_crtc_state->active ||
|
||||
drm_atomic_crtc_needs_modeset(new_crtc_state))) {
|
||||
dc_stream_retain(dm_new_crtc_state->stream);
|
||||
acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
|
||||
manage_dm_interrupts(adev, acrtc, true);
|
||||
|
||||
#ifdef CONFIG_DEBUG_FS
|
||||
/**
|
||||
* Frontend may have changed so reapply the CRC capture
|
||||
|
@ -7993,8 +8009,6 @@ static void reset_freesync_config_for_crtc(
|
|||
{
|
||||
new_crtc_state->vrr_supported = false;
|
||||
|
||||
memset(&new_crtc_state->vrr_params, 0,
|
||||
sizeof(new_crtc_state->vrr_params));
|
||||
memset(&new_crtc_state->vrr_infopacket, 0,
|
||||
sizeof(new_crtc_state->vrr_infopacket));
|
||||
}
|
||||
|
@ -8565,6 +8579,8 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
|
|||
int ret, i;
|
||||
bool lock_and_validation_needed = false;
|
||||
|
||||
amdgpu_check_debugfs_connector_property_change(adev, state);
|
||||
|
||||
ret = drm_atomic_helper_check_modeset(dev, state);
|
||||
if (ret)
|
||||
goto fail;
|
||||
|
|
|
@@ -340,13 +340,19 @@ struct amdgpu_display_manager {
 	 * fake encoders used for DP MST.
 	 */
 	struct amdgpu_encoder mst_encoders[AMDGPU_DM_MAX_CRTC];
 	bool force_timing_sync;
 };
 
+enum dsc_clock_force_state {
+	DSC_CLK_FORCE_DEFAULT = 0,
+	DSC_CLK_FORCE_ENABLE,
+	DSC_CLK_FORCE_DISABLE,
+};
+
 struct dsc_preferred_settings {
-	bool dsc_clock_en;
-	uint32_t dsc_slice_width;
-	uint32_t dsc_slice_height;
+	enum dsc_clock_force_state dsc_force_enable;
+	uint32_t dsc_num_slices_v;
+	uint32_t dsc_num_slices_h;
 	uint32_t dsc_bits_per_pixel;
 };
 
@@ -434,7 +440,6 @@ struct dm_crtc_state {
 
 	bool vrr_supported;
 	struct mod_freesync_config freesync_config;
-	struct mod_vrr_params vrr_params;
 	struct dc_info_packet vrr_infopacket;
 
 	int abm_level;
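The dsc_force_enable tri-state above replaces the old boolean dsc_clock_en: DEFAULT leaves the decision to the driver's DSC policy, ENABLE forces DSC on, and DISABLE keeps it off even when the sink supports it. As a minimal standalone C sketch of how such a tri-state gates a capability decision (the enum mirrors the header above, but decide_dsc() and its inputs are illustrative, not driver code):

    #include <stdbool.h>
    #include <stdio.h>

    enum dsc_clock_force_state {
        DSC_CLK_FORCE_DEFAULT = 0,
        DSC_CLK_FORCE_ENABLE,
        DSC_CLK_FORCE_DISABLE,
    };

    /* Illustrative only: mirrors the shape of the create_stream_for_sink()
     * check. DSC is considered when the sink supports it and the user has
     * not forced it off; a forced-on request overrides the policy result. */
    static bool decide_dsc(enum dsc_clock_force_state force,
                           bool sink_supports_dsc, bool policy_wants_dsc)
    {
        if (force == DSC_CLK_FORCE_DISABLE || !sink_supports_dsc)
            return false;
        if (force == DSC_CLK_FORCE_ENABLE)
            return true;
        return policy_wants_dsc;
    }

    int main(void)
    {
        printf("forced on : %d\n", decide_dsc(DSC_CLK_FORCE_ENABLE, true, false));
        printf("forced off: %d\n", decide_dsc(DSC_CLK_FORCE_DISABLE, true, true));
        printf("default   : %d\n", decide_dsc(DSC_CLK_FORCE_DEFAULT, true, false));
        return 0;
    }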
@@ -111,7 +111,6 @@ static int parse_write_buffer_into_params(char *wr_buf, uint32_t wr_buf_size,
 
 	if (*param_nums > max_param_num)
 		*param_nums = max_param_num;
-;
 
 	wr_buf_ptr = wr_buf; /* reset buf pointer */
 	wr_buf_count = 0; /* number of char already checked */
@@ -265,7 +264,7 @@ static ssize_t dp_link_settings_write(struct file *f, const char __user *buf,
 	if (!wr_buf)
 		return -ENOSPC;
 
-	if (parse_write_buffer_into_params(wr_buf, wr_buf_size,
+	if (parse_write_buffer_into_params(wr_buf, size,
 					   (long *)param, buf,
 					   max_param_num,
 					   &param_nums)) {
@@ -424,7 +423,7 @@ static ssize_t dp_phy_settings_write(struct file *f, const char __user *buf,
 	if (!wr_buf)
 		return -ENOSPC;
 
-	if (parse_write_buffer_into_params(wr_buf, wr_buf_size,
+	if (parse_write_buffer_into_params(wr_buf, size,
 					   (long *)param, buf,
 					   max_param_num,
 					   &param_nums)) {
@@ -576,7 +575,7 @@ static ssize_t dp_phy_test_pattern_debugfs_write(struct file *f, const char __user *buf,
 	if (!wr_buf)
 		return -ENOSPC;
 
-	if (parse_write_buffer_into_params(wr_buf, wr_buf_size,
+	if (parse_write_buffer_into_params(wr_buf, size,
 					   (long *)param, buf,
 					   max_param_num,
 					   &param_nums)) {
@@ -1059,12 +1058,17 @@ static int dp_dsc_fec_support_show(struct seq_file *m, void *data)
  *
  * echo 1 > /sys/kernel/debug/dri/0/DP-X/trigger_hotplug
  *
+ * This function can perform HPD unplug:
+ *
+ * echo 0 > /sys/kernel/debug/dri/0/DP-X/trigger_hotplug
+ *
  */
 static ssize_t dp_trigger_hotplug(struct file *f, const char __user *buf,
 							size_t size, loff_t *pos)
 {
 	struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private;
 	struct drm_connector *connector = &aconnector->base;
+	struct dc_link *link = NULL;
 	struct drm_device *dev = connector->dev;
 	enum dc_connection_type new_connection_type = dc_connection_none;
 	char *wr_buf = NULL;
@@ -1086,11 +1090,13 @@ static ssize_t dp_trigger_hotplug(struct file *f, const char __user *buf,
 		return -ENOSPC;
 	}
 
-	if (parse_write_buffer_into_params(wr_buf, wr_buf_size,
+	if (parse_write_buffer_into_params(wr_buf, size,
 					   (long *)param, buf,
 					   max_param_num,
-					   &param_nums))
+					   &param_nums)) {
+		kfree(wr_buf);
 		return -EINVAL;
+	}
 
 	if (param_nums <= 0) {
 		DRM_DEBUG_DRIVER("user data not be read\n");
@@ -1115,10 +1121,32 @@ static ssize_t dp_trigger_hotplug(struct file *f, const char __user *buf,
 		drm_modeset_unlock_all(dev);
 
 		drm_kms_helper_hotplug_event(dev);
+	} else if (param[0] == 0) {
+		if (!aconnector->dc_link)
+			goto unlock;
+
+		link = aconnector->dc_link;
+
+		if (link->local_sink) {
+			dc_sink_release(link->local_sink);
+			link->local_sink = NULL;
+		}
+
+		link->dpcd_sink_count = 0;
+		link->type = dc_connection_none;
+		link->dongle_max_pix_clk = 0;
+
+		amdgpu_dm_update_connector_after_detect(aconnector);
+
+		drm_modeset_lock_all(dev);
+		dm_restore_drm_connector_state(dev, connector);
+		drm_modeset_unlock_all(dev);
+
+		drm_kms_helper_hotplug_event(dev);
 	}
 
+unlock:
 	mutex_unlock(&aconnector->hpd_lock);
 
 	kfree(wr_buf);
 	return size;
@@ -1200,9 +1228,14 @@ static ssize_t dp_dsc_clock_en_read(struct file *f, char __user *buf,
  *
  * The write function: dp_dsc_clock_en_write
  * enables to force DSC on the connector.
- * User can write to either force enable DSC
+ * User can write to either force enable or force disable DSC
  * on the next modeset or set it to driver default
 *
+ * Accepted inputs:
+ * 0 - default DSC enablement policy
+ * 1 - force enable DSC on the connector
+ * 2 - force disable DSC on the connector (might cause fail in atomic_check)
+ *
 * Writing DSC settings is done with the following command:
 * - To force enable DSC (you need to specify
 * connector like DP-1):
@@ -1238,7 +1271,7 @@ static ssize_t dp_dsc_clock_en_write(struct file *f, const char __user *buf,
 		return -ENOSPC;
 	}
 
-	if (parse_write_buffer_into_params(wr_buf, wr_buf_size,
+	if (parse_write_buffer_into_params(wr_buf, size,
 					   (long *)param, buf,
 					   max_param_num,
 					   &param_nums)) {
@@ -1262,7 +1295,12 @@ static ssize_t dp_dsc_clock_en_write(struct file *f, const char __user *buf,
 	if (!pipe_ctx || !pipe_ctx->stream)
 		goto done;
 
-	aconnector->dsc_settings.dsc_clock_en = param[0];
+	if (param[0] == 1)
+		aconnector->dsc_settings.dsc_force_enable = DSC_CLK_FORCE_ENABLE;
+	else if (param[0] == 2)
+		aconnector->dsc_settings.dsc_force_enable = DSC_CLK_FORCE_DISABLE;
+	else
+		aconnector->dsc_settings.dsc_force_enable = DSC_CLK_FORCE_DEFAULT;
 
 done:
 	kfree(wr_buf);
@@ -1387,7 +1425,7 @@ static ssize_t dp_dsc_slice_width_write(struct file *f, const char __user *buf,
 		return -ENOSPC;
 	}
 
-	if (parse_write_buffer_into_params(wr_buf, wr_buf_size,
+	if (parse_write_buffer_into_params(wr_buf, size,
 					   (long *)param, buf,
 					   max_param_num,
 					   &param_nums)) {
@@ -1411,7 +1449,12 @@ static ssize_t dp_dsc_slice_width_write(struct file *f, const char __user *buf,
 	if (!pipe_ctx || !pipe_ctx->stream)
 		goto done;
 
-	aconnector->dsc_settings.dsc_slice_width = param[0];
+	if (param[0] > 0)
+		aconnector->dsc_settings.dsc_num_slices_h = DIV_ROUND_UP(
+					pipe_ctx->stream->timing.h_addressable,
+					param[0]);
+	else
+		aconnector->dsc_settings.dsc_num_slices_h = 0;
 
 done:
 	kfree(wr_buf);
@@ -1536,7 +1579,7 @@ static ssize_t dp_dsc_slice_height_write(struct file *f, const char __user *buf,
 		return -ENOSPC;
 	}
 
-	if (parse_write_buffer_into_params(wr_buf, wr_buf_size,
+	if (parse_write_buffer_into_params(wr_buf, size,
 					   (long *)param, buf,
 					   max_param_num,
 					   &param_nums)) {
@@ -1560,7 +1603,12 @@ static ssize_t dp_dsc_slice_height_write(struct file *f, const char __user *buf,
 	if (!pipe_ctx || !pipe_ctx->stream)
 		goto done;
 
-	aconnector->dsc_settings.dsc_slice_height = param[0];
+	if (param[0] > 0)
+		aconnector->dsc_settings.dsc_num_slices_v = DIV_ROUND_UP(
+					pipe_ctx->stream->timing.v_addressable,
+					param[0]);
+	else
+		aconnector->dsc_settings.dsc_num_slices_v = 0;
 
 done:
 	kfree(wr_buf);
@@ -1678,7 +1726,7 @@ static ssize_t dp_dsc_bits_per_pixel_write(struct file *f, const char __user *buf,
 		return -ENOSPC;
 	}
 
-	if (parse_write_buffer_into_params(wr_buf, wr_buf_size,
+	if (parse_write_buffer_into_params(wr_buf, size,
 					   (long *)param, buf,
 					   max_param_num,
 					   &param_nums)) {
@@ -2098,6 +2146,7 @@ static const struct {
 	const struct file_operations *fops;
 } dp_debugfs_entries[] = {
 	{"link_settings", &dp_link_settings_debugfs_fops},
+	{"trigger_hotplug", &dp_trigger_hotplug_debugfs_fops},
 	{"phy_settings", &dp_phy_settings_debugfs_fop},
 	{"test_pattern", &dp_phy_test_pattern_fops},
 #ifdef CONFIG_DRM_AMD_DC_HDCP
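The slice-width/height debugfs writes above now store a slice count rather than a raw slice dimension, converting with DIV_ROUND_UP against the active timing. A self-contained sketch of that conversion (DIV_ROUND_UP expanded as the kernel defines it; the sample mode values are invented):

    #include <stdint.h>
    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
        uint32_t h_addressable = 3840; /* active width of the mode */
        uint32_t slice_width = 1024;   /* value written via debugfs */

        /* 3840/1024 rounds up to 4: the slices must cover the whole line. */
        uint32_t num_slices_h =
            slice_width ? DIV_ROUND_UP(h_addressable, slice_width) : 0;

        printf("num_slices_h = %u\n", num_slices_h);
        return 0;
    }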
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __AMDGPU_DM_IRQ_PARAMS_H__
+#define __AMDGPU_DM_IRQ_PARAMS_H__
+
+struct dm_irq_params {
+	u32 last_flip_vblank;
+	struct mod_vrr_params vrr_params;
+	struct dc_stream_state *stream;
+	int active_planes;
+	struct mod_freesync_config freesync_config;
+};
+
+#endif /* __AMDGPU_DM_IRQ_PARAMS_H__ */
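This new header is the pivot of the refactor: commit tail copies the fields the interrupt handlers need into acrtc->dm_irq_params while holding the DRM event_lock, and the vblank/vupdate/pflip handlers read only that copy under the same lock, instead of chasing atomic state that may be mid-swap. A userspace sketch of the pattern, with a pthread mutex standing in for spin_lock_irqsave and all names illustrative:

    #include <pthread.h>
    #include <stdio.h>

    struct irq_params {                 /* stand-in for dm_irq_params */
        int active_planes;
        unsigned int last_flip_vblank;
    };

    static pthread_mutex_t event_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct irq_params irq_copy;  /* only ever touched under event_lock */

    /* Commit path: publish a consistent snapshot for the handler. */
    static void update_irq_parameters(int planes, unsigned int vblank)
    {
        pthread_mutex_lock(&event_lock);
        irq_copy.active_planes = planes;
        irq_copy.last_flip_vblank = vblank;
        pthread_mutex_unlock(&event_lock);
    }

    /* IRQ path: read the snapshot instead of the live atomic state. */
    static int irq_handler_reads_planes(void)
    {
        int planes;

        pthread_mutex_lock(&event_lock);
        planes = irq_copy.active_planes;
        pthread_mutex_unlock(&event_lock);
        return planes;
    }

    int main(void)
    {
        update_irq_parameters(2, 1234);
        printf("planes seen by handler: %d\n", irq_handler_reads_planes());
        return 0;
    }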
@@ -453,9 +453,9 @@ struct dsc_mst_fairness_params {
 	struct dc_dsc_bw_range bw_range;
 	bool compression_possible;
 	struct drm_dp_mst_port *port;
-	bool clock_overwrite;
-	uint32_t slice_width_overwrite;
-	uint32_t slice_height_overwrite;
+	enum dsc_clock_force_state clock_force_enable;
+	uint32_t num_slices_h;
+	uint32_t num_slices_v;
 	uint32_t bpp_overwrite;
 };
 
@@ -496,15 +496,11 @@ static void set_dsc_configs_from_fairness_vars(struct dsc_mst_fairness_params *params,
 		else
 			params[i].timing->dsc_cfg.bits_per_pixel = vars[i].bpp_x16;
 
-		if (params[i].slice_width_overwrite)
-			params[i].timing->dsc_cfg.num_slices_h = DIV_ROUND_UP(
-					params[i].timing->h_addressable,
-					params[i].slice_width_overwrite);
+		if (params[i].num_slices_h)
+			params[i].timing->dsc_cfg.num_slices_h = params[i].num_slices_h;
 
-		if (params[i].slice_height_overwrite)
-			params[i].timing->dsc_cfg.num_slices_v = DIV_ROUND_UP(
-					params[i].timing->v_addressable,
-					params[i].slice_height_overwrite);
+		if (params[i].num_slices_v)
+			params[i].timing->dsc_cfg.num_slices_v = params[i].num_slices_v;
 	} else {
 		params[i].timing->flags.DSC = 0;
 	}
@@ -638,7 +634,7 @@ static void try_disable_dsc(struct drm_atomic_state *state,
 	for (i = 0; i < count; i++) {
 		if (vars[i].dsc_enabled
 				&& vars[i].bpp_x16 == params[i].bw_range.max_target_bpp_x16
-				&& !params[i].clock_overwrite) {
+				&& !params[i].clock_force_enable == DSC_CLK_FORCE_DEFAULT) {
 			kbps_increase[i] = params[i].bw_range.stream_kbps - params[i].bw_range.max_kbps;
 			tried[i] = false;
 			remaining_to_try += 1;
@@ -718,11 +714,11 @@ static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
 		params[count].sink = stream->sink;
 		aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
 		params[count].port = aconnector->port;
-		params[count].clock_overwrite = aconnector->dsc_settings.dsc_clock_en;
-		if (params[count].clock_overwrite)
+		params[count].clock_force_enable = aconnector->dsc_settings.dsc_force_enable;
+		if (params[count].clock_force_enable == DSC_CLK_FORCE_ENABLE)
 			debugfs_overwrite = true;
-		params[count].slice_width_overwrite = aconnector->dsc_settings.dsc_slice_width;
-		params[count].slice_height_overwrite = aconnector->dsc_settings.dsc_slice_height;
+		params[count].num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
+		params[count].num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
 		params[count].bpp_overwrite = aconnector->dsc_settings.dsc_bits_per_pixel;
 		params[count].compression_possible = stream->sink->dsc_caps.dsc_dec_caps.is_dsc_supported;
 		dc_dsc_get_policy_for_timing(params[count].timing, &dsc_policy);
@@ -756,7 +752,7 @@ static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
 
 	/* Try max compression */
 	for (i = 0; i < count; i++) {
-		if (params[i].compression_possible) {
+		if (params[i].compression_possible && params[i].clock_force_enable != DSC_CLK_FORCE_DISABLE) {
 			vars[i].pbn = kbps_to_peak_pbn(params[i].bw_range.min_kbps);
 			vars[i].dsc_enabled = true;
 			vars[i].bpp_x16 = params[i].bw_range.min_target_bpp_x16;
@@ -592,9 +592,6 @@ void pp_rv_set_wm_ranges(struct pp_smu *pp,
 	if (pp_funcs && pp_funcs->set_watermarks_for_clocks_ranges)
 		pp_funcs->set_watermarks_for_clocks_ranges(pp_handle,
 							   &wm_with_clock_ranges);
-	else if (adev->smu.ppt_funcs)
-		smu_set_watermarks_for_clock_ranges(&adev->smu,
-						    &wm_with_clock_ranges);
 }
 
 void pp_rv_set_pme_wa_enable(struct pp_smu *pp)
@@ -667,49 +664,8 @@ static enum pp_smu_status pp_nv_set_wm_ranges(struct pp_smu *pp,
 {
 	const struct dc_context *ctx = pp->dm;
 	struct amdgpu_device *adev = ctx->driver_context;
-	struct dm_pp_wm_sets_with_clock_ranges_soc15 wm_with_clock_ranges;
-	struct dm_pp_clock_range_for_dmif_wm_set_soc15 *wm_dce_clocks =
-			wm_with_clock_ranges.wm_dmif_clocks_ranges;
-	struct dm_pp_clock_range_for_mcif_wm_set_soc15 *wm_soc_clocks =
-			wm_with_clock_ranges.wm_mcif_clocks_ranges;
-	int32_t i;
-
-	wm_with_clock_ranges.num_wm_dmif_sets = ranges->num_reader_wm_sets;
-	wm_with_clock_ranges.num_wm_mcif_sets = ranges->num_writer_wm_sets;
-
-	for (i = 0; i < wm_with_clock_ranges.num_wm_dmif_sets; i++) {
-		if (ranges->reader_wm_sets[i].wm_inst > 3)
-			wm_dce_clocks[i].wm_set_id = WM_SET_A;
-		else
-			wm_dce_clocks[i].wm_set_id =
-					ranges->reader_wm_sets[i].wm_inst;
-		wm_dce_clocks[i].wm_max_dcfclk_clk_in_khz =
-				ranges->reader_wm_sets[i].max_drain_clk_mhz * 1000;
-		wm_dce_clocks[i].wm_min_dcfclk_clk_in_khz =
-				ranges->reader_wm_sets[i].min_drain_clk_mhz * 1000;
-		wm_dce_clocks[i].wm_max_mem_clk_in_khz =
-				ranges->reader_wm_sets[i].max_fill_clk_mhz * 1000;
-		wm_dce_clocks[i].wm_min_mem_clk_in_khz =
-				ranges->reader_wm_sets[i].min_fill_clk_mhz * 1000;
-	}
-
-	for (i = 0; i < wm_with_clock_ranges.num_wm_mcif_sets; i++) {
-		if (ranges->writer_wm_sets[i].wm_inst > 3)
-			wm_soc_clocks[i].wm_set_id = WM_SET_A;
-		else
-			wm_soc_clocks[i].wm_set_id =
-					ranges->writer_wm_sets[i].wm_inst;
-		wm_soc_clocks[i].wm_max_socclk_clk_in_khz =
-				ranges->writer_wm_sets[i].max_fill_clk_mhz * 1000;
-		wm_soc_clocks[i].wm_min_socclk_clk_in_khz =
-				ranges->writer_wm_sets[i].min_fill_clk_mhz * 1000;
-		wm_soc_clocks[i].wm_max_mem_clk_in_khz =
-				ranges->writer_wm_sets[i].max_drain_clk_mhz * 1000;
-		wm_soc_clocks[i].wm_min_mem_clk_in_khz =
-				ranges->writer_wm_sets[i].min_drain_clk_mhz * 1000;
-	}
 
-	smu_set_watermarks_for_clock_ranges(&adev->smu, &wm_with_clock_ranges);
+	smu_set_watermarks_for_clock_ranges(&adev->smu, ranges);
 
 	return PP_SMU_RESULT_OK;
 }
@@ -810,7 +766,7 @@ pp_nv_set_hard_min_uclk_by_freq(struct pp_smu *pp, int mhz)
 }
 
 static enum pp_smu_status pp_nv_set_pstate_handshake_support(
-	struct pp_smu *pp, BOOLEAN pstate_handshake_supported)
+	struct pp_smu *pp, bool pstate_handshake_supported)
 {
 	const struct dc_context *ctx = pp->dm;
 	struct amdgpu_device *adev = ctx->driver_context;
@@ -920,60 +876,8 @@ static enum pp_smu_status pp_rn_set_wm_ranges(struct pp_smu *pp,
 {
 	const struct dc_context *ctx = pp->dm;
 	struct amdgpu_device *adev = ctx->driver_context;
-	struct smu_context *smu = &adev->smu;
-	struct dm_pp_wm_sets_with_clock_ranges_soc15 wm_with_clock_ranges;
-	struct dm_pp_clock_range_for_dmif_wm_set_soc15 *wm_dce_clocks =
-			wm_with_clock_ranges.wm_dmif_clocks_ranges;
-	struct dm_pp_clock_range_for_mcif_wm_set_soc15 *wm_soc_clocks =
-			wm_with_clock_ranges.wm_mcif_clocks_ranges;
-	int32_t i;
-
-	if (!smu->ppt_funcs)
-		return PP_SMU_RESULT_UNSUPPORTED;
-
-	wm_with_clock_ranges.num_wm_dmif_sets = ranges->num_reader_wm_sets;
-	wm_with_clock_ranges.num_wm_mcif_sets = ranges->num_writer_wm_sets;
-
-	for (i = 0; i < wm_with_clock_ranges.num_wm_dmif_sets; i++) {
-		if (ranges->reader_wm_sets[i].wm_inst > 3)
-			wm_dce_clocks[i].wm_set_id = WM_SET_A;
-		else
-			wm_dce_clocks[i].wm_set_id =
-					ranges->reader_wm_sets[i].wm_inst;
-
-		wm_dce_clocks[i].wm_min_dcfclk_clk_in_khz =
-				ranges->reader_wm_sets[i].min_drain_clk_mhz;
-
-		wm_dce_clocks[i].wm_max_dcfclk_clk_in_khz =
-				ranges->reader_wm_sets[i].max_drain_clk_mhz;
-
-		wm_dce_clocks[i].wm_min_mem_clk_in_khz =
-				ranges->reader_wm_sets[i].min_fill_clk_mhz;
-
-		wm_dce_clocks[i].wm_max_mem_clk_in_khz =
-				ranges->reader_wm_sets[i].max_fill_clk_mhz;
-	}
-
-	for (i = 0; i < wm_with_clock_ranges.num_wm_mcif_sets; i++) {
-		if (ranges->writer_wm_sets[i].wm_inst > 3)
-			wm_soc_clocks[i].wm_set_id = WM_SET_A;
-		else
-			wm_soc_clocks[i].wm_set_id =
-					ranges->writer_wm_sets[i].wm_inst;
-		wm_soc_clocks[i].wm_min_socclk_clk_in_khz =
-				ranges->writer_wm_sets[i].min_fill_clk_mhz;
-
-		wm_soc_clocks[i].wm_max_socclk_clk_in_khz =
-				ranges->writer_wm_sets[i].max_fill_clk_mhz;
-
-		wm_soc_clocks[i].wm_min_mem_clk_in_khz =
-				ranges->writer_wm_sets[i].min_drain_clk_mhz;
-
-		wm_soc_clocks[i].wm_max_mem_clk_in_khz =
-				ranges->writer_wm_sets[i].max_drain_clk_mhz;
-	}
 
-	smu_set_watermarks_for_clock_ranges(&adev->smu, &wm_with_clock_ranges);
+	smu_set_watermarks_for_clock_ranges(&adev->smu, ranges);
 
 	return PP_SMU_RESULT_OK;
 }
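Both wm_ranges hooks above used to expand pp_smu_wm_range_sets into dm_pp_wm_sets_with_clock_ranges_soc15 before calling into the SMU, with the Navi path multiplying MHz by 1000 and the Renoir path passing MHz through unconverted; after this change smu_set_watermarks_for_clock_ranges() takes the ranges directly, so any unit handling lives in one place. A tiny sketch of the conversion the removed Navi loop performed (the numbers are invented):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t min_drain_clk_mhz = 400, max_drain_clk_mhz = 1000;

        /* The removed pp_nv_set_wm_ranges() loop scaled MHz to kHz... */
        uint32_t min_khz = min_drain_clk_mhz * 1000;
        uint32_t max_khz = max_drain_clk_mhz * 1000;

        /* ...while the removed pp_rn_set_wm_ranges() loop did not: the
         * per-caller inconsistency the pass-through eliminates. */
        printf("dcfclk range: %u..%u kHz\n", min_khz, max_khz);
        return 0;
    }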
@@ -847,6 +847,73 @@ static enum bp_result bios_parser_get_spread_spectrum_info(
 	return result;
 }
 
+static enum bp_result get_soc_bb_info_v4_4(
+	struct bios_parser *bp,
+	struct bp_soc_bb_info *soc_bb_info)
+{
+	enum bp_result result = BP_RESULT_OK;
+	struct atom_display_controller_info_v4_4 *disp_cntl_tbl = NULL;
+
+	if (!soc_bb_info)
+		return BP_RESULT_BADINPUT;
+
+	if (!DATA_TABLES(dce_info))
+		return BP_RESULT_BADBIOSTABLE;
+
+	if (!DATA_TABLES(smu_info))
+		return BP_RESULT_BADBIOSTABLE;
+
+	disp_cntl_tbl = GET_IMAGE(struct atom_display_controller_info_v4_4,
+							DATA_TABLES(dce_info));
+	if (!disp_cntl_tbl)
+		return BP_RESULT_BADBIOSTABLE;
+
+	soc_bb_info->dram_clock_change_latency_100ns = disp_cntl_tbl->max_mclk_chg_lat;
+	soc_bb_info->dram_sr_enter_exit_latency_100ns = disp_cntl_tbl->max_sr_enter_exit_lat;
+	soc_bb_info->dram_sr_exit_latency_100ns = disp_cntl_tbl->max_sr_exit_lat;
+
+	return result;
+}
+
+static enum bp_result bios_parser_get_soc_bb_info(
+	struct dc_bios *dcb,
+	struct bp_soc_bb_info *soc_bb_info)
+{
+	struct bios_parser *bp = BP_FROM_DCB(dcb);
+	enum bp_result result = BP_RESULT_UNSUPPORTED;
+	struct atom_common_table_header *header;
+	struct atom_data_revision tbl_revision;
+
+	if (!soc_bb_info) /* check for bad input */
+		return BP_RESULT_BADINPUT;
+
+	if (!DATA_TABLES(dce_info))
+		return BP_RESULT_UNSUPPORTED;
+
+	header = GET_IMAGE(struct atom_common_table_header,
+						DATA_TABLES(dce_info));
+	get_atom_data_table_revision(header, &tbl_revision);
+
+	switch (tbl_revision.major) {
+	case 4:
+		switch (tbl_revision.minor) {
+		case 1:
+		case 2:
+		case 3:
+			break;
+		case 4:
+			result = get_soc_bb_info_v4_4(bp, soc_bb_info);
+		default:
+			break;
+		}
+		break;
+	default:
+		break;
+	}
+
+	return result;
+}
+
 static enum bp_result get_embedded_panel_info_v2_1(
 	struct bios_parser *bp,
 	struct embedded_panel_info *info)
@@ -2222,7 +2289,9 @@ static const struct dc_vbios_funcs vbios_funcs = {
 
 	.get_atom_dc_golden_table = bios_get_atom_dc_golden_table,
 
-	.enable_lvtma_control = bios_parser_enable_lvtma_control
+	.enable_lvtma_control = bios_parser_enable_lvtma_control,
+
+	.get_soc_bb_info = bios_parser_get_soc_bb_info,
 };
 
 static bool bios_parser2_construct(
@@ -80,7 +80,7 @@ static const struct state_dependent_clocks dce60_max_clks_by_state[] = {
 /* ClocksStatePerformance */
 { .display_clk_khz = 600000, .pixel_clk_khz = 400000 } };
 
-int dce60_get_dp_ref_freq_khz(struct clk_mgr *clk_mgr_base)
+static int dce60_get_dp_ref_freq_khz(struct clk_mgr *clk_mgr_base)
 {
 	struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
 	int dprefclk_wdivider;
@@ -761,6 +761,7 @@ void rn_clk_mgr_construct(
 {
 	struct dc_debug_options *debug = &ctx->dc->debug;
 	struct dpm_clocks clock_table = { 0 };
+	enum pp_smu_status status = 0;
 
 	clk_mgr->base.ctx = ctx;
 	clk_mgr->base.funcs = &dcn21_funcs;
@@ -818,8 +819,10 @@ void rn_clk_mgr_construct(
 	clk_mgr->base.bw_params = &rn_bw_params;
 
 	if (pp_smu && pp_smu->rn_funcs.get_dpm_clock_table) {
-		pp_smu->rn_funcs.get_dpm_clock_table(&pp_smu->rn_funcs.pp_smu, &clock_table);
-		if (ctx->dc_bios && ctx->dc_bios->integrated_info) {
+		status = pp_smu->rn_funcs.get_dpm_clock_table(&pp_smu->rn_funcs.pp_smu, &clock_table);
+
+		if (status == PP_SMU_RESULT_OK &&
+		    ctx->dc_bios && ctx->dc_bios->integrated_info) {
 			rn_clk_mgr_helper_populate_bw_params(clk_mgr->base.bw_params, &clock_table, ctx->dc_bios->integrated_info);
 		}
 	}
@@ -1246,6 +1246,19 @@ void dc_trigger_sync(struct dc *dc, struct dc_state *context)
 	}
 }
 
+static uint8_t get_stream_mask(struct dc *dc, struct dc_state *context)
+{
+	int i;
+	unsigned int stream_mask = 0;
+
+	for (i = 0; i < dc->res_pool->pipe_count; i++) {
+		if (context->res_ctx.pipe_ctx[i].stream)
+			stream_mask |= 1 << i;
+	}
+
+	return stream_mask;
+}
+
 /*
  * Applies given context to HW and copy it into current context.
  * It's up to the user to release the src context afterwards.
@@ -1273,7 +1286,7 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *context)
 			dc->optimize_seamless_boot_streams++;
 	}
 
-	if (dc->optimize_seamless_boot_streams == 0)
+	if (context->stream_count > dc->optimize_seamless_boot_streams)
 		dc->hwss.prepare_bandwidth(dc, context);
 
 	disable_dangling_plane(dc, context);
@@ -1355,13 +1368,18 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *context)
 
 	dc_enable_stereo(dc, context, dc_streams, context->stream_count);
 
-	if (dc->optimize_seamless_boot_streams == 0) {
+	if (context->stream_count > dc->optimize_seamless_boot_streams) {
 		/* Must wait for no flips to be pending before doing optimize bw */
 		wait_for_no_pipes_pending(dc, context);
 		/* pplib is notified if disp_num changed */
 		dc->hwss.optimize_bandwidth(dc, context);
 	}
 
+	context->stream_mask = get_stream_mask(dc, context);
+
+	if (context->stream_mask != dc->current_state->stream_mask)
+		dc_dmub_srv_notify_stream_mask(dc->ctx->dmub_srv, context->stream_mask);
+
 	for (i = 0; i < context->stream_count; i++)
 		context->streams[i]->mode_changed = false;
 
@@ -1481,13 +1499,8 @@ bool dc_post_update_surfaces_to_stream(struct dc *dc)
 	return true;
 }
 
-struct dc_state *dc_create_state(struct dc *dc)
+static void init_state(struct dc *dc, struct dc_state *context)
 {
-	struct dc_state *context = kvzalloc(sizeof(struct dc_state),
-					    GFP_KERNEL);
-
-	if (!context)
-		return NULL;
 	/* Each context must have their own instance of VBA and in order to
 	 * initialize and obtain IP and SOC the base DML instance from DC is
 	 * initially copied into every context
@@ -1495,6 +1508,17 @@ struct dc_state *dc_create_state(struct dc *dc)
 #ifdef CONFIG_DRM_AMD_DC_DCN
 	memcpy(&context->bw_ctx.dml, &dc->dml, sizeof(struct display_mode_lib));
 #endif
+}
+
+struct dc_state *dc_create_state(struct dc *dc)
+{
+	struct dc_state *context = kzalloc(sizeof(struct dc_state),
+					   GFP_KERNEL);
+
+	if (!context)
+		return NULL;
+
+	init_state(dc, context);
 
 	kref_init(&context->refcount);
 
@@ -2415,8 +2439,7 @@ static void commit_planes_for_stream(struct dc *dc,
 		plane_state->triplebuffer_flips = false;
 		if (update_type == UPDATE_TYPE_FAST &&
 		    dc->hwss.program_triplebuffer != NULL &&
-		    !plane_state->flip_immediate &&
-		    !dc->debug.disable_tri_buf) {
+		    !plane_state->flip_immediate && dc->debug.enable_tri_buf) {
 			/*triple buffer for VUpdate only*/
 			plane_state->triplebuffer_flips = true;
 		}
@@ -2443,8 +2466,7 @@ static void commit_planes_for_stream(struct dc *dc,
 
 		ASSERT(!pipe_ctx->plane_state->triplebuffer_flips);
 
-		if (dc->hwss.program_triplebuffer != NULL &&
-		    !dc->debug.disable_tri_buf) {
+		if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) {
 			/*turn off triple buffer for full update*/
 			dc->hwss.program_triplebuffer(
 				dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips);
@@ -2509,8 +2531,7 @@ static void commit_planes_for_stream(struct dc *dc,
 			if (pipe_ctx->plane_state != plane_state)
 				continue;
 			/*program triple buffer after lock based on flip type*/
-			if (dc->hwss.program_triplebuffer != NULL &&
-			    !dc->debug.disable_tri_buf) {
+			if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) {
 				/*only enable triplebuffer for fast_update*/
 				dc->hwss.program_triplebuffer(
 					dc, pipe_ctx, plane_state->triplebuffer_flips);
@@ -2965,7 +2986,7 @@ bool dc_set_psr_allow_active(struct dc *dc, bool enable)
 		if (enable && !link->psr_settings.psr_allow_active)
 			return dc_link_set_psr_allow_active(link, true, false);
 		else if (!enable && link->psr_settings.psr_allow_active)
-			return dc_link_set_psr_allow_active(link, false, false);
+			return dc_link_set_psr_allow_active(link, false, true);
 	}
 }
 
@@ -3018,4 +3039,10 @@ void dc_lock_memory_clock_frequency(struct dc *dc)
 		if (dc->current_state->res_ctx.pipe_ctx[i].plane_state)
 			core_link_enable_stream(dc->current_state, &dc->current_state->res_ctx.pipe_ctx[i]);
 }
+
+bool dc_is_plane_eligible_for_idle_optimizaitons(struct dc *dc,
+						 struct dc_plane_state *plane)
+{
+	return false;
+}
 #endif
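get_stream_mask() above packs one bit per pipe with an attached stream into a small integer, and commit tail notifies the DMUB firmware only when that mask actually changes. The bit math in isolation, as a standalone sketch (the pipe count and which pipes carry streams are assumptions; in dc the loop bound is res_pool->pipe_count):

    #include <stdio.h>

    #define MAX_PIPES 6 /* assumption for the sketch */

    int main(void)
    {
        /* Pretend pipes 0 and 3 have streams attached. */
        const void *stream_on_pipe[MAX_PIPES] = { (void *)1, 0, 0, (void *)1, 0, 0 };
        unsigned int stream_mask = 0;
        int i;

        for (i = 0; i < MAX_PIPES; i++) {
            if (stream_on_pipe[i])
                stream_mask |= 1u << i;
        }

        printf("stream_mask = 0x%x\n", stream_mask); /* prints 0x9 */
        return 0;
    }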
@@ -2946,7 +2946,7 @@ enum dc_status dc_link_allocate_mst_payload(struct pipe_ctx *pipe_ctx)
 	pbn = get_pbn_from_timing(pipe_ctx);
 	avg_time_slots_per_mtp = dc_fixpt_div(pbn, pbn_per_slot);
 
-	stream_encoder->funcs->set_mst_bandwidth(
+	stream_encoder->funcs->set_throttled_vcp_size(
 		stream_encoder,
 		avg_time_slots_per_mtp);
 
@@ -2974,7 +2974,7 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx)
 	 */
 
 	/* slot X.Y */
-	stream_encoder->funcs->set_mst_bandwidth(
+	stream_encoder->funcs->set_throttled_vcp_size(
 		stream_encoder,
 		avg_time_slots_per_mtp);
 
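The rename to set_throttled_vcp_size() reflects what the value being programmed actually is: the average number of MTP time slots a stream may occupy, computed above as a fixed-point division of the stream's PBN by the PBN available per slot. A floating-point approximation of that computation (dc uses dc_fixpt_div; the sample PBN figures are invented):

    #include <stdio.h>

    int main(void)
    {
        /* Illustrative values: payload bandwidth number for the stream,
         * and the PBN one MTP time slot can carry on this link. */
        double pbn = 1398.0;
        double pbn_per_slot = 60.0;

        double avg_time_slots_per_mtp = pbn / pbn_per_slot;

        /* The stream encoder is then throttled to this VC payload size. */
        printf("avg time slots per MTP: %.2f\n", avg_time_slots_per_mtp);
        return 0;
    }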
@@ -49,14 +49,14 @@ static struct dc_link_settings get_common_supported_link_settings(
 	struct dc_link_settings link_setting_a,
 	struct dc_link_settings link_setting_b);
 
-static uint32_t get_training_aux_rd_interval(
+static uint32_t get_eq_training_aux_rd_interval(
 		struct dc_link *link,
-		uint32_t default_wait_in_micro_secs)
+		const struct dc_link_settings *link_settings)
 {
 	union training_aux_rd_interval training_rd_interval;
+	uint32_t wait_in_micro_secs = 400;
 
 	memset(&training_rd_interval, 0, sizeof(training_rd_interval));
 
 	/* overwrite the delay if rev > 1.1*/
 	if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_12) {
 		/* DP 1.2 or later - retrieve delay through
@@ -68,10 +68,10 @@ static uint32_t get_eq_training_aux_rd_interval(
 				sizeof(training_rd_interval));
 
 		if (training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL)
-			default_wait_in_micro_secs = training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL * 4000;
+			wait_in_micro_secs = training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL * 4000;
 	}
 
-	return default_wait_in_micro_secs;
+	return wait_in_micro_secs;
 }
 
 static void wait_for_training_aux_rd_interval(
@@ -101,7 +101,16 @@ static void dpcd_set_training_pattern(
 		dpcd_pattern.v1_4.TRAINING_PATTERN_SET);
 }
 
-static enum dc_dp_training_pattern get_supported_tp(struct dc_link *link)
+static enum dc_dp_training_pattern decide_cr_training_pattern(
+		const struct dc_link_settings *link_settings)
+{
+	enum dc_dp_training_pattern pattern = DP_TRAINING_PATTERN_SEQUENCE_1;
+
+	return pattern;
+}
+
+static enum dc_dp_training_pattern decide_eq_training_pattern(struct dc_link *link,
+		const struct dc_link_settings *link_settings)
 {
 	enum dc_dp_training_pattern highest_tp = DP_TRAINING_PATTERN_SEQUENCE_2;
 	struct encoder_feature_support *features = &link->link_enc->features;
@@ -132,7 +141,6 @@ static void dpcd_set_link_settings(
 
 	union down_spread_ctrl downspread = { {0} };
 	union lane_count_set lane_count_set = { {0} };
-	enum dc_dp_training_pattern dp_tr_pattern;
 
 	downspread.raw = (uint8_t)
 		(lt_settings->link_settings.link_spread);
@@ -143,9 +151,8 @@ static void dpcd_set_link_settings(
 	lane_count_set.bits.ENHANCED_FRAMING = lt_settings->enhanced_framing;
 	lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED = 0;
 
-	dp_tr_pattern = get_supported_tp(link);
-
-	if (dp_tr_pattern != DP_TRAINING_PATTERN_SEQUENCE_4) {
+	if (lt_settings->pattern_for_eq < DP_TRAINING_PATTERN_SEQUENCE_4) {
 		lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED =
 				link->dpcd_caps.max_ln_count.bits.POST_LT_ADJ_REQ_SUPPORTED;
 	}
@@ -373,34 +380,30 @@ static void dpcd_set_lt_pattern_and_lane_settings(
 static bool is_cr_done(enum dc_lane_count ln_count,
 		       union lane_status *dpcd_lane_status)
 {
-	bool done = true;
 	uint32_t lane;
 	/*LANEx_CR_DONE bits All 1's?*/
 	for (lane = 0; lane < (uint32_t)(ln_count); lane++) {
 		if (!dpcd_lane_status[lane].bits.CR_DONE_0)
-			done = false;
+			return false;
 	}
-	return done;
+
+	return true;
 }
 
 static bool is_ch_eq_done(enum dc_lane_count ln_count,
 			  union lane_status *dpcd_lane_status,
 			  union lane_align_status_updated *lane_status_updated)
 {
-	bool done = true;
 	uint32_t lane;
 	if (!lane_status_updated->bits.INTERLANE_ALIGN_DONE)
-		done = false;
+		return false;
 	else {
 		for (lane = 0; lane < (uint32_t)(ln_count); lane++) {
 			if (!dpcd_lane_status[lane].bits.SYMBOL_LOCKED_0 ||
 			    !dpcd_lane_status[lane].bits.CHANNEL_EQ_DONE_0)
-				done = false;
+				return false;
 		}
 	}
-	return done;
+
+	return true;
 }
 
 static void update_drive_settings(
@@ -979,7 +982,7 @@ static void start_clock_recovery_pattern_early(struct dc_link *link,
 {
 	DC_LOG_HW_LINK_TRAINING("%s\n GPU sends TPS1. Wait 400us.\n",
 			__func__);
-	dp_set_hw_training_pattern(link, DP_TRAINING_PATTERN_SEQUENCE_1, offset);
+	dp_set_hw_training_pattern(link, lt_settings->pattern_for_cr, offset);
 	dp_set_hw_lane_settings(link, lt_settings, offset);
 	udelay(400);
 }
@@ -994,7 +997,6 @@ static enum link_training_result perform_clock_recovery_sequence(
 	uint32_t wait_time_microsec;
 	struct link_training_settings req_settings;
 	enum dc_lane_count lane_count = lt_settings->link_settings.lane_count;
-	enum dc_dp_training_pattern tr_pattern = DP_TRAINING_PATTERN_SEQUENCE_1;
 	union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX];
 	union lane_align_status_updated dpcd_lane_status_updated;
 
@@ -1002,7 +1004,7 @@ static enum link_training_result perform_clock_recovery_sequence(
 	retry_count = 0;
 
 	if (!link->ctx->dc->work_arounds.lt_early_cr_pattern)
-		dp_set_hw_training_pattern(link, tr_pattern, offset);
+		dp_set_hw_training_pattern(link, lt_settings->pattern_for_cr, offset);
 
 	/* najeeb - The synaptics MST hub can put the LT in
 	 * infinite loop by switching the VS
@@ -1029,7 +1031,7 @@ static enum link_training_result perform_clock_recovery_sequence(
 			dpcd_set_lt_pattern_and_lane_settings(
 					link,
 					lt_settings,
-					tr_pattern,
+					lt_settings->pattern_for_cr,
 					offset);
 		else
 			dpcd_set_lane_settings(
@@ -1113,7 +1115,7 @@ static inline enum link_training_result perform_link_training_int(
 	 * TPS4 must be used instead of POST_LT_ADJ_REQ.
 	 */
 	if (link->dpcd_caps.max_ln_count.bits.POST_LT_ADJ_REQ_SUPPORTED != 1 ||
-	    get_supported_tp(link) == DP_TRAINING_PATTERN_SEQUENCE_4)
+	    lt_settings->pattern_for_eq == DP_TRAINING_PATTERN_SEQUENCE_4)
 		return status;
 
 	if (status == LINK_TRAINING_SUCCESS &&
@@ -1245,17 +1247,21 @@ static void initialize_training_settings(
 	if (overrides->cr_pattern_time != NULL)
 		lt_settings->cr_pattern_time = *overrides->cr_pattern_time;
 	else
-		lt_settings->cr_pattern_time = get_training_aux_rd_interval(link, 100);
+		lt_settings->cr_pattern_time = 100;
 
 	if (overrides->eq_pattern_time != NULL)
 		lt_settings->eq_pattern_time = *overrides->eq_pattern_time;
 	else
-		lt_settings->eq_pattern_time = get_training_aux_rd_interval(link, 400);
+		lt_settings->eq_pattern_time = get_eq_training_aux_rd_interval(link, link_setting);
 
+	if (overrides->pattern_for_cr != NULL)
+		lt_settings->pattern_for_cr = *overrides->pattern_for_cr;
+	else
+		lt_settings->pattern_for_cr = decide_cr_training_pattern(link_setting);
 	if (overrides->pattern_for_eq != NULL)
 		lt_settings->pattern_for_eq = *overrides->pattern_for_eq;
 	else
-		lt_settings->pattern_for_eq = get_supported_tp(link);
+		lt_settings->pattern_for_eq = decide_eq_training_pattern(link, link_setting);
 
 	if (overrides->enhanced_framing != NULL)
 		lt_settings->enhanced_framing = *overrides->enhanced_framing;
@@ -1457,7 +1463,6 @@ bool dc_link_dp_perform_link_training_skip_aux(
 	const struct dc_link_settings *link_setting)
 {
 	struct link_training_settings lt_settings;
-	enum dc_dp_training_pattern pattern_for_cr = DP_TRAINING_PATTERN_SEQUENCE_1;
 
 	initialize_training_settings(
 			link,
@@ -1468,7 +1473,7 @@ bool dc_link_dp_perform_link_training_skip_aux(
 	/* 1. Perform_clock_recovery_sequence. */
 
 	/* transmit training pattern for clock recovery */
-	dp_set_hw_training_pattern(link, pattern_for_cr, DPRX);
+	dp_set_hw_training_pattern(link, lt_settings.pattern_for_cr, DPRX);
 
 	/* call HWSS to set lane settings*/
 	dp_set_hw_lane_settings(link, &lt_settings, DPRX);
@@ -1610,6 +1615,9 @@ bool perform_link_training_with_retries(
 
 	for (j = 0; j < attempts; ++j) {
 
+		DC_LOG_HW_LINK_TRAINING("%s: Beginning link training attempt %u of %d\n",
+			__func__, (unsigned int)j + 1, attempts);
+
 		dp_enable_link_phy(
 			link,
 			signal,
@@ -1638,6 +1646,9 @@ bool perform_link_training_with_retries(
 		if (j == (attempts - 1))
 			break;
 
+		DC_LOG_WARNING("%s: Link training attempt %u of %d failed\n",
+			__func__, (unsigned int)j + 1, attempts);
+
 		dp_disable_link_phy(link, signal);
 
 		msleep(delay_between_attempts);
@ -104,6 +104,12 @@ void dp_enable_link_phy(
|
|||
struct clock_source *dp_cs =
|
||||
link->dc->res_pool->dp_clock_source;
|
||||
unsigned int i;
|
||||
|
||||
if (link->connector_signal == SIGNAL_TYPE_EDP) {
|
||||
link->dc->hwss.edp_power_control(link, true);
|
||||
link->dc->hwss.edp_wait_for_hpd_ready(link, true);
|
||||
}
|
||||
|
||||
/* If the current pixel clock source is not DTO(happens after
|
||||
* switching from HDMI passive dongle to DP on the same connector),
|
||||
* switch the pixel clock source to DTO.
|
||||
|
|
|
@@ -782,7 +782,13 @@ static void calculate_recout(struct pipe_ctx *pipe_ctx)
 
 	calculate_split_count_and_index(pipe_ctx, &split_count, &split_idx);
 
-	data->recout.x = stream->dst.x;
+	/*
+	 * Only the leftmost ODM pipe should be offset by a nonzero distance
+	 */
+	if (!pipe_ctx->prev_odm_pipe)
+		data->recout.x = stream->dst.x;
+	else
+		data->recout.x = 0;
 	if (stream->src.x < surf_clip.x)
 		data->recout.x += (surf_clip.x - stream->src.x) * stream->dst.width
 				/ stream->src.width;
@@ -957,7 +963,7 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx)
 {
 	const struct dc_plane_state *plane_state = pipe_ctx->plane_state;
 	const struct dc_stream_state *stream = pipe_ctx->stream;
-	struct pipe_ctx *odm_pipe = pipe_ctx->prev_odm_pipe;
+	struct pipe_ctx *odm_pipe = pipe_ctx;
 	struct scaler_data *data = &pipe_ctx->plane_res.scl_data;
 	struct rect src = pipe_ctx->plane_state->src_rect;
 	int recout_skip_h, recout_skip_v, surf_size_h, surf_size_v;
@@ -988,21 +994,24 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx)
 		swap(src.width, src.height);
 	}
 
+	/*modified recout_skip_h calculation due to odm having no recout offset*/
+	while (odm_pipe->prev_odm_pipe) {
+		odm_idx++;
+		odm_pipe = odm_pipe->prev_odm_pipe;
+	}
+	/*odm_pipe is the leftmost pipe in the ODM group*/
+	recout_skip_h = odm_idx * data->recout.width;
+
 	/* Recout matching initial vp offset = recout_offset - (stream dst offset +
 	 * ((surf dst offset - stream src offset) * 1/ stream scaling ratio)
 	 * - (surf surf_src offset * 1/ full scl ratio))
 	 */
-	recout_skip_h = data->recout.x - (stream->dst.x + (plane_state->dst_rect.x - stream->src.x)
+	recout_skip_h += odm_pipe->plane_res.scl_data.recout.x
+			- (stream->dst.x + (plane_state->dst_rect.x - stream->src.x)
 			* stream->dst.width / stream->src.width -
 			src.x * plane_state->dst_rect.width / src.width
 			* stream->dst.width / stream->src.width);
-	/*modified recout_skip_h calculation due to odm having no recout offset*/
-	while (odm_pipe) {
-		odm_idx++;
-		odm_pipe = odm_pipe->prev_odm_pipe;
-	}
-	if (odm_idx)
-		recout_skip_h += odm_idx * data->recout.width;
-
 
 	recout_skip_v = data->recout.y - (stream->dst.y + (plane_state->dst_rect.y - stream->src.y)
 			* stream->dst.height / stream->src.height -
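The net effect of the recout change: only the leftmost ODM pipe keeps the stream destination offset, and each pipe to its right skips one full recout width per upstream pipe. A tiny standalone check of that arithmetic, with illustrative numbers rather than driver state:

	#include <assert.h>

	/* pixels consumed by ODM pipes to the left of pipe odm_idx */
	static int recout_skip_for(int odm_idx, int recout_width)
	{
		return odm_idx * recout_width;
	}

	int main(void)
	{
		assert(recout_skip_for(0, 1920) == 0);		/* leftmost pipe: no skip */
		assert(recout_skip_for(1, 1920) == 1920);	/* second pipe of a 2-way split */
		return 0;
	}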
@@ -123,7 +123,6 @@ static bool dc_stream_construct(struct dc_stream_state *stream,
 		return false;
 	}
 	stream->out_transfer_func->type = TF_TYPE_BYPASS;
-	stream->out_transfer_func->ctx = stream->ctx;
 
 	stream->stream_id = stream->ctx->dc_stream_id_count;
 	stream->ctx->dc_stream_id_count++;
@@ -298,7 +297,7 @@ bool dc_stream_set_cursor_attributes(
 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
 	/* disable idle optimizations while updating cursor */
 	if (dc->idle_optimizations_allowed) {
-		dc->hwss.apply_idle_power_optimizations(dc, false);
+		dc_allow_idle_optimizations(dc, false);
 		reset_idle_optimizations = true;
 	}
 
@@ -326,7 +325,7 @@ bool dc_stream_set_cursor_attributes(
 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
 	/* re-enable idle optimizations if necessary */
 	if (reset_idle_optimizations)
-		dc->hwss.apply_idle_power_optimizations(dc, true);
+		dc_allow_idle_optimizations(dc, true);
 
 #endif
 	return true;
@@ -359,9 +358,8 @@ bool dc_stream_set_cursor_position(
 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
 
 	/* disable idle optimizations if enabling cursor */
-	if (dc->idle_optimizations_allowed &&
-			!stream->cursor_position.enable && position->enable) {
-		dc->hwss.apply_idle_power_optimizations(dc, false);
+	if (dc->idle_optimizations_allowed && !stream->cursor_position.enable && position->enable) {
+		dc_allow_idle_optimizations(dc, false);
 		reset_idle_optimizations = true;
 	}
 
@@ -392,7 +390,7 @@ bool dc_stream_set_cursor_position(
 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
 	/* re-enable idle optimizations if necessary */
 	if (reset_idle_optimizations)
-		dc->hwss.apply_idle_power_optimizations(dc, true);
+		dc_allow_idle_optimizations(dc, true);
 
 #endif
 	return true;
@@ -48,22 +48,17 @@ static void dc_plane_construct(struct dc_context *ctx, struct dc_plane_state *pl
 	plane_state->in_transfer_func = dc_create_transfer_func();
 	if (plane_state->in_transfer_func != NULL) {
 		plane_state->in_transfer_func->type = TF_TYPE_BYPASS;
-		plane_state->in_transfer_func->ctx = ctx;
 	}
 	plane_state->in_shaper_func = dc_create_transfer_func();
 	if (plane_state->in_shaper_func != NULL) {
 		plane_state->in_shaper_func->type = TF_TYPE_BYPASS;
-		plane_state->in_shaper_func->ctx = ctx;
 	}
 
 	plane_state->lut3d_func = dc_create_3dlut_func();
-	if (plane_state->lut3d_func != NULL) {
-		plane_state->lut3d_func->ctx = ctx;
-	}
+
 	plane_state->blend_tf = dc_create_transfer_func();
 	if (plane_state->blend_tf != NULL) {
 		plane_state->blend_tf->type = TF_TYPE_BYPASS;
-		plane_state->blend_tf->ctx = ctx;
 	}
 
 }
@@ -42,7 +42,7 @@
 #include "inc/hw/dmcu.h"
 #include "dml/display_mode_lib.h"
 
-#define DC_VER "3.2.99"
+#define DC_VER "3.2.102"
 
 #define MAX_SURFACES 3
 #define MAX_PLANES 6
@@ -476,7 +476,7 @@ struct dc_debug_options {
 	unsigned int force_odm_combine_4to1; //bit vector based on otg inst
 #endif
 	unsigned int force_fclk_khz;
-	bool disable_tri_buf;
+	bool enable_tri_buf;
 	bool dmub_offload_enabled;
 	bool dmcub_emulation;
 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
@@ -745,7 +745,6 @@ struct dc_transfer_func {
 	enum dc_transfer_func_predefined tf;
 	/* FP16 1.0 reference level in nits, default is 80 nits, only for PQ*/
 	uint32_t sdr_ref_white_level;
-	struct dc_context *ctx;
 	union {
 		struct pwl_params pwl;
 		struct dc_transfer_func_distributed_points tf_pts;
@@ -772,7 +771,6 @@ struct dc_3dlut {
 	struct tetrahedral_params lut_3d;
 	struct fixed31_32 hdr_multiplier;
 	union dc_3dlut_state state;
-	struct dc_context *ctx;
 };
 /*
 * This structure is filled in by dc_surface_get_status and contains
@@ -1250,6 +1248,9 @@ enum dc_status dc_set_clock(struct dc *dc, enum dc_clock_type clock_type, uint32
 void dc_get_clock(struct dc *dc, enum dc_clock_type clock_type, struct dc_clock_config *clock_cfg);
 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
 
+bool dc_is_plane_eligible_for_idle_optimizations(struct dc *dc,
+		struct dc_plane_state *plane);
 
+void dc_allow_idle_optimizations(struct dc *dc, bool allow);
 
 /*
@@ -140,6 +140,10 @@ struct dc_vbios_funcs {
 	enum bp_result (*enable_lvtma_control)(
 		struct dc_bios *bios,
 		uint8_t uc_pwr_on);
+
+	enum bp_result (*get_soc_bb_info)(
+		struct dc_bios *dcb,
+		struct bp_soc_bb_info *soc_bb_info);
 };
 
 struct bios_registers {

@@ -132,3 +132,19 @@ void dc_dmub_srv_wait_phy_init(struct dc_dmub_srv *dc_dmub_srv)
 		/* Continue spinning so we don't hang the ASIC. */
 	}
 }
+
+bool dc_dmub_srv_notify_stream_mask(struct dc_dmub_srv *dc_dmub_srv,
+				    unsigned int stream_mask)
+{
+	struct dmub_srv *dmub;
+	const uint32_t timeout = 30;
+
+	if (!dc_dmub_srv || !dc_dmub_srv->dmub)
+		return false;
+
+	dmub = dc_dmub_srv->dmub;
+
+	return dmub_srv_send_gpint_command(
+		       dmub, DMUB_GPINT__IDLE_OPT_NOTIFY_STREAM_MASK,
+		       stream_mask, timeout) == DMUB_STATUS_OK;
+}

@@ -56,4 +56,6 @@ void dc_dmub_srv_wait_idle(struct dc_dmub_srv *dc_dmub_srv);
 
 void dc_dmub_srv_wait_phy_init(struct dc_dmub_srv *dc_dmub_srv);
 
+bool dc_dmub_srv_notify_stream_mask(struct dc_dmub_srv *dc_dmub_srv,
+				    unsigned int stream_mask);
 #endif /* _DMUB_DC_SRV_H_ */
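A plausible call site for the new helper, assuming the caller already tracks a per-context stream_mask (the field is added to struct dc_state later in this diff); the surrounding names are assumptions for illustration, not code from the tree:

	/* sketch: tell the DMCUB firmware which streams remain active */
	static void notify_active_streams(struct dc *dc, struct dc_state *context)
	{
		if (!dc_dmub_srv_notify_stream_mask(dc->ctx->dmub_srv,
						    context->stream_mask))
			DC_LOG_WARNING("DMCUB stream-mask notify failed or timed out");
	}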
@@ -123,6 +123,7 @@ struct dc_link_training_overrides {
 
 	uint16_t *cr_pattern_time;
 	uint16_t *eq_pattern_time;
+	enum dc_dp_training_pattern *pattern_for_cr;
 	enum dc_dp_training_pattern *pattern_for_eq;
 
 	enum dc_link_spread *downspread;

@@ -122,7 +122,7 @@ struct dc_context {
 };
 
 
-#define DC_MAX_EDID_BUFFER_SIZE 1024
+#define DC_MAX_EDID_BUFFER_SIZE 1280
 #define DC_EDID_BLOCK_SIZE 128
 #define MAX_SURFACE_NUM 4
 #define NUM_PIXEL_FORMATS 10
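The bump from 1024 to 1280 bytes keeps the EDID buffer a whole number of 128-byte blocks, now 10 instead of 8. A compile-time sketch of that invariant (written here for illustration, not taken from the tree):

	#define DC_EDID_BLOCK_SIZE 128
	#define DC_MAX_EDID_BUFFER_SIZE 1280

	_Static_assert(DC_MAX_EDID_BUFFER_SIZE % DC_EDID_BLOCK_SIZE == 0,
		       "EDID buffer must hold whole 128-byte blocks");
	/* 1280 / 128 = 10 blocks: the base EDID plus up to 9 extension
	 * blocks, versus 8 blocks total at the old 1024-byte size. */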
@@ -710,7 +710,7 @@ static void dce110_stream_encoder_lvds_set_stream_attribute(
 		ASSERT(crtc_timing->pixel_encoding == PIXEL_ENCODING_RGB);
 }
 
-static void dce110_stream_encoder_set_mst_bandwidth(
+static void dce110_stream_encoder_set_throttled_vcp_size(
 	struct stream_encoder *enc,
 	struct fixed31_32 avg_time_slots_per_mtp)
 {
@@ -1621,8 +1621,8 @@ static const struct stream_encoder_funcs dce110_str_enc_funcs = {
 		dce110_stream_encoder_dvi_set_stream_attribute,
 	.lvds_set_stream_attribute =
 		dce110_stream_encoder_lvds_set_stream_attribute,
-	.set_mst_bandwidth =
-		dce110_stream_encoder_set_mst_bandwidth,
+	.set_throttled_vcp_size =
+		dce110_stream_encoder_set_throttled_vcp_size,
 	.update_hdmi_info_packets =
 		dce110_stream_encoder_update_hdmi_info_packets,
 	.stop_hdmi_info_packets =
@@ -810,37 +810,66 @@ void dce110_edp_power_control(
 
 	if (power_up !=
 		link->panel_cntl->funcs->is_panel_powered_on(link->panel_cntl)) {
+
+		unsigned long long current_ts = dm_get_timestamp(ctx);
+		unsigned long long time_since_edp_poweroff_ms =
+				div64_u64(dm_get_elapse_time_in_ns(
+						ctx,
+						current_ts,
+						link->link_trace.time_stamp.edp_poweroff), 1000000);
+		unsigned long long time_since_edp_poweron_ms =
+				div64_u64(dm_get_elapse_time_in_ns(
+						ctx,
+						current_ts,
+						link->link_trace.time_stamp.edp_poweron), 1000000);
+		DC_LOG_HW_RESUME_S3(
+				"%s: transition: power_up=%d current_ts=%llu edp_poweroff=%llu edp_poweron=%llu time_since_edp_poweroff_ms=%llu time_since_edp_poweron_ms=%llu",
+				__func__,
+				power_up,
+				current_ts,
+				link->link_trace.time_stamp.edp_poweroff,
+				link->link_trace.time_stamp.edp_poweron,
+				time_since_edp_poweroff_ms,
+				time_since_edp_poweron_ms);
+
 		/* Send VBIOS command to prompt eDP panel power */
 		if (power_up) {
-			unsigned long long current_ts = dm_get_timestamp(ctx);
-			unsigned long long duration_in_ms =
-					div64_u64(dm_get_elapse_time_in_ns(
-							ctx,
-							current_ts,
-							link->link_trace.time_stamp.edp_poweroff), 1000000);
-			unsigned long long wait_time_ms = 0;
-
-			/* max 500ms from LCDVDD off to on */
-			unsigned long long edp_poweroff_time_ms = 500;
+			/* edp requires a min of 500ms from LCDVDD off to on */
+			unsigned long long remaining_min_edp_poweroff_time_ms = 500;
 
+			/* add time defined by a patch, if any (usually patch extra_t12_ms is 0) */
 			if (link->local_sink != NULL)
-				edp_poweroff_time_ms =
-						500 + link->local_sink->edid_caps.panel_patch.extra_t12_ms;
-			if (link->link_trace.time_stamp.edp_poweroff == 0)
-				wait_time_ms = edp_poweroff_time_ms;
-			else if (duration_in_ms < edp_poweroff_time_ms)
-				wait_time_ms = edp_poweroff_time_ms - duration_in_ms;
+				remaining_min_edp_poweroff_time_ms +=
+					link->local_sink->edid_caps.panel_patch.extra_t12_ms;
 
-			if (wait_time_ms) {
-				msleep(wait_time_ms);
-				dm_output_to_console("%s: wait %lld ms to power on eDP.\n",
-						__func__, wait_time_ms);
+			/* Adjust remaining_min_edp_poweroff_time_ms if this is not the first time. */
+			if (link->link_trace.time_stamp.edp_poweroff != 0) {
+				if (time_since_edp_poweroff_ms < remaining_min_edp_poweroff_time_ms)
+					remaining_min_edp_poweroff_time_ms =
+						remaining_min_edp_poweroff_time_ms - time_since_edp_poweroff_ms;
+				else
+					remaining_min_edp_poweroff_time_ms = 0;
 			}
+
+			if (remaining_min_edp_poweroff_time_ms) {
+				DC_LOG_HW_RESUME_S3(
+						"%s: remaining_min_edp_poweroff_time_ms=%llu: begin wait.\n",
+						__func__, remaining_min_edp_poweroff_time_ms);
+				msleep(remaining_min_edp_poweroff_time_ms);
+				DC_LOG_HW_RESUME_S3(
+						"%s: remaining_min_edp_poweroff_time_ms=%llu: end wait.\n",
+						__func__, remaining_min_edp_poweroff_time_ms);
+				dm_output_to_console("%s: wait %lld ms to power on eDP.\n",
+						__func__, remaining_min_edp_poweroff_time_ms);
+			} else {
+				DC_LOG_HW_RESUME_S3(
+						"%s: remaining_min_edp_poweroff_time_ms=%llu: no wait required.\n",
+						__func__, remaining_min_edp_poweroff_time_ms);
+			}
 		}
 
 		DC_LOG_HW_RESUME_S3(
-				"%s: Panel Power action: %s\n",
+				"%s: BEGIN: Panel Power action: %s\n",
 				__func__, (power_up ? "On":"Off"));
 
 		cntl.action = power_up ?
@@ -864,12 +893,23 @@ void dce110_edp_power_control(
 
 		bp_result = link_transmitter_control(ctx->dc_bios, &cntl);
 
+		DC_LOG_HW_RESUME_S3(
+				"%s: END: Panel Power action: %s bp_result=%u\n",
+				__func__, (power_up ? "On":"Off"),
+				bp_result);
+
 		if (!power_up)
 			/*save driver power off time stamp*/
 			link->link_trace.time_stamp.edp_poweroff = dm_get_timestamp(ctx);
 		else
 			link->link_trace.time_stamp.edp_poweron = dm_get_timestamp(ctx);
 
+		DC_LOG_HW_RESUME_S3(
+				"%s: updated values: edp_poweroff=%llu edp_poweron=%llu\n",
+				__func__,
+				link->link_trace.time_stamp.edp_poweroff,
+				link->link_trace.time_stamp.edp_poweron);
+
 		if (bp_result != BP_RESULT_OK)
			DC_LOG_ERROR(
					"%s: Panel Power bp_result: %d\n",
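The reworked power-up branch enforces a minimum LCDVDD-off (T12) budget of 500 ms plus any per-panel extra_t12_ms quirk, sleeping only for the portion of that budget that has not already elapsed. Reduced to its arithmetic, it is equivalent to this standalone sketch (names assumed, all values in milliseconds):

	static unsigned long long t12_wait_ms(unsigned long long since_poweroff_ms,
					      unsigned long long extra_t12_ms,
					      int ever_powered_off)
	{
		unsigned long long min_off_ms = 500 + extra_t12_ms;

		if (!ever_powered_off)
			return min_off_ms;	/* no poweroff timestamp yet: wait in full */
		return since_poweroff_ms < min_off_ms ?
				min_off_ms - since_poweroff_ms : 0;
	}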
@@ -325,8 +325,6 @@ bool cm_helper_translate_curve_to_hw_format(
 	if (output_tf == NULL || lut_params == NULL || output_tf->type == TF_TYPE_BYPASS)
 		return false;
 
-	PERF_TRACE_CTX(output_tf->ctx);
-
 	corner_points = lut_params->corner_points;
 	rgb_resulted = lut_params->rgb_resulted;
 	hw_points = 0;
@@ -524,8 +522,6 @@ bool cm_helper_translate_curve_to_degamma_hw_format(
 	if (output_tf == NULL || lut_params == NULL || output_tf->type == TF_TYPE_BYPASS)
 		return false;
 
-	PERF_TRACE_CTX(output_tf->ctx);
-
 	corner_points = lut_params->corner_points;
 	rgb_resulted = lut_params->rgb_resulted;
 	hw_points = 0;
@@ -734,6 +734,9 @@ bool hubp1_is_flip_pending(struct hubp *hubp)
 	struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
 	struct dc_plane_address earliest_inuse_address;
 
+	if (hubp && hubp->power_gated)
+		return false;
+
 	REG_GET(DCSURF_FLIP_CONTROL,
 			SURFACE_FLIP_PENDING, &flip_pending);
 
@@ -2765,7 +2765,7 @@ bool dcn10_disconnect_pipes(
 		struct dc *dc,
 		struct dc_state *context)
 {
-	bool found_stream = false;
+	bool found_pipe = false;
 	int i, j;
 	struct dce_hwseq *hws = dc->hwseq;
 	struct dc_state *old_ctx = dc->current_state;
@@ -2805,26 +2805,28 @@ bool dcn10_disconnect_pipes(
 					old_ctx->res_ctx.pipe_ctx[i].top_pipe) {
 
 				/* Find the top pipe in the new ctx for the bottom pipe that we
-				 * want to remove by comparing the streams. If both pipes are being
-				 * disabled then do it in the regular pipe programming sequence
+				 * want to remove by comparing the streams and planes. If both
+				 * pipes are being disabled then do it in the regular pipe
+				 * programming sequence
 				 */
 				for (j = 0; j < dc->res_pool->pipe_count; j++) {
 					if (old_ctx->res_ctx.pipe_ctx[i].top_pipe->stream == context->res_ctx.pipe_ctx[j].stream &&
+						old_ctx->res_ctx.pipe_ctx[i].top_pipe->plane_state == context->res_ctx.pipe_ctx[j].plane_state &&
 						!context->res_ctx.pipe_ctx[j].top_pipe &&
 						!context->res_ctx.pipe_ctx[j].update_flags.bits.disable) {
-						found_stream = true;
+						found_pipe = true;
 						break;
 					}
 				}
 
 				// Disconnect if the top pipe lost it's pipe split
-				if (found_stream && !context->res_ctx.pipe_ctx[j].bottom_pipe) {
+				if (found_pipe && !context->res_ctx.pipe_ctx[j].bottom_pipe) {
 					hws->funcs.plane_atomic_disconnect(dc, &dc->current_state->res_ctx.pipe_ctx[i]);
 					DC_LOG_DC("Reset mpcc for pipe %d\n", dc->current_state->res_ctx.pipe_ctx[i].pipe_idx);
 					mpcc_disconnected = true;
 				}
 			}
-			found_stream = false;
+			found_pipe = false;
 		}
 	}
 
@@ -619,7 +619,7 @@ void enc1_stream_encoder_dvi_set_stream_attribute(
 	enc1_stream_encoder_set_stream_attribute_helper(enc1, crtc_timing);
 }
 
-void enc1_stream_encoder_set_mst_bandwidth(
+void enc1_stream_encoder_set_throttled_vcp_size(
 	struct stream_encoder *enc,
 	struct fixed31_32 avg_time_slots_per_mtp)
 {
@@ -1616,8 +1616,8 @@ static const struct stream_encoder_funcs dcn10_str_enc_funcs = {
 		enc1_stream_encoder_hdmi_set_stream_attribute,
 	.dvi_set_stream_attribute =
 		enc1_stream_encoder_dvi_set_stream_attribute,
-	.set_mst_bandwidth =
-		enc1_stream_encoder_set_mst_bandwidth,
+	.set_throttled_vcp_size =
+		enc1_stream_encoder_set_throttled_vcp_size,
 	.update_hdmi_info_packets =
 		enc1_stream_encoder_update_hdmi_info_packets,
 	.stop_hdmi_info_packets =

@@ -588,7 +588,7 @@ void enc1_stream_encoder_dvi_set_stream_attribute(
 	struct dc_crtc_timing *crtc_timing,
 	bool is_dual_link);
 
-void enc1_stream_encoder_set_mst_bandwidth(
+void enc1_stream_encoder_set_throttled_vcp_size(
 	struct stream_encoder *enc,
 	struct fixed31_32 avg_time_slots_per_mtp);
 
@@ -908,6 +908,9 @@ bool hubp2_is_flip_pending(struct hubp *hubp)
 	struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
 	struct dc_plane_address earliest_inuse_address;
 
+	if (hubp && hubp->power_gated)
+		return false;
+
 	REG_GET(DCSURF_FLIP_CONTROL,
 			SURFACE_FLIP_PENDING, &flip_pending);
 
@@ -1251,6 +1251,11 @@ static void dcn20_detect_pipe_changes(struct pipe_ctx *old_pipe, struct pipe_ctx
 		return;
 	}
 
+	/* Detect plane change */
+	if (old_pipe->plane_state != new_pipe->plane_state) {
+		new_pipe->update_flags.bits.plane_changed = true;
+	}
+
 	/* Detect top pipe only changes */
 	if (!new_pipe->top_pipe && !new_pipe->prev_odm_pipe) {
 		/* Detect odm changes */
@@ -1392,6 +1397,7 @@ static void dcn20_update_dchubp_dpp(
 			&pipe_ctx->ttu_regs);
 
 	if (pipe_ctx->update_flags.bits.enable ||
+			pipe_ctx->update_flags.bits.plane_changed ||
 			plane_state->update_flags.bits.bpp_change ||
 			plane_state->update_flags.bits.input_csc_change ||
 			plane_state->update_flags.bits.color_space_change ||
@@ -1414,6 +1420,7 @@ static void dcn20_update_dchubp_dpp(
 	}
 
 	if (pipe_ctx->update_flags.bits.mpcc
+			|| pipe_ctx->update_flags.bits.plane_changed
 			|| plane_state->update_flags.bits.global_alpha_change
 			|| plane_state->update_flags.bits.per_pixel_alpha_change) {
 		// MPCC inst is equal to pipe index in practice
@@ -1515,6 +1522,7 @@ static void dcn20_update_dchubp_dpp(
 	}
 
 	if (pipe_ctx->update_flags.bits.enable ||
+			pipe_ctx->update_flags.bits.plane_changed ||
 			pipe_ctx->update_flags.bits.opp_changed ||
 			plane_state->update_flags.bits.pixel_format_change ||
 			plane_state->update_flags.bits.horizontal_mirror_change ||
@@ -1539,7 +1547,9 @@ static void dcn20_update_dchubp_dpp(
 		hubp->power_gated = false;
 	}
 
-	if (pipe_ctx->update_flags.bits.enable || plane_state->update_flags.bits.addr_update)
+	if (pipe_ctx->update_flags.bits.enable ||
+			pipe_ctx->update_flags.bits.plane_changed ||
+			plane_state->update_flags.bits.addr_update)
 		hws->funcs.update_plane_addr(dc, pipe_ctx);
 
 
@@ -1632,16 +1642,26 @@ void dcn20_program_front_end_for_ctx(
 	struct dce_hwseq *hws = dc->hwseq;
 	DC_LOGGER_INIT(dc->ctx->logger);
 
-	for (i = 0; i < dc->res_pool->pipe_count; i++) {
-		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+	/* Carry over GSL groups in case the context is changing. */
+	for (i = 0; i < dc->res_pool->pipe_count; i++) {
+		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+		struct pipe_ctx *old_pipe_ctx =
+			&dc->current_state->res_ctx.pipe_ctx[i];
 
-		if (!pipe_ctx->top_pipe && !pipe_ctx->prev_odm_pipe && pipe_ctx->plane_state) {
-			ASSERT(!pipe_ctx->plane_state->triplebuffer_flips);
-			if (dc->hwss.program_triplebuffer != NULL &&
-				!dc->debug.disable_tri_buf) {
+		if (pipe_ctx->stream == old_pipe_ctx->stream)
+			pipe_ctx->stream_res.gsl_group =
+				old_pipe_ctx->stream_res.gsl_group;
+	}
+
+	if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) {
+		for (i = 0; i < dc->res_pool->pipe_count; i++) {
+			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+			if (!pipe_ctx->top_pipe && !pipe_ctx->prev_odm_pipe && pipe_ctx->plane_state) {
+				ASSERT(!pipe_ctx->plane_state->triplebuffer_flips);
 				/*turn off triple buffer for full update*/
 				dc->hwss.program_triplebuffer(
-					dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips);
+						dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips);
 			}
 		}
 	}
@@ -1909,9 +1929,9 @@ void dcn20_disable_stream_gating(struct dc *dc, struct pipe_ctx *pipe_ctx)
 	if (pipe_ctx->stream_res.dsc) {
 		struct pipe_ctx *odm_pipe = pipe_ctx->next_odm_pipe;
 
-		dcn20_dsc_pg_control(hws, pipe_ctx->stream_res.dsc->inst, true);
+		hws->funcs.dsc_pg_control(hws, pipe_ctx->stream_res.dsc->inst, true);
 		while (odm_pipe) {
-			dcn20_dsc_pg_control(hws, odm_pipe->stream_res.dsc->inst, true);
+			hws->funcs.dsc_pg_control(hws, odm_pipe->stream_res.dsc->inst, true);
 			odm_pipe = odm_pipe->next_odm_pipe;
 		}
 	}
@@ -1924,9 +1944,9 @@ void dcn20_enable_stream_gating(struct dc *dc, struct pipe_ctx *pipe_ctx)
 	if (pipe_ctx->stream_res.dsc) {
 		struct pipe_ctx *odm_pipe = pipe_ctx->next_odm_pipe;
 
-		dcn20_dsc_pg_control(hws, pipe_ctx->stream_res.dsc->inst, false);
+		hws->funcs.dsc_pg_control(hws, pipe_ctx->stream_res.dsc->inst, false);
 		while (odm_pipe) {
-			dcn20_dsc_pg_control(hws, odm_pipe->stream_res.dsc->inst, false);
+			hws->funcs.dsc_pg_control(hws, odm_pipe->stream_res.dsc->inst, false);
 			odm_pipe = odm_pipe->next_odm_pipe;
 		}
 	}
@@ -409,8 +409,8 @@ static struct _vcs_dpi_soc_bounding_box_st dcn2_0_nv14_soc = {
 		},
 	},
 	.num_states = 5,
-	.sr_exit_time_us = 8.6,
-	.sr_enter_plus_exit_time_us = 10.9,
+	.sr_exit_time_us = 11.6,
+	.sr_enter_plus_exit_time_us = 13.9,
 	.urgent_latency_us = 4.0,
 	.urgent_latency_pixel_data_only_us = 4.0,
 	.urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
@@ -1075,7 +1075,6 @@ static const struct dc_debug_options debug_defaults_drv = {
 	.disable_pplib_wm_range = false,
 	.scl_reset_length10 = true,
 	.sanity_checks = false,
-	.disable_tri_buf = true,
 	.underflow_assert_delay_us = 0xFFFFFFFF,
 };
 
@@ -1092,6 +1091,7 @@ static const struct dc_debug_options debug_defaults_diags = {
 	.disable_stutter = true,
 	.scl_reset_length10 = true,
 	.underflow_assert_delay_us = 0xFFFFFFFF,
+	.enable_tri_buf = true,
 };
 
 void dcn20_dpp_destroy(struct dpp **dpp)
@@ -561,8 +561,8 @@ static const struct stream_encoder_funcs dcn20_str_enc_funcs = {
 		enc1_stream_encoder_hdmi_set_stream_attribute,
 	.dvi_set_stream_attribute =
 		enc1_stream_encoder_dvi_set_stream_attribute,
-	.set_mst_bandwidth =
-		enc1_stream_encoder_set_mst_bandwidth,
+	.set_throttled_vcp_size =
+		enc1_stream_encoder_set_throttled_vcp_size,
 	.update_hdmi_info_packets =
 		enc2_stream_encoder_update_hdmi_info_packets,
 	.stop_hdmi_info_packets =

@@ -894,6 +894,8 @@ static const struct dc_debug_options debug_defaults_diags = {
 	.disable_pplib_wm_range = true,
 	.disable_stutter = true,
 	.disable_48mhz_pwrdwn = true,
+	.disable_psr = true,
+	.enable_tri_buf = true
 };
 
 enum dcn20_clk_src_array_id {
@@ -122,8 +122,6 @@ bool cm3_helper_translate_curve_to_hw_format(
 	if (output_tf == NULL || lut_params == NULL || output_tf->type == TF_TYPE_BYPASS)
 		return false;
 
-	PERF_TRACE_CTX(output_tf->ctx);
-
 	corner_points = lut_params->corner_points;
 	rgb_resulted = lut_params->rgb_resulted;
 	hw_points = 0;
@@ -314,8 +312,6 @@ bool cm3_helper_translate_curve_to_degamma_hw_format(
 	if (output_tf == NULL || lut_params == NULL || output_tf->type == TF_TYPE_BYPASS)
 		return false;
 
-	PERF_TRACE_CTX(output_tf->ctx);
-
 	corner_points = lut_params->corner_points;
 	rgb_resulted = lut_params->rgb_resulted;
 	hw_points = 0;
@@ -790,8 +790,8 @@ static const struct stream_encoder_funcs dcn30_str_enc_funcs = {
 		enc3_stream_encoder_hdmi_set_stream_attribute,
 	.dvi_set_stream_attribute =
 		enc3_stream_encoder_dvi_set_stream_attribute,
-	.set_mst_bandwidth =
-		enc1_stream_encoder_set_mst_bandwidth,
+	.set_throttled_vcp_size =
+		enc1_stream_encoder_set_throttled_vcp_size,
 	.update_hdmi_info_packets =
 		enc3_stream_encoder_update_hdmi_info_packets,
 	.stop_hdmi_info_packets =
@@ -35,7 +35,6 @@
 #include "dcn30_dpp.h"
 #include "dcn10/dcn10_cm_common.h"
 #include "dcn30_cm_common.h"
 #include "clk_mgr.h"
 #include "reg_helper.h"
 #include "abm.h"
-#include "clk_mgr.h"
@@ -220,15 +219,13 @@ static void dcn30_set_writeback(
 		struct dc_writeback_info *wb_info,
 		struct dc_state *context)
 {
-	struct dwbc *dwb;
 	struct mcif_wb *mcif_wb;
 	struct mcif_buf_params *mcif_buf_params;
 
 	ASSERT(wb_info->dwb_pipe_inst < MAX_DWB_PIPES);
 	ASSERT(wb_info->wb_enabled);
 	ASSERT(wb_info->mpcc_inst >= 0);
-	ASSERT(wb_info->mpcc_inst < 4);
-	dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst];
+	ASSERT(wb_info->mpcc_inst < dc->res_pool->mpcc_count);
 	mcif_wb = dc->res_pool->mcif_wb[wb_info->dwb_pipe_inst];
 	mcif_buf_params = &wb_info->mcif_buf_params;
 
@@ -692,26 +689,23 @@ void dcn30_program_dmdata_engine(struct pipe_ctx *pipe_ctx)
 
 bool dcn30_apply_idle_power_optimizations(struct dc *dc, bool enable)
 {
-	unsigned int surface_size;
-
 	if (!dc->ctx->dmub_srv)
 		return false;
 
 	if (enable) {
-		if (dc->current_state
-				&& dc->current_state->stream_count == 1 // single display only
-				&& dc->current_state->stream_status[0].plane_count == 1 // single surface only
-				&& dc->current_state->stream_status[0].plane_states[0]->address.page_table_base.quad_part == 0 // no VM
-				// Only 8 and 16 bit formats
-				&& dc->current_state->stream_status[0].plane_states[0]->format <= SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F
-				&& dc->current_state->stream_status[0].plane_states[0]->format >= SURFACE_PIXEL_FORMAT_GRPH_ARGB8888) {
-
-			surface_size = dc->current_state->stream_status[0].plane_states[0]->plane_size.surface_pitch *
-				dc->current_state->stream_status[0].plane_states[0]->plane_size.surface_size.height *
-				(dc->current_state->stream_status[0].plane_states[0]->format >= SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616 ? 8 : 4);
+		if (dc->current_state) {
+			int i;
+
+			/* First, check no-memory-requests case */
+			for (i = 0; i < dc->current_state->stream_count; i++) {
+				if (dc->current_state->stream_status[i]
+						.plane_count)
+					/* Fail eligibility on a visible stream */
+					break;
+			}
 		}
 
+		/* No applicable optimizations */
 		return false;
 	}
 
@@ -179,8 +179,7 @@ void optc3_set_dsc_config(struct timing_generator *optc,
 
 }
 
-
-static void optc3_set_odm_bypass(struct timing_generator *optc,
+void optc3_set_odm_bypass(struct timing_generator *optc,
 		const struct dc_crtc_timing *dc_crtc_timing)
 {
 	struct optc *optc1 = DCN10TG_FROM_TG(optc);
@@ -277,7 +276,7 @@ static void optc3_set_odm_combine(struct timing_generator *optc, int *opp_id, in
 *
 * Options: any time, start of frame, dp start of frame (range timing)
 */
-void optc3_set_timing_double_buffer(struct timing_generator *optc, bool enable)
+static void optc3_set_timing_double_buffer(struct timing_generator *optc, bool enable)
 {
 	struct optc *optc1 = DCN10TG_FROM_TG(optc);
 	uint32_t mode = enable ? 2 : 0;

@@ -339,4 +339,8 @@ void optc3_set_dsc_config(struct timing_generator *optc,
 
 void optc3_set_timing_db_mode(struct timing_generator *optc, bool enable);
 
+void optc3_set_odm_bypass(struct timing_generator *optc,
+		const struct dc_crtc_timing *dc_crtc_timing);
+void optc3_tg_init(struct timing_generator *optc);
+
 #endif /* __DC_OPTC_DCN30_H__ */
@@ -79,6 +79,7 @@
 
 #include "reg_helper.h"
 #include "dce/dmub_abm.h"
+#include "dce/dmub_psr.h"
 #include "dce/dce_aux.h"
 #include "dce/dce_i2c.h"
 
@@ -832,7 +833,7 @@ static const struct dc_plane_cap plane_cap = {
 };
 
 static const struct dc_debug_options debug_defaults_drv = {
-	.disable_dmcu = true,
+	.disable_dmcu = true, //No DMCU on DCN30
 	.force_abm_enable = false,
 	.timing_trace = false,
 	.clock_trace = true,
@@ -849,10 +850,11 @@ static const struct dc_debug_options debug_defaults_drv = {
 	.underflow_assert_delay_us = 0xFFFFFFFF,
 	.dwb_fi_phase = -1, // -1 = disable,
 	.dmub_command_table = true,
+	.disable_psr = false,
 };
 
 static const struct dc_debug_options debug_defaults_diags = {
-	.disable_dmcu = true,
+	.disable_dmcu = true, //No dmcu on DCN30
 	.force_abm_enable = false,
 	.timing_trace = true,
 	.clock_trace = true,
@@ -865,6 +867,8 @@ static const struct dc_debug_options debug_defaults_diags = {
 	.scl_reset_length10 = true,
 	.dwb_fi_phase = -1, // -1 = disable
 	.dmub_command_table = true,
+	.disable_psr = true,
+	.enable_tri_buf = true,
 };
 
 void dcn30_dpp_destroy(struct dpp **dpp)
@@ -1312,6 +1316,9 @@ static void dcn30_resource_destruct(struct dcn30_resource_pool *pool)
 			dce_abm_destroy(&pool->base.multiple_abms[i]);
 	}
 
+	if (pool->base.psr != NULL)
+		dmub_psr_destroy(&pool->base.psr);
+
 	if (pool->base.dccg != NULL)
 		dcn_dccg_destroy(&pool->base.dccg);
 }
@@ -1821,6 +1828,22 @@ static bool init_soc_bounding_box(struct dc *dc,
 	loaded_ip->max_num_dpp = pool->base.pipe_count;
 	loaded_ip->clamp_min_dcfclk = dc->config.clamp_min_dcfclk;
 	dcn20_patch_bounding_box(dc, loaded_bb);
+
+	if (!bb && dc->ctx->dc_bios->funcs->get_soc_bb_info) {
+		struct bp_soc_bb_info bb_info = {0};
+
+		if (dc->ctx->dc_bios->funcs->get_soc_bb_info(dc->ctx->dc_bios, &bb_info) == BP_RESULT_OK) {
+			if (bb_info.dram_clock_change_latency_100ns > 0)
+				dcn3_0_soc.dram_clock_change_latency_us = bb_info.dram_clock_change_latency_100ns * 10;
+
+			if (bb_info.dram_sr_enter_exit_latency_100ns > 0)
+				dcn3_0_soc.sr_enter_plus_exit_time_us = bb_info.dram_sr_enter_exit_latency_100ns * 10;
+
+			if (bb_info.dram_sr_exit_latency_100ns > 0)
+				dcn3_0_soc.sr_exit_time_us = bb_info.dram_sr_exit_latency_100ns * 10;
+		}
+	}
+
 	return true;
 }
 
@@ -2203,6 +2226,9 @@ static void dcn30_calculate_wm(
 	context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
 	context->bw_ctx.bw.dcn.watermarks.a.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
 
+	context->perf_params.stutter_period_us =
+		context->bw_ctx.dml.vba.StutterPeriod;
+
 	for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
 		if (!context->res_ctx.pipe_ctx[i].stream)
 			continue;
@@ -2623,6 +2649,14 @@ static bool dcn30_resource_construct(
 		}
 	}
 	pool->base.timing_generator_count = i;
+
+	/* PSR */
+	pool->base.psr = dmub_psr_create(ctx);
+	if (pool->base.psr == NULL) {
+		dm_error("DC: failed to create PSR obj!\n");
+		BREAK_TO_DEBUGGER();
+		goto create_fail;
+	}
 
 	/* ABM */
 	for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) {
@@ -30,8 +30,6 @@
 * interface to PPLIB/SMU to setup clocks and pstate requirements on SoC
 */
 
-typedef bool BOOLEAN;
-
 enum pp_smu_ver {
 	/*
 	 * PP_SMU_INTERFACE_X should be interpreted as the interface defined
@@ -240,7 +238,7 @@ struct pp_smu_funcs_nv {
 	 * DC hardware
 	 */
 	enum pp_smu_status (*set_pstate_handshake_support)(struct pp_smu *pp,
-			BOOLEAN pstate_handshake_supported);
+			bool pstate_handshake_supported);
 };
 
 #define PP_SMU_NUM_SOCCLK_DPM_LEVELS 8
@@ -2635,15 +2635,14 @@ static void dml20v2_DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndP
 	}
 
 	if (mode_lib->vba.DRAMClockChangeSupportsVActive &&
-		mode_lib->vba.MinActiveDRAMClockChangeMargin > 60) {
+		mode_lib->vba.MinActiveDRAMClockChangeMargin > 60 &&
+		mode_lib->vba.PrefetchMode[mode_lib->vba.VoltageLevel][mode_lib->vba.maxMpcComb] == 0) {
 		mode_lib->vba.DRAMClockChangeWatermark += 25;
 
 		for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
-			if (mode_lib->vba.PrefetchMode[mode_lib->vba.VoltageLevel][mode_lib->vba.maxMpcComb] == 0) {
-				if (mode_lib->vba.DRAMClockChangeWatermark >
-				dml_max(mode_lib->vba.StutterEnterPlusExitWatermark, mode_lib->vba.UrgentWatermark))
-					mode_lib->vba.MinTTUVBlank[k] += 25;
-			}
+			if (mode_lib->vba.DRAMClockChangeWatermark >
+			dml_max(mode_lib->vba.StutterEnterPlusExitWatermark, mode_lib->vba.UrgentWatermark))
+				mode_lib->vba.MinTTUVBlank[k] += 25;
 		}
 
 		mode_lib->vba.DRAMClockChangeSupport[0][0] = dm_dram_clock_change_vactive;
@@ -33,7 +33,7 @@ struct display_mode_lib;
 
 // Function: dml_rq_dlg_get_rq_reg
 //  Main entry point for test to get the register values out of this DML class.
-//  This function calls <get_rq_param> and <extract_rq_regs> fucntions to calculate
+//  This function calls <get_rq_param> and <extract_rq_regs> functions to calculate
 //  and then populate the rq_regs struct
 // Input:
 //  pipe_src_param - pipe source configuration (e.g. vp, pitch, etc.)

@@ -33,7 +33,7 @@ struct display_mode_lib;
 
 // Function: dml_rq_dlg_get_rq_reg
 //  Main entry point for test to get the register values out of this DML class.
-//  This function calls <get_rq_param> and <extract_rq_regs> fucntions to calculate
+//  This function calls <get_rq_param> and <extract_rq_regs> functions to calculate
 //  and then populate the rq_regs struct
 // Input:
 //  pipe_src_param - pipe source configuration (e.g. vp, pitch, etc.)
@@ -1294,7 +1294,7 @@ static unsigned int CalculateVMAndRowBytes(
 		unsigned int MacroTileHeight;
 		unsigned int ExtraDPDEBytesFrame;
 		unsigned int PDEAndMetaPTEBytesFrame;
-		unsigned int PixelPTEReqHeightPTEs;
+		unsigned int PixelPTEReqHeightPTEs = 0;
 
 	if (DCCEnable == true) {
 		*MetaRequestHeight = 8 * BlockHeight256Bytes;
@@ -597,7 +597,8 @@ static void CalculateStutterEfficiency(
 		double meta_row_bw[],
 		double dpte_row_bw[],
 		double *StutterEfficiencyNotIncludingVBlank,
-		double *StutterEfficiency);
+		double *StutterEfficiency,
+		double *StutterPeriodOut);
 
 static void CalculateSwathAndDETConfiguration(
 		bool ForceSingleDPP,
@@ -3134,7 +3135,8 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
 			v->meta_row_bw,
 			v->dpte_row_bw,
 			&v->StutterEfficiencyNotIncludingVBlank,
-			&v->StutterEfficiency);
+			&v->StutterEfficiency,
+			&v->StutterPeriod);
 }
 
 static void DisplayPipeConfiguration(struct display_mode_lib *mode_lib)
@@ -3235,7 +3237,7 @@ static bool CalculateBytePerPixelAnd256BBlockSizes(
 		*BytePerPixelDETC = 0;
 		*BytePerPixelY = 4;
 		*BytePerPixelC = 0;
-	} else if (SourcePixelFormat == dm_444_16 || SourcePixelFormat == dm_444_16) {
+	} else if (SourcePixelFormat == dm_444_16) {
 		*BytePerPixelDETY = 2;
 		*BytePerPixelDETC = 0;
 		*BytePerPixelY = 2;
@@ -5305,7 +5307,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
 			ViewportExceedsSurface = true;
 
 		if (v->SourcePixelFormat[k] != dm_444_64 && v->SourcePixelFormat[k] != dm_444_32 && v->SourcePixelFormat[k] != dm_444_16
-				&& v->SourcePixelFormat[k] != dm_444_16 && v->SourcePixelFormat[k] != dm_444_8 && v->SourcePixelFormat[k] != dm_rgbe) {
+				&& v->SourcePixelFormat[k] != dm_444_8 && v->SourcePixelFormat[k] != dm_rgbe) {
 			if (v->ViewportWidthChroma[k] > v->SurfaceWidthC[k] || v->ViewportHeightChroma[k] > v->SurfaceHeightC[k]) {
 				ViewportExceedsSurface = true;
 			}
@@ -5515,7 +5517,7 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport(
 		if (WritebackPixelFormat[k] == dm_444_64) {
 			WritebackDRAMClockChangeLatencyHiding = WritebackDRAMClockChangeLatencyHiding / 2;
 		}
-		if (mode_lib->vba.WritebackConfiguration == dm_whole_buffer_for_single_stream_interleave || mode_lib->vba.WritebackConfiguration == dm_whole_buffer_for_single_stream_interleave) {
+		if (mode_lib->vba.WritebackConfiguration == dm_whole_buffer_for_single_stream_interleave) {
 			WritebackDRAMClockChangeLatencyHiding = WritebackDRAMClockChangeLatencyHiding * 2;
 		}
 		WritebackDRAMClockChangeLatencyMargin = WritebackDRAMClockChangeLatencyHiding - mode_lib->vba.WritebackDRAMClockChangeWatermark;
@@ -6151,7 +6153,8 @@ static void CalculateStutterEfficiency(
 		double meta_row_bw[],
 		double dpte_row_bw[],
 		double *StutterEfficiencyNotIncludingVBlank,
-		double *StutterEfficiency)
+		double *StutterEfficiency,
+		double *StutterPeriodOut)
 {
 	double FullDETBufferingTimeY[DC__NUM_DPP__MAX] = { 0 };
 	double FrameTimeForMinFullDETBufferingTime = 0;
@@ -6262,6 +6265,9 @@ static void CalculateStutterEfficiency(
 	}
 
 	*StutterEfficiency = (*StutterEfficiencyNotIncludingVBlank / 100.0 * (FrameTimeForMinFullDETBufferingTime - SmallestVBlank) + SmallestVBlank) / FrameTimeForMinFullDETBufferingTime * 100;
+
+	if (StutterPeriodOut)
+		*StutterPeriodOut = StutterPeriod;
 }
 
 static void CalculateSwathAndDETConfiguration(
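The added StutterPeriodOut argument is the usual optional out-parameter idiom: NULL-tolerant and written only on request, so existing callers need no change beyond passing NULL. A minimal standalone sketch of the pattern, with stand-in values rather than the real DML computation:

	#include <stdio.h>

	/* optional out-parameter: written only when the caller passes non-NULL */
	static double compute_efficiency(double *period_out)
	{
		double period = 16.6;		/* stand-in for the real computation */
		double efficiency = 98.5;

		if (period_out)
			*period_out = period;
		return efficiency;
	}

	int main(void)
	{
		double period;

		compute_efficiency(NULL);	/* legacy caller: ignores the period */
		compute_efficiency(&period);	/* new caller: captures it */
		printf("stutter period: %.1f us\n", period);
		return 0;
	}

The dml30 call site above passes &v->StutterPeriod, which dcn30_calculate_wm then copies into context->perf_params.stutter_period_us.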
@@ -279,7 +279,7 @@ static bool CalculateBytePerPixelAnd256BBlockSizes(
 		*BytePerPixelDETC = 0;
 		*BytePerPixelY = 4;
 		*BytePerPixelC = 0;
-	} else if (SourcePixelFormat == dm_444_16 || SourcePixelFormat == dm_444_16) {
+	} else if (SourcePixelFormat == dm_444_16) {
 		*BytePerPixelDETY = 2;
 		*BytePerPixelDETC = 0;
 		*BytePerPixelY = 2;

@@ -32,7 +32,7 @@ struct display_mode_lib;
 
 // Function: dml_rq_dlg_get_rq_reg
 //  Main entry point for test to get the register values out of this DML class.
-//  This function calls <get_rq_param> and <extract_rq_regs> fucntions to calculate
+//  This function calls <get_rq_param> and <extract_rq_regs> functions to calculate
 //  and then populate the rq_regs struct
 // Input:
 //  pipe_param - pipe source configuration (e.g. vp, pitch, scaling, dest, etc.)
@@ -162,7 +162,7 @@ static void define_hpd_registers(struct hw_gpio_pin *pin, uint32_t en)
 }
 
 
-/* fucntion table */
+/* function table */
 static const struct hw_factory_funcs funcs = {
 	.init_ddc_data = dal_hw_ddc_init,
 	.init_generic = NULL,

@@ -194,7 +194,7 @@ static void define_hpd_registers(struct hw_gpio_pin *pin, uint32_t en)
 }
 
 
-/* fucntion table */
+/* function table */
 static const struct hw_factory_funcs funcs = {
 	.init_ddc_data = dal_hw_ddc_init,
 	.init_generic = dal_hw_generic_init,

@@ -221,7 +221,7 @@ static void define_generic_registers(struct hw_gpio_pin *pin, uint32_t en)
 	generic->base.regs = &generic_regs[en].gpio;
 }
 
-/* fucntion table */
+/* function table */
 static const struct hw_factory_funcs funcs = {
 	.init_ddc_data = dal_hw_ddc_init,
 	.init_generic = dal_hw_generic_init,

@@ -202,7 +202,7 @@ static void define_hpd_registers(struct hw_gpio_pin *pin, uint32_t en)
 }
 
 
-/* fucntion table */
+/* function table */
 static const struct hw_factory_funcs funcs = {
 	.init_ddc_data = dal_hw_ddc_init,
 	.init_generic = dal_hw_generic_init,

@@ -218,7 +218,7 @@ static void define_hpd_registers(struct hw_gpio_pin *pin, uint32_t en)
 }
 
 
-/* fucntion table */
+/* function table */
 static const struct hw_factory_funcs funcs = {
 	.init_ddc_data = dal_hw_ddc_init,
 	.init_generic = dal_hw_generic_init,
@@ -300,6 +300,7 @@ union pipe_update_flags {
 		uint32_t gamut_remap : 1;
 		uint32_t scaler : 1;
 		uint32_t viewport : 1;
+		uint32_t plane_changed : 1;
 	} bits;
 	uint32_t raw;
 };
@@ -396,6 +397,7 @@ struct dc_state {
 	struct dc_stream_state *streams[MAX_PIPES];
 	struct dc_stream_status stream_status[MAX_PIPES];
 	uint8_t stream_count;
+	uint8_t stream_mask;
 
 	struct resource_context res_ctx;
 
@@ -410,6 +412,10 @@ struct dc_state {
 	struct clk_mgr *clk_mgr;
 
 	struct kref refcount;
+
+	struct {
+		unsigned int stutter_period_us;
+	} perf_params;
 };
 
 #endif /* _CORE_TYPES_H_ */
@@ -143,7 +143,7 @@ struct stream_encoder_funcs {
 		struct stream_encoder *enc,
 		struct dc_crtc_timing *crtc_timing);
 
-	void (*set_mst_bandwidth)(
+	void (*set_throttled_vcp_size)(
 		struct stream_encoder *enc,
 		struct fixed31_32 avg_time_slots_per_mtp);
 

@@ -46,9 +46,10 @@ static void virtual_stream_encoder_dvi_set_stream_attribute(
 	struct dc_crtc_timing *crtc_timing,
 	bool is_dual_link) {}
 
-static void virtual_stream_encoder_set_mst_bandwidth(
+static void virtual_stream_encoder_set_throttled_vcp_size(
 	struct stream_encoder *enc,
-	struct fixed31_32 avg_time_slots_per_mtp) {}
+	struct fixed31_32 avg_time_slots_per_mtp)
+{}
 
 static void virtual_stream_encoder_update_hdmi_info_packets(
 	struct stream_encoder *enc,
@@ -107,8 +108,8 @@ static const struct stream_encoder_funcs virtual_str_enc_funcs = {
 		virtual_stream_encoder_hdmi_set_stream_attribute,
 	.dvi_set_stream_attribute =
 		virtual_stream_encoder_dvi_set_stream_attribute,
-	.set_mst_bandwidth =
-		virtual_stream_encoder_set_mst_bandwidth,
+	.set_throttled_vcp_size =
+		virtual_stream_encoder_set_throttled_vcp_size,
 	.update_hdmi_info_packets =
 		virtual_stream_encoder_update_hdmi_info_packets,
 	.stop_hdmi_info_packets =
@@ -36,10 +36,10 @@
 
 /* Firmware versioning. */
 #ifdef DMUB_EXPOSE_VERSION
-#define DMUB_FW_VERSION_GIT_HASH 0x4e5b2f46f
+#define DMUB_FW_VERSION_GIT_HASH 0x82f998da6
 #define DMUB_FW_VERSION_MAJOR 0
 #define DMUB_FW_VERSION_MINOR 0
-#define DMUB_FW_VERSION_REVISION 29
+#define DMUB_FW_VERSION_REVISION 32
 #define DMUB_FW_VERSION_TEST 0
 #define DMUB_FW_VERSION_VBIOS 0
 #define DMUB_FW_VERSION_HOTFIX 0
@@ -97,6 +97,7 @@ union dmub_psr_debug_flags {
 	struct {
 		uint32_t visual_confirm : 1;
 		uint32_t use_hw_lock_mgr : 1;
+		uint32_t log_line_nums : 1;
 	} bitfields;
 
 	uint32_t u32All;
@@ -791,12 +792,10 @@ static inline void dmub_rb_flush_pending(const struct dmub_rb *rb)
 
 	while (rptr != wptr) {
 		uint64_t volatile *data = (uint64_t volatile *)rb->base_address + rptr / sizeof(uint64_t);
-		//uint64_t volatile *p = (uint64_t volatile *)data;
-		uint64_t temp;
 		int i;
 
 		for (i = 0; i < DMUB_RB_CMD_SIZE / sizeof(uint64_t); i++)
-			temp = *data++;
+			*data++;
 
 		rptr += DMUB_RB_CMD_SIZE;
 		if (rptr >= rb->capacity)
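The simplified loop still works because data points to volatile storage: the bare expression *data++ must perform the 64-bit load, which is what actually touches each cache line, and the removed temp variable likely existed only to look like a use (at the cost of a set-but-unused warning). The idiom in isolation, as a standalone sketch:

	#include <stdint.h>

	/* touch every 64-bit word of a buffer via volatile reads */
	static void touch_words(const void *base, uint32_t bytes)
	{
		volatile const uint64_t *p = (volatile const uint64_t *)base;
		uint32_t i;

		for (i = 0; i < bytes / sizeof(uint64_t); i++)
			(void)p[i];	/* volatile access: the load must be emitted */
	}

	int main(void)
	{
		uint64_t buf[8] = {0};

		touch_words(buf, sizeof(buf));
		return 0;
	}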
@@ -318,4 +318,10 @@ struct bp_encoder_cap_info {
 	uint32_t RESERVED:27;
 };
 
+struct bp_soc_bb_info {
+	uint32_t dram_clock_change_latency_100ns;
+	uint32_t dram_sr_exit_latency_100ns;
+	uint32_t dram_sr_enter_exit_latency_100ns;
+};
+
 #endif /*__DAL_BIOS_PARSER_TYPES_H__ */

@@ -80,6 +80,7 @@ struct link_training_settings {
 
 	uint16_t cr_pattern_time;
 	uint16_t eq_pattern_time;
+	enum dc_dp_training_pattern pattern_for_cr;
 	enum dc_dp_training_pattern pattern_for_eq;
 
 	bool enhanced_framing;
@@ -470,6 +470,14 @@ enum mod_hdcp_status mod_hdcp_process_event(struct mod_hdcp *hdcp,
 		if (reset_status != MOD_HDCP_STATUS_SUCCESS)
 			push_error_status(hdcp, reset_status);
 	}
+
+	/* Clear CP_IRQ status if needed */
+	if (event_ctx.event == MOD_HDCP_EVENT_CPIRQ) {
+		status = mod_hdcp_clear_cp_irq_status(hdcp);
+		if (status != MOD_HDCP_STATUS_SUCCESS)
+			push_error_status(hdcp, status);
+	}
+
 	return status;
 }
 

@@ -386,6 +386,7 @@ enum mod_hdcp_status mod_hdcp_write_eks(struct mod_hdcp *hdcp);
 enum mod_hdcp_status mod_hdcp_write_repeater_auth_ack(struct mod_hdcp *hdcp);
 enum mod_hdcp_status mod_hdcp_write_stream_manage(struct mod_hdcp *hdcp);
 enum mod_hdcp_status mod_hdcp_write_content_type(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_clear_cp_irq_status(struct mod_hdcp *hdcp);
 
 /* hdcp version helpers */
 static inline uint8_t is_dp_hdcp(struct mod_hdcp *hdcp)

@@ -30,6 +30,8 @@
 #define KSV_READ_SIZE 0xf /* 0x6803b - 0x6802c */
 #define HDCP_MAX_AUX_TRANSACTION_SIZE 16
 
+#define DP_CP_IRQ (1 << 2)
+
 enum mod_hdcp_ddc_message_id {
 	MOD_HDCP_MESSAGE_ID_INVALID = -1,
 
@@ -645,3 +647,18 @@ enum mod_hdcp_status mod_hdcp_write_content_type(struct mod_hdcp *hdcp)
 		status = MOD_HDCP_STATUS_INVALID_OPERATION;
 	return status;
 }
+
+enum mod_hdcp_status mod_hdcp_clear_cp_irq_status(struct mod_hdcp *hdcp)
+{
+	uint8_t clear_cp_irq_bit = DP_CP_IRQ;
+	uint32_t size = 1;
+
+	if (is_dp_hdcp(hdcp)) {
+		uint32_t cp_irq_addrs = (hdcp->connection.link.dp.rev >= 0x14)
+				? DP_DEVICE_SERVICE_IRQ_VECTOR_ESI0:DP_DEVICE_SERVICE_IRQ_VECTOR;
+		return hdcp->config.ddc.funcs.write_dpcd(hdcp->config.ddc.handle, cp_irq_addrs,
+				&clear_cp_irq_bit, size) ? MOD_HDCP_STATUS_SUCCESS : MOD_HDCP_STATUS_DDC_FAILURE;
+	}
+
+	return MOD_HDCP_STATUS_INVALID_OPERATION;
+}
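Acknowledging CP_IRQ is a write-one-to-clear of bit 2 in the sink's DPCD interrupt vector, with DPCD 1.4+ sinks using the ESI0 mirror at 0x2003 instead of the legacy vector at 0x201. Stripped of the mod_hdcp plumbing, the operation amounts to this sketch, where write_dpcd and handle are placeholders rather than real driver symbols:

	#include <stdint.h>

	#define DP_CP_IRQ (1 << 2)

	/* ack CP_IRQ in the DPCD interrupt vector appropriate for the sink */
	static int ack_cp_irq(void *handle, uint8_t dpcd_rev,
			      int (*write_dpcd)(void *handle, uint32_t addr,
						const uint8_t *buf, uint32_t len))
	{
		uint8_t bit = DP_CP_IRQ;
		uint32_t addr = dpcd_rev >= 0x14
				? 0x2003	/* DEVICE_SERVICE_IRQ_VECTOR_ESI0 */
				: 0x0201;	/* DEVICE_SERVICE_IRQ_VECTOR */

		return write_dpcd(handle, addr, &bit, 1);	/* write-1-to-clear */
	}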
@@ -27,7 +27,7 @@
 #define MOD_HDCP_LOG_H_
 
 #ifdef CONFIG_DRM_AMD_DC_HDCP
-#define HDCP_LOG_ERR(hdcp, ...) DRM_WARN(__VA_ARGS__)
+#define HDCP_LOG_ERR(hdcp, ...) DRM_DEBUG_KMS(__VA_ARGS__)
 #define HDCP_LOG_VER(hdcp, ...) DRM_DEBUG_KMS(__VA_ARGS__)
 #define HDCP_LOG_FSM(hdcp, ...) DRM_DEBUG_KMS(__VA_ARGS__)
 #define HDCP_LOG_TOP(hdcp, ...) pr_debug("[HDCP_TOP]:"__VA_ARGS__)

@@ -88,7 +88,7 @@ enum mod_hdcp_status mod_hdcp_add_display_to_topology(struct mod_hdcp *hdcp,
 	enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
 
 	if (!psp->dtm_context.dtm_initialized) {
-		DRM_ERROR("Failed to add display topology, DTM TA is not initialized.");
+		DRM_INFO("Failed to add display topology, DTM TA is not initialized.");
 		display->state = MOD_HDCP_DISPLAY_INACTIVE;
 		return MOD_HDCP_STATUS_FAILURE;
 	}
@ -2727,6 +2727,7 @@
|
|||
#define mmDB_STENCIL_WRITE_BASE_DEFAULT 0x00000000
|
||||
#define mmDB_RESERVED_REG_1_DEFAULT 0x00000000
|
||||
#define mmDB_RESERVED_REG_3_DEFAULT 0x00000000
|
||||
#define mmDB_VRS_OVERRIDE_CNTL_DEFAULT 0x00000000
|
||||
#define mmDB_Z_READ_BASE_HI_DEFAULT 0x00000000
|
||||
#define mmDB_STENCIL_READ_BASE_HI_DEFAULT 0x00000000
|
||||
#define mmDB_Z_WRITE_BASE_HI_DEFAULT 0x00000000
|
||||
|
@ -3062,6 +3063,7 @@
|
|||
#define mmPA_SU_OVER_RASTERIZATION_CNTL_DEFAULT 0x00000000
|
||||
#define mmPA_STEREO_CNTL_DEFAULT 0x00000000
|
||||
#define mmPA_STATE_STEREO_X_DEFAULT 0x00000000
|
||||
#define mmPA_CL_VRS_CNTL_DEFAULT 0x00000000
|
||||
#define mmPA_SU_POINT_SIZE_DEFAULT 0x00000000
|
||||
#define mmPA_SU_POINT_MINMAX_DEFAULT 0x00000000
|
||||
#define mmPA_SU_LINE_CNTL_DEFAULT 0x00000000
|
||||
|
|
|
@ -5379,6 +5379,8 @@
|
|||
#define mmDB_RESERVED_REG_1_BASE_IDX 1
|
||||
#define mmDB_RESERVED_REG_3 0x0017
|
||||
#define mmDB_RESERVED_REG_3_BASE_IDX 1
|
||||
#define mmDB_VRS_OVERRIDE_CNTL 0x0019
|
||||
#define mmDB_VRS_OVERRIDE_CNTL_BASE_IDX 1
|
||||
#define mmDB_Z_READ_BASE_HI 0x001a
|
||||
#define mmDB_Z_READ_BASE_HI_BASE_IDX 1
|
||||
#define mmDB_STENCIL_READ_BASE_HI 0x001b
|
||||
|
@ -6049,6 +6051,8 @@
|
|||
#define mmPA_STEREO_CNTL_BASE_IDX 1
|
||||
#define mmPA_STATE_STEREO_X 0x0211
|
||||
#define mmPA_STATE_STEREO_X_BASE_IDX 1
|
||||
#define mmPA_CL_VRS_CNTL 0x0212
|
||||
#define mmPA_CL_VRS_CNTL_BASE_IDX 1
|
||||
#define mmPA_SU_POINT_SIZE 0x0280
|
||||
#define mmPA_SU_POINT_SIZE_BASE_IDX 1
|
||||
#define mmPA_SU_POINT_MINMAX 0x0281
|
||||
|
|
|
@ -9777,6 +9777,7 @@
|
|||
#define DB_EXCEPTION_CONTROL__AUTO_FLUSH_HTILE__SHIFT 0x3
#define DB_EXCEPTION_CONTROL__AUTO_FLUSH_QUAD__SHIFT 0x4
#define DB_EXCEPTION_CONTROL__FORCE_SUMMARIZE__SHIFT 0x8
#define DB_EXCEPTION_CONTROL__FORCE_VRS_RATE_FINE__SHIFT 0x10
#define DB_EXCEPTION_CONTROL__DTAG_WATERMARK__SHIFT 0x18
#define DB_EXCEPTION_CONTROL__EARLY_Z_PANIC_DISABLE_MASK 0x00000001L
#define DB_EXCEPTION_CONTROL__LATE_Z_PANIC_DISABLE_MASK 0x00000002L
@@ -9784,6 +9785,7 @@
#define DB_EXCEPTION_CONTROL__AUTO_FLUSH_HTILE_MASK 0x00000008L
#define DB_EXCEPTION_CONTROL__AUTO_FLUSH_QUAD_MASK 0x00000010L
#define DB_EXCEPTION_CONTROL__FORCE_SUMMARIZE_MASK 0x00000F00L
#define DB_EXCEPTION_CONTROL__FORCE_VRS_RATE_FINE_MASK 0x00FF0000L
#define DB_EXCEPTION_CONTROL__DTAG_WATERMARK_MASK 0x7F000000L
//DB_DFSM_CONFIG
#define DB_DFSM_CONFIG__BYPASS_DFSM__SHIFT 0x0
@@ -10076,6 +10078,7 @@
#define CB_HW_CONTROL_3__DISABLE_NACK_PROCESSING_CM__SHIFT 0x18
#define CB_HW_CONTROL_3__DISABLE_NACK_COLOR_RD_WR_OPT__SHIFT 0x19
#define CB_HW_CONTROL_3__DISABLE_BLENDER_CLOCK_GATING__SHIFT 0x1a
#define CB_HW_CONTROL_3__DISABLE_DCC_VRS_OPT__SHIFT 0x1c
#define CB_HW_CONTROL_3__DISABLE_FMASK_NOFETCH_OPT__SHIFT 0x1e
#define CB_HW_CONTROL_3__DISABLE_FMASK_NOFETCH_OPT_BC__SHIFT 0x1f
#define CB_HW_CONTROL_3__DISABLE_SLOW_MODE_EMPTY_HALF_QUAD_KILL_MASK 0x00000001L
@@ -10103,12 +10106,15 @@
#define CB_HW_CONTROL_3__DISABLE_NACK_PROCESSING_CM_MASK 0x01000000L
#define CB_HW_CONTROL_3__DISABLE_NACK_COLOR_RD_WR_OPT_MASK 0x02000000L
#define CB_HW_CONTROL_3__DISABLE_BLENDER_CLOCK_GATING_MASK 0x04000000L
#define CB_HW_CONTROL_3__DISABLE_DCC_VRS_OPT_MASK 0x10000000L
#define CB_HW_CONTROL_3__DISABLE_FMASK_NOFETCH_OPT_MASK 0x40000000L
#define CB_HW_CONTROL_3__DISABLE_FMASK_NOFETCH_OPT_BC_MASK 0x80000000L
//CB_HW_CONTROL
#define CB_HW_CONTROL__ALLOW_MRT_WITH_DUAL_SOURCE__SHIFT 0x0
#define CB_HW_CONTROL__DISABLE_VRS_FILLRATE_OPTIMIZATION__SHIFT 0x1
#define CB_HW_CONTROL__DISABLE_FILLRATE_OPT_FIX_WITH_CFC__SHIFT 0x3
#define CB_HW_CONTROL__DISABLE_POST_DCC_WITH_CFC_FIX__SHIFT 0x4
#define CB_HW_CONTROL__DISABLE_COMPRESS_1FRAG_WHEN_VRS_RATE_HINT_EN__SHIFT 0x5
#define CB_HW_CONTROL__RMI_CREDITS__SHIFT 0x6
#define CB_HW_CONTROL__CHICKEN_BITS__SHIFT 0xc
#define CB_HW_CONTROL__DISABLE_FMASK_MULTI_MGCG_DOMAINS__SHIFT 0xf
@@ -10129,8 +10135,10 @@
#define CB_HW_CONTROL__DISABLE_CC_IB_SERIALIZER_STATE_OPT__SHIFT 0x1e
#define CB_HW_CONTROL__DISABLE_PIXEL_IN_QUAD_FIX_FOR_LINEAR_SURFACE__SHIFT 0x1f
#define CB_HW_CONTROL__ALLOW_MRT_WITH_DUAL_SOURCE_MASK 0x00000001L
#define CB_HW_CONTROL__DISABLE_VRS_FILLRATE_OPTIMIZATION_MASK 0x00000002L
#define CB_HW_CONTROL__DISABLE_FILLRATE_OPT_FIX_WITH_CFC_MASK 0x00000008L
#define CB_HW_CONTROL__DISABLE_POST_DCC_WITH_CFC_FIX_MASK 0x00000010L
#define CB_HW_CONTROL__DISABLE_COMPRESS_1FRAG_WHEN_VRS_RATE_HINT_EN_MASK 0x00000020L
#define CB_HW_CONTROL__RMI_CREDITS_MASK 0x00000FC0L
#define CB_HW_CONTROL__CHICKEN_BITS_MASK 0x00007000L
#define CB_HW_CONTROL__DISABLE_FMASK_MULTI_MGCG_DOMAINS_MASK 0x00008000L
@@ -19881,6 +19889,7 @@
#define DB_RENDER_OVERRIDE2__PRESERVE_SRESULTS__SHIFT 0x16
#define DB_RENDER_OVERRIDE2__DISABLE_FAST_PASS__SHIFT 0x17
#define DB_RENDER_OVERRIDE2__ALLOW_PARTIAL_RES_HIER_KILL__SHIFT 0x19
#define DB_RENDER_OVERRIDE2__FORCE_VRS_RATE_FINE__SHIFT 0x1a
#define DB_RENDER_OVERRIDE2__CENTROID_COMPUTATION_MODE__SHIFT 0x1b
#define DB_RENDER_OVERRIDE2__PARTIAL_SQUAD_LAUNCH_CONTROL_MASK 0x00000003L
#define DB_RENDER_OVERRIDE2__PARTIAL_SQUAD_LAUNCH_COUNTDOWN_MASK 0x0000001CL
@@ -19898,6 +19907,7 @@
#define DB_RENDER_OVERRIDE2__PRESERVE_SRESULTS_MASK 0x00400000L
#define DB_RENDER_OVERRIDE2__DISABLE_FAST_PASS_MASK 0x00800000L
#define DB_RENDER_OVERRIDE2__ALLOW_PARTIAL_RES_HIER_KILL_MASK 0x02000000L
#define DB_RENDER_OVERRIDE2__FORCE_VRS_RATE_FINE_MASK 0x04000000L
#define DB_RENDER_OVERRIDE2__CENTROID_COMPUTATION_MODE_MASK 0x18000000L
//DB_HTILE_DATA_BASE
#define DB_HTILE_DATA_BASE__BASE_256B__SHIFT 0x0
@@ -20021,6 +20031,13 @@
//DB_RESERVED_REG_3
#define DB_RESERVED_REG_3__FIELD_1__SHIFT 0x0
#define DB_RESERVED_REG_3__FIELD_1_MASK 0x003FFFFFL
//DB_VRS_OVERRIDE_CNTL
#define DB_VRS_OVERRIDE_CNTL__VRS_OVERRIDE_RATE_COMBINER_MODE__SHIFT 0x0
#define DB_VRS_OVERRIDE_CNTL__VRS_OVERRIDE_RATE_X__SHIFT 0x4
#define DB_VRS_OVERRIDE_CNTL__VRS_OVERRIDE_RATE_Y__SHIFT 0x6
#define DB_VRS_OVERRIDE_CNTL__VRS_OVERRIDE_RATE_COMBINER_MODE_MASK 0x00000007L
#define DB_VRS_OVERRIDE_CNTL__VRS_OVERRIDE_RATE_X_MASK 0x00000030L
#define DB_VRS_OVERRIDE_CNTL__VRS_OVERRIDE_RATE_Y_MASK 0x000000C0L
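// Illustrative sketch, not part of this change: each field above is described
// by a __SHIFT/_MASK pair, which amdgpu's REG_SET_FIELD()/REG_GET_FIELD()
// helpers (defined in amdgpu.h) stitch together by token concatenation.
// Assuming those helpers, composing a hypothetical VRS override value might
// look like:
//
//	u32 v = 0;
//	/* the combiner mode and X/Y rate values here are illustrative only */
//	v = REG_SET_FIELD(v, DB_VRS_OVERRIDE_CNTL, VRS_OVERRIDE_RATE_COMBINER_MODE, 1);
//	v = REG_SET_FIELD(v, DB_VRS_OVERRIDE_CNTL, VRS_OVERRIDE_RATE_X, 1);
//	v = REG_SET_FIELD(v, DB_VRS_OVERRIDE_CNTL, VRS_OVERRIDE_RATE_Y, 1);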
//DB_Z_READ_BASE_HI
#define DB_Z_READ_BASE_HI__BASE_HI__SHIFT 0x0
#define DB_Z_READ_BASE_HI__BASE_HI_MASK 0x000000FFL
@@ -22598,6 +22615,7 @@
#define PA_CL_VS_OUT_CNTL__VS_OUT_MISC_SIDE_BUS_ENA__SHIFT 0x18
#define PA_CL_VS_OUT_CNTL__USE_VTX_GS_CUT_FLAG__SHIFT 0x19
#define PA_CL_VS_OUT_CNTL__USE_VTX_LINE_WIDTH__SHIFT 0x1b
#define PA_CL_VS_OUT_CNTL__USE_VTX_VRS_RATE__SHIFT 0x1c
#define PA_CL_VS_OUT_CNTL__BYPASS_VTX_RATE_COMBINER__SHIFT 0x1d
#define PA_CL_VS_OUT_CNTL__BYPASS_PRIM_RATE_COMBINER__SHIFT 0x1e
#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_0_MASK 0x00000001L
@@ -22627,6 +22645,7 @@
#define PA_CL_VS_OUT_CNTL__VS_OUT_MISC_SIDE_BUS_ENA_MASK 0x01000000L
#define PA_CL_VS_OUT_CNTL__USE_VTX_GS_CUT_FLAG_MASK 0x02000000L
#define PA_CL_VS_OUT_CNTL__USE_VTX_LINE_WIDTH_MASK 0x08000000L
#define PA_CL_VS_OUT_CNTL__USE_VTX_VRS_RATE_MASK 0x10000000L
#define PA_CL_VS_OUT_CNTL__BYPASS_VTX_RATE_COMBINER_MASK 0x20000000L
#define PA_CL_VS_OUT_CNTL__BYPASS_PRIM_RATE_COMBINER_MASK 0x40000000L
//PA_CL_NANINF_CNTL
@@ -22740,6 +22759,19 @@
//PA_STATE_STEREO_X
#define PA_STATE_STEREO_X__STEREO_X_OFFSET__SHIFT 0x0
#define PA_STATE_STEREO_X__STEREO_X_OFFSET_MASK 0xFFFFFFFFL
//PA_CL_VRS_CNTL
#define PA_CL_VRS_CNTL__VERTEX_RATE_COMBINER_MODE__SHIFT 0x0
#define PA_CL_VRS_CNTL__PRIMITIVE_RATE_COMBINER_MODE__SHIFT 0x3
#define PA_CL_VRS_CNTL__HTILE_RATE_COMBINER_MODE__SHIFT 0x6
#define PA_CL_VRS_CNTL__SAMPLE_ITER_COMBINER_MODE__SHIFT 0x9
#define PA_CL_VRS_CNTL__EXPOSE_VRS_PIXELS_MASK__SHIFT 0xd
#define PA_CL_VRS_CNTL__CMASK_RATE_HINT_FORCE_ZERO__SHIFT 0xe
#define PA_CL_VRS_CNTL__VERTEX_RATE_COMBINER_MODE_MASK 0x00000007L
#define PA_CL_VRS_CNTL__PRIMITIVE_RATE_COMBINER_MODE_MASK 0x00000038L
#define PA_CL_VRS_CNTL__HTILE_RATE_COMBINER_MODE_MASK 0x000001C0L
#define PA_CL_VRS_CNTL__SAMPLE_ITER_COMBINER_MODE_MASK 0x00000E00L
#define PA_CL_VRS_CNTL__EXPOSE_VRS_PIXELS_MASK_MASK 0x00002000L
#define PA_CL_VRS_CNTL__CMASK_RATE_HINT_FORCE_ZERO_MASK 0x00004000L
//PA_SU_POINT_SIZE
#define PA_SU_POINT_SIZE__HEIGHT__SHIFT 0x0
#define PA_SU_POINT_SIZE__WIDTH__SHIFT 0x10
@@ -23088,6 +23120,7 @@
#define DB_HTILE_SURFACE__DST_OUTSIDE_ZERO_TO_ONE__SHIFT 0x10
#define DB_HTILE_SURFACE__RESERVED_FIELD_6__SHIFT 0x11
#define DB_HTILE_SURFACE__PIPE_ALIGNED__SHIFT 0x12
#define DB_HTILE_SURFACE__VRS_HTILE_ENCODING__SHIFT 0x13
#define DB_HTILE_SURFACE__RESERVED_FIELD_1_MASK 0x00000001L
#define DB_HTILE_SURFACE__FULL_CACHE_MASK 0x00000002L
#define DB_HTILE_SURFACE__RESERVED_FIELD_2_MASK 0x00000004L
@@ -23097,6 +23130,7 @@
#define DB_HTILE_SURFACE__DST_OUTSIDE_ZERO_TO_ONE_MASK 0x00010000L
#define DB_HTILE_SURFACE__RESERVED_FIELD_6_MASK 0x00020000L
#define DB_HTILE_SURFACE__PIPE_ALIGNED_MASK 0x00040000L
#define DB_HTILE_SURFACE__VRS_HTILE_ENCODING_MASK 0x00180000L
//DB_SRESULTS_COMPARE_STATE0
#define DB_SRESULTS_COMPARE_STATE0__COMPAREFUNC0__SHIFT 0x0
#define DB_SRESULTS_COMPARE_STATE0__COMPAREVALUE0__SHIFT 0x4
@@ -24954,6 +24988,7 @@
#define CB_COLOR0_ATTRIB3__CMASK_PIPE_ALIGNED__SHIFT 0x1a
#define CB_COLOR0_ATTRIB3__RESOURCE_LEVEL__SHIFT 0x1b
#define CB_COLOR0_ATTRIB3__DCC_PIPE_ALIGNED__SHIFT 0x1e
#define CB_COLOR0_ATTRIB3__VRS_RATE_HINT_ENABLE__SHIFT 0x1f
#define CB_COLOR0_ATTRIB3__MIP0_DEPTH_MASK 0x00001FFFL
#define CB_COLOR0_ATTRIB3__META_LINEAR_MASK 0x00002000L
#define CB_COLOR0_ATTRIB3__COLOR_SW_MODE_MASK 0x0007C000L
@@ -24962,6 +24997,7 @@
#define CB_COLOR0_ATTRIB3__CMASK_PIPE_ALIGNED_MASK 0x04000000L
#define CB_COLOR0_ATTRIB3__RESOURCE_LEVEL_MASK 0x38000000L
#define CB_COLOR0_ATTRIB3__DCC_PIPE_ALIGNED_MASK 0x40000000L
#define CB_COLOR0_ATTRIB3__VRS_RATE_HINT_ENABLE_MASK 0x80000000L
//CB_COLOR1_ATTRIB3
#define CB_COLOR1_ATTRIB3__MIP0_DEPTH__SHIFT 0x0
#define CB_COLOR1_ATTRIB3__META_LINEAR__SHIFT 0xd
@@ -24971,6 +25007,7 @@
#define CB_COLOR1_ATTRIB3__CMASK_PIPE_ALIGNED__SHIFT 0x1a
#define CB_COLOR1_ATTRIB3__RESOURCE_LEVEL__SHIFT 0x1b
#define CB_COLOR1_ATTRIB3__DCC_PIPE_ALIGNED__SHIFT 0x1e
#define CB_COLOR1_ATTRIB3__VRS_RATE_HINT_ENABLE__SHIFT 0x1f
#define CB_COLOR1_ATTRIB3__MIP0_DEPTH_MASK 0x00001FFFL
#define CB_COLOR1_ATTRIB3__META_LINEAR_MASK 0x00002000L
#define CB_COLOR1_ATTRIB3__COLOR_SW_MODE_MASK 0x0007C000L
@@ -24979,6 +25016,7 @@
#define CB_COLOR1_ATTRIB3__CMASK_PIPE_ALIGNED_MASK 0x04000000L
#define CB_COLOR1_ATTRIB3__RESOURCE_LEVEL_MASK 0x38000000L
#define CB_COLOR1_ATTRIB3__DCC_PIPE_ALIGNED_MASK 0x40000000L
#define CB_COLOR1_ATTRIB3__VRS_RATE_HINT_ENABLE_MASK 0x80000000L
//CB_COLOR2_ATTRIB3
#define CB_COLOR2_ATTRIB3__MIP0_DEPTH__SHIFT 0x0
#define CB_COLOR2_ATTRIB3__META_LINEAR__SHIFT 0xd
@@ -24988,6 +25026,7 @@
#define CB_COLOR2_ATTRIB3__CMASK_PIPE_ALIGNED__SHIFT 0x1a
#define CB_COLOR2_ATTRIB3__RESOURCE_LEVEL__SHIFT 0x1b
#define CB_COLOR2_ATTRIB3__DCC_PIPE_ALIGNED__SHIFT 0x1e
#define CB_COLOR2_ATTRIB3__VRS_RATE_HINT_ENABLE__SHIFT 0x1f
#define CB_COLOR2_ATTRIB3__MIP0_DEPTH_MASK 0x00001FFFL
#define CB_COLOR2_ATTRIB3__META_LINEAR_MASK 0x00002000L
#define CB_COLOR2_ATTRIB3__COLOR_SW_MODE_MASK 0x0007C000L
@@ -24996,6 +25035,7 @@
#define CB_COLOR2_ATTRIB3__CMASK_PIPE_ALIGNED_MASK 0x04000000L
#define CB_COLOR2_ATTRIB3__RESOURCE_LEVEL_MASK 0x38000000L
#define CB_COLOR2_ATTRIB3__DCC_PIPE_ALIGNED_MASK 0x40000000L
#define CB_COLOR2_ATTRIB3__VRS_RATE_HINT_ENABLE_MASK 0x80000000L
//CB_COLOR3_ATTRIB3
#define CB_COLOR3_ATTRIB3__MIP0_DEPTH__SHIFT 0x0
#define CB_COLOR3_ATTRIB3__META_LINEAR__SHIFT 0xd
@@ -25005,6 +25045,7 @@
#define CB_COLOR3_ATTRIB3__CMASK_PIPE_ALIGNED__SHIFT 0x1a
#define CB_COLOR3_ATTRIB3__RESOURCE_LEVEL__SHIFT 0x1b
#define CB_COLOR3_ATTRIB3__DCC_PIPE_ALIGNED__SHIFT 0x1e
#define CB_COLOR3_ATTRIB3__VRS_RATE_HINT_ENABLE__SHIFT 0x1f
#define CB_COLOR3_ATTRIB3__MIP0_DEPTH_MASK 0x00001FFFL
#define CB_COLOR3_ATTRIB3__META_LINEAR_MASK 0x00002000L
#define CB_COLOR3_ATTRIB3__COLOR_SW_MODE_MASK 0x0007C000L
@@ -25013,6 +25054,7 @@
#define CB_COLOR3_ATTRIB3__CMASK_PIPE_ALIGNED_MASK 0x04000000L
#define CB_COLOR3_ATTRIB3__RESOURCE_LEVEL_MASK 0x38000000L
#define CB_COLOR3_ATTRIB3__DCC_PIPE_ALIGNED_MASK 0x40000000L
#define CB_COLOR3_ATTRIB3__VRS_RATE_HINT_ENABLE_MASK 0x80000000L
//CB_COLOR4_ATTRIB3
#define CB_COLOR4_ATTRIB3__MIP0_DEPTH__SHIFT 0x0
#define CB_COLOR4_ATTRIB3__META_LINEAR__SHIFT 0xd
@@ -25022,6 +25064,7 @@
#define CB_COLOR4_ATTRIB3__CMASK_PIPE_ALIGNED__SHIFT 0x1a
#define CB_COLOR4_ATTRIB3__RESOURCE_LEVEL__SHIFT 0x1b
#define CB_COLOR4_ATTRIB3__DCC_PIPE_ALIGNED__SHIFT 0x1e
#define CB_COLOR4_ATTRIB3__VRS_RATE_HINT_ENABLE__SHIFT 0x1f
#define CB_COLOR4_ATTRIB3__MIP0_DEPTH_MASK 0x00001FFFL
#define CB_COLOR4_ATTRIB3__META_LINEAR_MASK 0x00002000L
#define CB_COLOR4_ATTRIB3__COLOR_SW_MODE_MASK 0x0007C000L
@@ -25030,6 +25073,7 @@
#define CB_COLOR4_ATTRIB3__CMASK_PIPE_ALIGNED_MASK 0x04000000L
#define CB_COLOR4_ATTRIB3__RESOURCE_LEVEL_MASK 0x38000000L
#define CB_COLOR4_ATTRIB3__DCC_PIPE_ALIGNED_MASK 0x40000000L
#define CB_COLOR4_ATTRIB3__VRS_RATE_HINT_ENABLE_MASK 0x80000000L
//CB_COLOR5_ATTRIB3
#define CB_COLOR5_ATTRIB3__MIP0_DEPTH__SHIFT 0x0
#define CB_COLOR5_ATTRIB3__META_LINEAR__SHIFT 0xd
@@ -25039,6 +25083,7 @@
#define CB_COLOR5_ATTRIB3__CMASK_PIPE_ALIGNED__SHIFT 0x1a
#define CB_COLOR5_ATTRIB3__RESOURCE_LEVEL__SHIFT 0x1b
#define CB_COLOR5_ATTRIB3__DCC_PIPE_ALIGNED__SHIFT 0x1e
#define CB_COLOR5_ATTRIB3__VRS_RATE_HINT_ENABLE__SHIFT 0x1f
#define CB_COLOR5_ATTRIB3__MIP0_DEPTH_MASK 0x00001FFFL
#define CB_COLOR5_ATTRIB3__META_LINEAR_MASK 0x00002000L
#define CB_COLOR5_ATTRIB3__COLOR_SW_MODE_MASK 0x0007C000L
@@ -25047,6 +25092,7 @@
#define CB_COLOR5_ATTRIB3__CMASK_PIPE_ALIGNED_MASK 0x04000000L
#define CB_COLOR5_ATTRIB3__RESOURCE_LEVEL_MASK 0x38000000L
#define CB_COLOR5_ATTRIB3__DCC_PIPE_ALIGNED_MASK 0x40000000L
#define CB_COLOR5_ATTRIB3__VRS_RATE_HINT_ENABLE_MASK 0x80000000L
//CB_COLOR6_ATTRIB3
#define CB_COLOR6_ATTRIB3__MIP0_DEPTH__SHIFT 0x0
#define CB_COLOR6_ATTRIB3__META_LINEAR__SHIFT 0xd
@@ -25056,6 +25102,7 @@
#define CB_COLOR6_ATTRIB3__CMASK_PIPE_ALIGNED__SHIFT 0x1a
#define CB_COLOR6_ATTRIB3__RESOURCE_LEVEL__SHIFT 0x1b
#define CB_COLOR6_ATTRIB3__DCC_PIPE_ALIGNED__SHIFT 0x1e
#define CB_COLOR6_ATTRIB3__VRS_RATE_HINT_ENABLE__SHIFT 0x1f
#define CB_COLOR6_ATTRIB3__MIP0_DEPTH_MASK 0x00001FFFL
#define CB_COLOR6_ATTRIB3__META_LINEAR_MASK 0x00002000L
#define CB_COLOR6_ATTRIB3__COLOR_SW_MODE_MASK 0x0007C000L
@@ -25064,6 +25111,7 @@
#define CB_COLOR6_ATTRIB3__CMASK_PIPE_ALIGNED_MASK 0x04000000L
#define CB_COLOR6_ATTRIB3__RESOURCE_LEVEL_MASK 0x38000000L
#define CB_COLOR6_ATTRIB3__DCC_PIPE_ALIGNED_MASK 0x40000000L
#define CB_COLOR6_ATTRIB3__VRS_RATE_HINT_ENABLE_MASK 0x80000000L
//CB_COLOR7_ATTRIB3
#define CB_COLOR7_ATTRIB3__MIP0_DEPTH__SHIFT 0x0
#define CB_COLOR7_ATTRIB3__META_LINEAR__SHIFT 0xd
@@ -25073,6 +25121,7 @@
#define CB_COLOR7_ATTRIB3__CMASK_PIPE_ALIGNED__SHIFT 0x1a
#define CB_COLOR7_ATTRIB3__RESOURCE_LEVEL__SHIFT 0x1b
#define CB_COLOR7_ATTRIB3__DCC_PIPE_ALIGNED__SHIFT 0x1e
#define CB_COLOR7_ATTRIB3__VRS_RATE_HINT_ENABLE__SHIFT 0x1f
#define CB_COLOR7_ATTRIB3__MIP0_DEPTH_MASK 0x00001FFFL
#define CB_COLOR7_ATTRIB3__META_LINEAR_MASK 0x00002000L
#define CB_COLOR7_ATTRIB3__COLOR_SW_MODE_MASK 0x0007C000L
@@ -25081,6 +25130,7 @@
#define CB_COLOR7_ATTRIB3__CMASK_PIPE_ALIGNED_MASK 0x04000000L
#define CB_COLOR7_ATTRIB3__RESOURCE_LEVEL_MASK 0x38000000L
#define CB_COLOR7_ATTRIB3__DCC_PIPE_ALIGNED_MASK 0x40000000L
#define CB_COLOR7_ATTRIB3__VRS_RATE_HINT_ENABLE_MASK 0x80000000L

// addressBlock: gc_gfxudec

@@ -151,6 +151,8 @@
#define mmUVD_LMI_CTRL2_BASE_IDX 1
#define mmUVD_MASTINT_EN 0x0540
#define mmUVD_MASTINT_EN_BASE_IDX 1
#define mmUVD_FW_STATUS 0x0557
#define mmUVD_FW_STATUS_BASE_IDX 1
#define mmJPEG_CGC_CTRL 0x0565
#define mmJPEG_CGC_CTRL_BASE_IDX 1
#define mmUVD_LMI_CTRL 0x0566
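// Illustrative note, not part of this change: each mm* value is a dword offset
// relative to an IP base segment, and the matching *_BASE_IDX selects which
// segment. The SOC15_REG_OFFSET()/RREG32_SOC15() helpers in soc15_common.h
// resolve this at runtime roughly as:
//
//	/* adev->reg_offset[UVD_HWIP][0][mmUVD_FW_STATUS_BASE_IDX] + mmUVD_FW_STATUS */
//	u32 status = RREG32_SOC15(UVD, 0, mmUVD_FW_STATUS);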
@@ -219,4 +221,5 @@
#define mmUVD_CONTEXT_ID2_BASE_IDX 1

#endif

@@ -807,5 +807,25 @@
#define UVD_CONTEXT_ID2__CONTEXT_ID2__SHIFT 0x0
#define UVD_CONTEXT_ID2__CONTEXT_ID2_MASK 0xFFFFFFFFL

//UVD_FW_STATUS
#define UVD_FW_STATUS__BUSY__SHIFT 0x0
#define UVD_FW_STATUS__ACTIVE__SHIFT 0x1
#define UVD_FW_STATUS__SEND_EFUSE_REQ__SHIFT 0x2
#define UVD_FW_STATUS__DONE__SHIFT 0x8
#define UVD_FW_STATUS__PASS__SHIFT 0x10
#define UVD_FW_STATUS__FAIL__SHIFT 0x11
#define UVD_FW_STATUS__INVALID_LEN__SHIFT 0x12
#define UVD_FW_STATUS__INVALID_0_PADDING__SHIFT 0x13
#define UVD_FW_STATUS__INVALID_NONCE__SHIFT 0x14
#define UVD_FW_STATUS__BUSY_MASK 0x00000001L
#define UVD_FW_STATUS__ACTIVE_MASK 0x00000002L
#define UVD_FW_STATUS__SEND_EFUSE_REQ_MASK 0x00000004L
#define UVD_FW_STATUS__DONE_MASK 0x00000100L
#define UVD_FW_STATUS__PASS_MASK 0x00010000L
#define UVD_FW_STATUS__FAIL_MASK 0x00020000L
#define UVD_FW_STATUS__INVALID_LEN_MASK 0x00040000L
#define UVD_FW_STATUS__INVALID_0_PADDING_MASK 0x00080000L
#define UVD_FW_STATUS__INVALID_NONCE_MASK 0x00100000L
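// Illustrative sketch, not part of this change: status bits like these are
// typically tested with REG_GET_FIELD() from amdgpu.h. A hypothetical
// firmware-boot check, assuming the read helper sketched above:
//
//	u32 status = RREG32_SOC15(UVD, 0, mmUVD_FW_STATUS);
//	if (REG_GET_FIELD(status, UVD_FW_STATUS, DONE) &&
//	    REG_GET_FIELD(status, UVD_FW_STATUS, PASS))
//		/* firmware reported successful init */;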

#endif

Some files were not shown because too many files have changed in this diff.