Merge branch 'drm-next-4.17' of git://people.freedesktop.org/~agd5f/linux into drm-next
- Continued cleanup and restructuring of powerplay
- Fetch VRAM type from vbios rather than hardcoding for SOC15 asics
- Allow ttm to drop its backing store when drivers don't need it
- DC bandwidth calc updates
- Enable DC backlight control pre-DCE11 asics
- Enable DC on all supported asics
- DC Fixes for planes due to the way our hw is ordered vs what drm expects
- DC CTM/regamma fixes
- Misc cleanup and bug fixes

* 'drm-next-4.17' of git://people.freedesktop.org/~agd5f/linux: (89 commits)
  amdgpu/dm: Default PRE_VEGA ASIC support to 'y'
  drm/amd/pp: Remove the cgs wrapper for notify smu version on APU
  drm/amd/display: fix dereferencing possible ERR_PTR()
  drm/amd/display: Refine disable VGA
  drm/amdgpu: Improve documentation of bo_ptr in amdgpu_bo_create_kernel
  drm/radeon: Don't turn off DP sink when disconnected
  drm/amd/pp: Rename file name cz_* to smu8_*
  drm/amd/pp: Replace function/struct name cz_* with smu8_*
  drm/amd/pp: Remove unneeded void * casts in cz_hwmgr.c/cz_smumgr.c
  drm/amd/pp: Mv cz uvd/vce pg/dpm functions to cz_hwmgr.c
  drm/amd/pp: Remove dead header file pp_asicblocks.h
  drm/amd/pp: Delete dead code on cz_clockpowergating.c
  drm/amdgpu: Call amdgpu_ucode_fini_bo in amd_powerplay.c
  drm/amdgpu: Remove wrapper layer of smu ip functions
  drm/amdgpu: Don't compared ip_block_type with ip_block_index
  drm/amdgpu: Plus NULL function pointer check
  drm/amd/pp: Move helper functions to smu_help.c
  drm/amd/pp: Replace rv_* with smu10_*
  drm/amd/pp: Fix function parameter not correct
  drm/amd/pp: Add rv_copy_table_from/to_smc to smu backend function table
  ...
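Many of the hunks below follow from one API change: amdgpu_bo_create() lost its bool kernel and struct sg_table * parameters and now takes an explicit enum ttm_bo_type (see the amdgpu_object.h hunk further down). As an illustrative sketch only — the 4 KiB size and flag choice here are hypothetical, not taken from this commit — a caller converts like this:

	struct amdgpu_bo *bo = NULL;
	int r;

	/* post-merge signature: size, alignment, placement domain, flags,
	 * explicit ttm_bo_type, optional reservation object, out parameter */
	r = amdgpu_bo_create(adev, 4096, PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
			     ttm_bo_type_kernel, NULL, &bo);
	if (r)
		return r;

Kernel-internal allocations pass ttm_bo_type_kernel, GEM user buffers pass ttm_bo_type_device, and imported dma-buf page tables pass ttm_bo_type_sg, replacing the old kernel/sg parameter pair.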
commit 287d2ac36b
@@ -87,8 +87,7 @@ amdgpu-y += \
 
 # add SMC block
 amdgpu-y += \
-	amdgpu_dpm.o \
-	amdgpu_powerplay.o
+	amdgpu_dpm.o
 
 # add DCE block
 amdgpu-y += \

@@ -441,7 +441,7 @@ struct amdgpu_sa_bo {
 void amdgpu_gem_force_release(struct amdgpu_device *adev);
 int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
			     int alignment, u32 initial_domain,
-			     u64 flags, bool kernel,
+			     u64 flags, enum ttm_bo_type type,
			     struct reservation_object *resv,
			     struct drm_gem_object **obj);
@@ -1081,8 +1081,6 @@ struct amdgpu_wb {
 int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb);
 void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb);
 
-void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);
-
 /*
  * SDMA
  */
@@ -1395,9 +1393,7 @@ enum amd_hw_ip_block_type {
 #define HWIP_MAX_INSTANCE 6
 
 struct amd_powerplay {
-	struct cgs_device *cgs_device;
 	void *pp_handle;
-	const struct amd_ip_funcs *ip_funcs;
 	const struct amd_pm_funcs *pp_funcs;
 };
 
@@ -1632,6 +1628,9 @@ uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
		    uint32_t acc_flags);
 void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
		    uint32_t acc_flags);
+void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value);
+uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset);
+
 u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg);
 void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v);
 
@@ -1655,6 +1654,9 @@ int emu_soc_asic_init(struct amdgpu_device *adev);
 #define RREG32_NO_KIQ(reg) amdgpu_mm_rreg(adev, (reg), AMDGPU_REGS_NO_KIQ)
 #define WREG32_NO_KIQ(reg, v) amdgpu_mm_wreg(adev, (reg), (v), AMDGPU_REGS_NO_KIQ)
 
+#define RREG8(reg) amdgpu_mm_rreg8(adev, (reg))
+#define WREG8(reg, v) amdgpu_mm_wreg8(adev, (reg), (v))
+
 #define RREG32(reg) amdgpu_mm_rreg(adev, (reg), 0)
 #define RREG32_IDX(reg) amdgpu_mm_rreg(adev, (reg), AMDGPU_REGS_IDX)
 #define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", amdgpu_mm_rreg(adev, (reg), 0))

@@ -221,8 +221,9 @@ int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
	uint64_t gpu_addr_tmp = 0;
	void *cpu_ptr_tmp = NULL;
 
-	r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_GTT,
-			     AMDGPU_GEM_CREATE_CPU_GTT_USWC, NULL, NULL, &bo);
+	r = amdgpu_bo_create(adev, size, PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
+			     AMDGPU_GEM_CREATE_CPU_GTT_USWC, ttm_bo_type_kernel,
+			     NULL, &bo);
	if (r) {
		dev_err(adev->dev,
			"failed to allocate BO for amdkfd (%d)\n", r);

@@ -997,8 +997,8 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
	pr_debug("\tcreate BO VA 0x%llx size 0x%llx domain %s\n",
			va, size, domain_string(alloc_domain));
 
-	ret = amdgpu_bo_create(adev, size, byte_align, false,
-			       alloc_domain, alloc_flags, NULL, NULL, &bo);
+	ret = amdgpu_bo_create(adev, size, byte_align,
+			       alloc_domain, alloc_flags, ttm_bo_type_device, NULL, &bo);
	if (ret) {
		pr_debug("Failed to create BO on domain %s. ret %d\n",
				domain_string(alloc_domain), ret);

@@ -114,6 +114,9 @@ union igp_info {
	struct atom_integrated_system_info_v1_11 v11;
 };
 
+union umc_info {
+	struct atom_umc_info_v3_1 v31;
+};
 /*
  * Return vram width from integrated system info table, if available,
  * or 0 if not.
@@ -143,6 +146,94 @@ int amdgpu_atomfirmware_get_vram_width(struct amdgpu_device *adev)
	return 0;
 }
 
+static int convert_atom_mem_type_to_vram_type (struct amdgpu_device *adev,
+					       int atom_mem_type)
+{
+	int vram_type;
+
+	if (adev->flags & AMD_IS_APU) {
+		switch (atom_mem_type) {
+		case Ddr2MemType:
+		case LpDdr2MemType:
+			vram_type = AMDGPU_VRAM_TYPE_DDR2;
+			break;
+		case Ddr3MemType:
+		case LpDdr3MemType:
+			vram_type = AMDGPU_VRAM_TYPE_DDR3;
+			break;
+		case Ddr4MemType:
+		case LpDdr4MemType:
+			vram_type = AMDGPU_VRAM_TYPE_DDR4;
+			break;
+		default:
+			vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
+			break;
+		}
+	} else {
+		switch (atom_mem_type) {
+		case ATOM_DGPU_VRAM_TYPE_GDDR5:
+			vram_type = AMDGPU_VRAM_TYPE_GDDR5;
+			break;
+		case ATOM_DGPU_VRAM_TYPE_HBM:
+			vram_type = AMDGPU_VRAM_TYPE_HBM;
+			break;
+		default:
+			vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
+			break;
+		}
+	}
+
+	return vram_type;
+}
+/*
+ * Return vram type from either integrated system info table
+ * or umc info table, if available, or 0 (TYPE_UNKNOWN) if not
+ */
+int amdgpu_atomfirmware_get_vram_type(struct amdgpu_device *adev)
+{
+	struct amdgpu_mode_info *mode_info = &adev->mode_info;
+	int index;
+	u16 data_offset, size;
+	union igp_info *igp_info;
+	union umc_info *umc_info;
+	u8 frev, crev;
+	u8 mem_type;
+
+	if (adev->flags & AMD_IS_APU)
+		index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
+						    integratedsysteminfo);
+	else
+		index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
+						    umc_info);
+	if (amdgpu_atom_parse_data_header(mode_info->atom_context,
+					  index, &size,
+					  &frev, &crev, &data_offset)) {
+		if (adev->flags & AMD_IS_APU) {
+			igp_info = (union igp_info *)
+				(mode_info->atom_context->bios + data_offset);
+			switch (crev) {
+			case 11:
+				mem_type = igp_info->v11.memorytype;
+				return convert_atom_mem_type_to_vram_type(adev, mem_type);
+			default:
+				return 0;
+			}
+		} else {
+			umc_info = (union umc_info *)
+				(mode_info->atom_context->bios + data_offset);
+			switch (crev) {
+			case 1:
+				mem_type = umc_info->v31.vram_type;
+				return convert_atom_mem_type_to_vram_type(adev, mem_type);
+			default:
+				return 0;
+			}
+		}
+	}
+
+	return 0;
+}
+
 union firmware_info {
	struct atom_firmware_info_v3_1 v31;
 };
@@ -151,10 +242,6 @@ union smu_info {
	struct atom_smu_info_v3_1 v31;
 };
 
-union umc_info {
-	struct atom_umc_info_v3_1 v31;
-};
-
 int amdgpu_atomfirmware_get_clock_info(struct amdgpu_device *adev)
 {
	struct amdgpu_mode_info *mode_info = &adev->mode_info;

@@ -28,6 +28,7 @@ bool amdgpu_atomfirmware_gpu_supports_virtualization(struct amdgpu_device *adev)
 void amdgpu_atomfirmware_scratch_regs_init(struct amdgpu_device *adev);
 int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev);
 int amdgpu_atomfirmware_get_vram_width(struct amdgpu_device *adev);
+int amdgpu_atomfirmware_get_vram_type(struct amdgpu_device *adev);
 int amdgpu_atomfirmware_get_clock_info(struct amdgpu_device *adev);
 
 #endif

@@ -80,8 +80,8 @@ static void amdgpu_benchmark_move(struct amdgpu_device *adev, unsigned size,
	int time;
 
	n = AMDGPU_BENCHMARK_ITERATIONS;
-	r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, sdomain, 0, NULL,
-			     NULL, &sobj);
+	r = amdgpu_bo_create(adev, size, PAGE_SIZE,sdomain, 0,
+			     ttm_bo_type_kernel, NULL, &sobj);
	if (r) {
		goto out_cleanup;
	}
@@ -93,8 +93,8 @@ static void amdgpu_benchmark_move(struct amdgpu_device *adev, unsigned size,
	if (r) {
		goto out_cleanup;
	}
-	r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, ddomain, 0, NULL,
-			     NULL, &dobj);
+	r = amdgpu_bo_create(adev, size, PAGE_SIZE, ddomain, 0,
+			     ttm_bo_type_kernel, NULL, &dobj);
	if (r) {
		goto out_cleanup;
	}

@@ -654,11 +654,6 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
			else
				strcpy(fw_name, "amdgpu/vega10_smc.bin");
			break;
-		case CHIP_CARRIZO:
-		case CHIP_STONEY:
-		case CHIP_RAVEN:
-			adev->pm.fw_version = info->version;
-			return 0;
		default:
			DRM_ERROR("SMC firmware not supported\n");
			return -EINVAL;

@@ -69,25 +69,18 @@ void amdgpu_connector_hotplug(struct drm_connector *connector)
		/* don't do anything if sink is not display port, i.e.,
		 * passive dp->(dvi|hdmi) adaptor
		 */
-		if (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) {
-			int saved_dpms = connector->dpms;
-			/* Only turn off the display if it's physically disconnected */
-			if (!amdgpu_display_hpd_sense(adev, amdgpu_connector->hpd.hpd)) {
-				drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
-			} else if (amdgpu_atombios_dp_needs_link_train(amdgpu_connector)) {
-				/* Don't try to start link training before we
-				 * have the dpcd */
-				if (amdgpu_atombios_dp_get_dpcd(amdgpu_connector))
-					return;
+		if (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT &&
+		    amdgpu_display_hpd_sense(adev, amdgpu_connector->hpd.hpd) &&
+		    amdgpu_atombios_dp_needs_link_train(amdgpu_connector)) {
+			/* Don't start link training before we have the DPCD */
+			if (amdgpu_atombios_dp_get_dpcd(amdgpu_connector))
+				return;
 
-				/* set it to OFF so that drm_helper_connector_dpms()
-				 * won't return immediately since the current state
-				 * is ON at this point.
-				 */
-				connector->dpms = DRM_MODE_DPMS_OFF;
-				drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
-			}
-			connector->dpms = saved_dpms;
+			/* Turn the connector off and back on immediately, which
+			 * will trigger link training
+			 */
+			drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+			drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
		}
	}
 }

@@ -87,6 +87,8 @@ static const char *amdgpu_asic_name[] = {
	"LAST",
 };
 
+static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);
+
 bool amdgpu_device_is_px(struct drm_device *dev)
 {
	struct amdgpu_device *adev = dev->dev_private;
@@ -121,6 +123,32 @@ uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
	return ret;
 }
 
+/*
+ * MMIO register read with bytes helper functions
+ * @offset:bytes offset from MMIO start
+ *
+ */
+
+uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset) {
+	if (offset < adev->rmmio_size)
+		return (readb(adev->rmmio + offset));
+	BUG();
+}
+
+/*
+ * MMIO register write with bytes helper functions
+ * @offset:bytes offset from MMIO start
+ * @value: the value want to be written to the register
+ *
+ */
+void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value) {
+	if (offset < adev->rmmio_size)
+		writeb(value, adev->rmmio + offset);
+	else
+		BUG();
+}
+
+
 void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
		    uint32_t acc_flags)
 {
@@ -830,6 +858,8 @@ static void amdgpu_device_check_arguments(struct amdgpu_device *adev)
		dev_warn(adev->dev, "lockup_timeout msut be > 0, adjusting to 10000\n");
		amdgpu_lockup_timeout = 10000;
	}
+
+	adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
 }
 
 /**
@@ -1387,7 +1417,8 @@ static int amdgpu_device_ip_late_set_cg_state(struct amdgpu_device *adev)
			continue;
		/* skip CG for VCE/UVD, it's handled specially */
		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
-		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE) {
+		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
+		    adev->ip_blocks[i].version->funcs->set_clockgating_state) {
			/* enable clockgating to save power */
			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
										     AMD_CG_STATE_GATE);
@@ -1436,7 +1467,8 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.hw)
			continue;
-		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
+		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC &&
+			adev->ip_blocks[i].version->funcs->set_clockgating_state) {
			/* ungate blocks before hw fini so that we can shutdown the blocks safely */
			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
										     AMD_CG_STATE_UNGATE);
@@ -1545,7 +1577,8 @@ int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
		if (!adev->ip_blocks[i].status.valid)
			continue;
		/* ungate blocks so that suspend can properly shut them down */
-		if (i != AMD_IP_BLOCK_TYPE_SMC) {
+		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_SMC &&
+			adev->ip_blocks[i].version->funcs->set_clockgating_state) {
			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
										     AMD_CG_STATE_UNGATE);
			if (r) {
@@ -1878,6 +1911,8 @@ int amdgpu_device_init(struct amdgpu_device *adev,
	if (adev->rio_mem == NULL)
		DRM_INFO("PCI I/O BAR is not found.\n");
 
+	amdgpu_device_get_pcie_info(adev);
+
	/* early init functions */
	r = amdgpu_device_ip_early_init(adev);
	if (r)
@@ -2086,6 +2121,7 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
 
	amdgpu_ib_pool_fini(adev);
	amdgpu_fence_driver_fini(adev);
+	amdgpu_pm_sysfs_fini(adev);
	amdgpu_fbdev_fini(adev);
	r = amdgpu_device_ip_fini(adev);
	if (adev->firmware.gpu_info_fw) {
@@ -2114,7 +2150,6 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
	iounmap(adev->rmmio);
	adev->rmmio = NULL;
	amdgpu_device_doorbell_fini(adev);
-	amdgpu_pm_sysfs_fini(adev);
	amdgpu_debugfs_regs_cleanup(adev);
 }
 
@@ -2755,7 +2790,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
	return r;
 }
 
-void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
+static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
 {
	u32 mask;
	int ret;

@@ -113,11 +113,12 @@ int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev)
	int r;
 
	if (adev->gart.robj == NULL) {
-		r = amdgpu_bo_create(adev, adev->gart.table_size,
-				     PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
+		r = amdgpu_bo_create(adev, adev->gart.table_size, PAGE_SIZE,
+				     AMDGPU_GEM_DOMAIN_VRAM,
				     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
				     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
-				     NULL, NULL, &adev->gart.robj);
+				     ttm_bo_type_kernel, NULL,
+				     &adev->gart.robj);
		if (r) {
			return r;
		}
@@ -315,7 +316,7 @@ int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
	t = offset / AMDGPU_GPU_PAGE_SIZE;
	p = t / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
	for (i = 0; i < pages; i++, p++)
-		adev->gart.pages[p] = pagelist[i];
+		adev->gart.pages[p] = pagelist ? pagelist[i] : NULL;
 #endif
 
	if (!adev->gart.ptr)

@@ -36,8 +36,6 @@ void amdgpu_gem_object_free(struct drm_gem_object *gobj)
	struct amdgpu_bo *robj = gem_to_amdgpu_bo(gobj);
 
	if (robj) {
-		if (robj->gem_base.import_attach)
-			drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg);
		amdgpu_mn_unregister(robj);
		amdgpu_bo_unref(&robj);
	}
@@ -45,7 +43,7 @@ void amdgpu_gem_object_free(struct drm_gem_object *gobj)
 
 int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
			     int alignment, u32 initial_domain,
-			     u64 flags, bool kernel,
+			     u64 flags, enum ttm_bo_type type,
			     struct reservation_object *resv,
			     struct drm_gem_object **obj)
 {
@@ -59,8 +57,8 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
	}
 
 retry:
-	r = amdgpu_bo_create(adev, size, alignment, kernel, initial_domain,
-			     flags, NULL, resv, &bo);
+	r = amdgpu_bo_create(adev, size, alignment, initial_domain,
+			     flags, type, resv, &bo);
	if (r) {
		if (r != -ERESTARTSYS) {
			if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) {

@@ -25,51 +25,12 @@
 #define __AMDGPU_IH_H__
 
 #include <linux/chash.h>
+#include "soc15_ih_clientid.h"
 
 struct amdgpu_device;
-/*
- * vega10+ IH clients
- */
-enum amdgpu_ih_clientid
-{
-	AMDGPU_IH_CLIENTID_IH = 0x00,
-	AMDGPU_IH_CLIENTID_ACP = 0x01,
-	AMDGPU_IH_CLIENTID_ATHUB = 0x02,
-	AMDGPU_IH_CLIENTID_BIF = 0x03,
-	AMDGPU_IH_CLIENTID_DCE = 0x04,
-	AMDGPU_IH_CLIENTID_ISP = 0x05,
-	AMDGPU_IH_CLIENTID_PCIE0 = 0x06,
-	AMDGPU_IH_CLIENTID_RLC = 0x07,
-	AMDGPU_IH_CLIENTID_SDMA0 = 0x08,
-	AMDGPU_IH_CLIENTID_SDMA1 = 0x09,
-	AMDGPU_IH_CLIENTID_SE0SH = 0x0a,
-	AMDGPU_IH_CLIENTID_SE1SH = 0x0b,
-	AMDGPU_IH_CLIENTID_SE2SH = 0x0c,
-	AMDGPU_IH_CLIENTID_SE3SH = 0x0d,
-	AMDGPU_IH_CLIENTID_SYSHUB = 0x0e,
-	AMDGPU_IH_CLIENTID_THM = 0x0f,
-	AMDGPU_IH_CLIENTID_UVD = 0x10,
-	AMDGPU_IH_CLIENTID_VCE0 = 0x11,
-	AMDGPU_IH_CLIENTID_VMC = 0x12,
-	AMDGPU_IH_CLIENTID_XDMA = 0x13,
-	AMDGPU_IH_CLIENTID_GRBM_CP = 0x14,
-	AMDGPU_IH_CLIENTID_ATS = 0x15,
-	AMDGPU_IH_CLIENTID_ROM_SMUIO = 0x16,
-	AMDGPU_IH_CLIENTID_DF = 0x17,
-	AMDGPU_IH_CLIENTID_VCE1 = 0x18,
-	AMDGPU_IH_CLIENTID_PWR = 0x19,
-	AMDGPU_IH_CLIENTID_UTCL2 = 0x1b,
-	AMDGPU_IH_CLIENTID_EA = 0x1c,
-	AMDGPU_IH_CLIENTID_UTCL2LOG = 0x1d,
-	AMDGPU_IH_CLIENTID_MP0 = 0x1e,
-	AMDGPU_IH_CLIENTID_MP1 = 0x1f,
-
-	AMDGPU_IH_CLIENTID_MAX,
-
-	AMDGPU_IH_CLIENTID_VCN = AMDGPU_IH_CLIENTID_UVD
-};
 
+#define AMDGPU_IH_CLIENTID_LEGACY 0
+#define AMDGPU_IH_CLIENTID_MAX SOC15_IH_CLIENTID_MAX
 
 #define AMDGPU_PAGEFAULT_HASH_BITS 8
 struct amdgpu_retryfault_hashtable {

@@ -350,6 +350,7 @@ struct amdgpu_mode_info {
	u16 firmware_flags;
	/* pointer to backlight encoder */
	struct amdgpu_encoder *bl_encoder;
+	u8 bl_level; /* saved backlight level */
	struct amdgpu_audio audio; /* audio stuff */
	int num_crtc; /* number of crtcs */
	int num_hpd; /* number of hpd pins */
@@ -550,14 +551,6 @@ struct amdgpu_connector {
	/* we need to mind the EDID between detect
	   and get modes due to analog/digital/tvencoder */
	struct edid *edid;
-	/* number of modes generated from EDID at 'dc_sink' */
-	int num_modes;
-	/* The 'old' sink - before an HPD.
-	 * The 'current' sink is in dc_link->sink. */
-	struct dc_sink *dc_sink;
-	struct dc_link *dc_link;
-	struct dc_sink *dc_em_sink;
-	const struct dc_stream *stream;
	void *con_priv;
	bool dac_load_detect;
	bool detected_by_load; /* if the connection status was determined by load */
@@ -568,27 +561,6 @@ struct amdgpu_connector {
	enum amdgpu_connector_audio audio;
	enum amdgpu_connector_dither dither;
	unsigned pixelclock_for_modeset;
-
-	struct drm_dp_mst_topology_mgr mst_mgr;
-	struct amdgpu_dm_dp_aux dm_dp_aux;
-	struct drm_dp_mst_port *port;
-	struct amdgpu_connector *mst_port;
-	struct amdgpu_encoder *mst_encoder;
-	struct semaphore mst_sem;
-
-	/* TODO see if we can merge with ddc_bus or make a dm_connector */
-	struct amdgpu_i2c_adapter *i2c;
-
-	/* Monitor range limits */
-	int min_vfreq ;
-	int max_vfreq ;
-	int pixel_clock_mhz;
-
-	/*freesync caps*/
-	struct mod_freesync_caps caps;
-
-	struct mutex hpd_lock;
-
 };
 
 /* TODO: start to use this struct and remove same field from base one */

@@ -60,6 +60,8 @@ static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
 
	amdgpu_bo_kunmap(bo);
 
+	if (bo->gem_base.import_attach)
+		drm_prime_gem_destroy(&bo->gem_base, bo->tbo.sg);
	drm_gem_object_release(&bo->gem_base);
	amdgpu_bo_unref(&bo->parent);
	if (!list_empty(&bo->shadow_list)) {
@@ -173,13 +175,15 @@ void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
 * @size: size for the new BO
 * @align: alignment for the new BO
 * @domain: where to place it
- * @bo_ptr: resulting BO
+ * @bo_ptr: used to initialize BOs in structures
 * @gpu_addr: GPU addr of the pinned BO
 * @cpu_addr: optional CPU address mapping
 *
 * Allocates and pins a BO for kernel internal use, and returns it still
 * reserved.
 *
+ * Note: For bo_ptr new BO is only created if bo_ptr points to NULL.
+ *
 * Returns 0 on success, negative error code otherwise.
 */
 int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
@@ -191,10 +195,10 @@ int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
	int r;
 
	if (!*bo_ptr) {
-		r = amdgpu_bo_create(adev, size, align, true, domain,
+		r = amdgpu_bo_create(adev, size, align, domain,
				     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
				     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
-				     NULL, NULL, bo_ptr);
+				     ttm_bo_type_kernel, NULL, bo_ptr);
		if (r) {
			dev_err(adev->dev, "(%d) failed to allocate kernel bo\n",
				r);
@@ -242,12 +246,14 @@ int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
 * @size: size for the new BO
 * @align: alignment for the new BO
 * @domain: where to place it
- * @bo_ptr: resulting BO
+ * @bo_ptr: used to initialize BOs in structures
 * @gpu_addr: GPU addr of the pinned BO
 * @cpu_addr: optional CPU address mapping
 *
 * Allocates and pins a BO for kernel internal use.
 *
+ * Note: For bo_ptr new BO is only created if bo_ptr points to NULL.
+ *
 * Returns 0 on success, negative error code otherwise.
 */
 int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
@@ -335,21 +341,19 @@ static bool amdgpu_bo_validate_size(struct amdgpu_device *adev,
	return false;
 }
 
-static int amdgpu_bo_do_create(struct amdgpu_device *adev,
-			       unsigned long size, int byte_align,
-			       bool kernel, u32 domain, u64 flags,
-			       struct sg_table *sg,
+static int amdgpu_bo_do_create(struct amdgpu_device *adev, unsigned long size,
+			       int byte_align, u32 domain,
+			       u64 flags, enum ttm_bo_type type,
			       struct reservation_object *resv,
			       struct amdgpu_bo **bo_ptr)
 {
	struct ttm_operation_ctx ctx = {
-		.interruptible = !kernel,
+		.interruptible = (type != ttm_bo_type_kernel),
		.no_wait_gpu = false,
		.resv = resv,
		.flags = TTM_OPT_FLAG_ALLOW_RES_EVICT
	};
	struct amdgpu_bo *bo;
-	enum ttm_bo_type type;
	unsigned long page_align;
	size_t acc_size;
	int r;
@@ -360,13 +364,6 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
	if (!amdgpu_bo_validate_size(adev, size, domain))
		return -ENOMEM;
 
-	if (kernel) {
-		type = ttm_bo_type_kernel;
-	} else if (sg) {
-		type = ttm_bo_type_sg;
-	} else {
-		type = ttm_bo_type_device;
-	}
	*bo_ptr = NULL;
 
	acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
@@ -385,7 +382,8 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
					 AMDGPU_GEM_DOMAIN_GWS |
					 AMDGPU_GEM_DOMAIN_OA);
	bo->allowed_domains = bo->preferred_domains;
-	if (!kernel && bo->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
+	if (type != ttm_bo_type_kernel &&
+	    bo->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
		bo->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;
 
	bo->flags = flags;
@@ -423,7 +421,7 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
 
	r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, type,
				 &bo->placement, page_align, &ctx, acc_size,
-				 sg, resv, &amdgpu_ttm_bo_destroy);
+				 NULL, resv, &amdgpu_ttm_bo_destroy);
	if (unlikely(r != 0))
		return r;
 
@@ -435,7 +433,7 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
	else
		amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved, 0);
 
-	if (kernel)
+	if (type == ttm_bo_type_kernel)
		bo->tbo.priority = 1;
 
	if (flags & AMDGPU_GEM_CREATE_VRAM_CLEARED &&
@@ -479,12 +477,11 @@ static int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
	if (bo->shadow)
		return 0;
 
-	r = amdgpu_bo_do_create(adev, size, byte_align, true,
-				AMDGPU_GEM_DOMAIN_GTT,
+	r = amdgpu_bo_do_create(adev, size, byte_align, AMDGPU_GEM_DOMAIN_GTT,
				AMDGPU_GEM_CREATE_CPU_GTT_USWC |
				AMDGPU_GEM_CREATE_SHADOW,
-				NULL, bo->tbo.resv,
-				&bo->shadow);
+				ttm_bo_type_kernel,
+				bo->tbo.resv, &bo->shadow);
	if (!r) {
		bo->shadow->parent = amdgpu_bo_ref(bo);
		mutex_lock(&adev->shadow_list_lock);
@@ -495,18 +492,17 @@ static int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
	return r;
 }
 
-int amdgpu_bo_create(struct amdgpu_device *adev,
-		     unsigned long size, int byte_align,
-		     bool kernel, u32 domain, u64 flags,
-		     struct sg_table *sg,
+int amdgpu_bo_create(struct amdgpu_device *adev, unsigned long size,
+		     int byte_align, u32 domain,
+		     u64 flags, enum ttm_bo_type type,
		     struct reservation_object *resv,
		     struct amdgpu_bo **bo_ptr)
 {
	uint64_t parent_flags = flags & ~AMDGPU_GEM_CREATE_SHADOW;
	int r;
 
-	r = amdgpu_bo_do_create(adev, size, byte_align, kernel, domain,
-				parent_flags, sg, resv, bo_ptr);
+	r = amdgpu_bo_do_create(adev, size, byte_align, domain,
				parent_flags, type, resv, bo_ptr);
	if (r)
		return r;
 
@@ -821,7 +817,8 @@ static const char *amdgpu_vram_names[] = {
	"GDDR4",
	"GDDR5",
	"HBM",
-	"DDR3"
+	"DDR3",
+	"DDR4",
 };
 
 int amdgpu_bo_init(struct amdgpu_device *adev)

@@ -203,12 +203,11 @@ static inline bool amdgpu_bo_explicit_sync(struct amdgpu_bo *bo)
	return bo->flags & AMDGPU_GEM_CREATE_EXPLICIT_SYNC;
 }
 
-int amdgpu_bo_create(struct amdgpu_device *adev,
-		     unsigned long size, int byte_align,
-		     bool kernel, u32 domain, u64 flags,
-		     struct sg_table *sg,
-		     struct reservation_object *resv,
-		     struct amdgpu_bo **bo_ptr);
+int amdgpu_bo_create(struct amdgpu_device *adev, unsigned long size,
+		     int byte_align, u32 domain,
+		     u64 flags, enum ttm_bo_type type,
+		     struct reservation_object *resv,
+		     struct amdgpu_bo **bo_ptr);
 int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
			      unsigned long size, int align,
			      u32 domain, struct amdgpu_bo **bo_ptr,

@@ -1154,7 +1154,7 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
	umode_t effective_mode = attr->mode;
 
	/* handle non-powerplay limitations */
-	if (!adev->powerplay.cgs_device) {
+	if (!adev->powerplay.pp_handle) {
		/* Skip fan attributes if fan is not present */
		if (adev->pm.no_fan &&
		    (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||

@@ -1,288 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: AMD
- *
- */
-#include "atom.h"
-#include "amdgpu.h"
-#include "amd_shared.h"
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include "amdgpu_pm.h"
-#include <drm/amdgpu_drm.h>
-#include "amdgpu_powerplay.h"
-#include "si_dpm.h"
-#include "cik_dpm.h"
-#include "vi_dpm.h"
-
-static int amdgpu_pp_early_init(void *handle)
-{
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	struct amd_powerplay *amd_pp;
-	int ret = 0;
-
-	amd_pp = &(adev->powerplay);
-	amd_pp->pp_handle = (void *)adev;
-
-	switch (adev->asic_type) {
-	case CHIP_POLARIS11:
-	case CHIP_POLARIS10:
-	case CHIP_POLARIS12:
-	case CHIP_TONGA:
-	case CHIP_FIJI:
-	case CHIP_TOPAZ:
-	case CHIP_CARRIZO:
-	case CHIP_STONEY:
-	case CHIP_VEGA10:
-	case CHIP_RAVEN:
-		amd_pp->cgs_device = amdgpu_cgs_create_device(adev);
-		amd_pp->ip_funcs = &pp_ip_funcs;
-		amd_pp->pp_funcs = &pp_dpm_funcs;
-		break;
-	/* These chips don't have powerplay implemenations */
-#ifdef CONFIG_DRM_AMDGPU_SI
-	case CHIP_TAHITI:
-	case CHIP_PITCAIRN:
-	case CHIP_VERDE:
-	case CHIP_OLAND:
-	case CHIP_HAINAN:
-		amd_pp->ip_funcs = &si_dpm_ip_funcs;
-		amd_pp->pp_funcs = &si_dpm_funcs;
-		break;
-#endif
-#ifdef CONFIG_DRM_AMDGPU_CIK
-	case CHIP_BONAIRE:
-	case CHIP_HAWAII:
-		if (amdgpu_dpm == -1) {
-			amd_pp->ip_funcs = &ci_dpm_ip_funcs;
-			amd_pp->pp_funcs = &ci_dpm_funcs;
-		} else {
-			amd_pp->cgs_device = amdgpu_cgs_create_device(adev);
-			amd_pp->ip_funcs = &pp_ip_funcs;
-			amd_pp->pp_funcs = &pp_dpm_funcs;
-		}
-		break;
-	case CHIP_KABINI:
-	case CHIP_MULLINS:
-	case CHIP_KAVERI:
-		amd_pp->ip_funcs = &kv_dpm_ip_funcs;
-		amd_pp->pp_funcs = &kv_dpm_funcs;
-		break;
-#endif
-	default:
-		ret = -EINVAL;
-		break;
-	}
-
-	if (adev->powerplay.ip_funcs->early_init)
-		ret = adev->powerplay.ip_funcs->early_init(adev);
-
-	return ret;
-}
-
-
-static int amdgpu_pp_late_init(void *handle)
-{
-	int ret = 0;
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-	if (adev->powerplay.ip_funcs->late_init)
-		ret = adev->powerplay.ip_funcs->late_init(
-					adev->powerplay.pp_handle);
-
-	return ret;
-}
-
-static int amdgpu_pp_sw_init(void *handle)
-{
-	int ret = 0;
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-	if (adev->powerplay.ip_funcs->sw_init)
-		ret = adev->powerplay.ip_funcs->sw_init(
-					adev->powerplay.pp_handle);
-
-	return ret;
-}
-
-static int amdgpu_pp_sw_fini(void *handle)
-{
-	int ret = 0;
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-	if (adev->powerplay.ip_funcs->sw_fini)
-		ret = adev->powerplay.ip_funcs->sw_fini(
-					adev->powerplay.pp_handle);
-	if (ret)
-		return ret;
-
-	return ret;
-}
-
-static int amdgpu_pp_hw_init(void *handle)
-{
-	int ret = 0;
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-	if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU)
-		amdgpu_ucode_init_bo(adev);
-
-	if (adev->powerplay.ip_funcs->hw_init)
-		ret = adev->powerplay.ip_funcs->hw_init(
-					adev->powerplay.pp_handle);
-
-	return ret;
-}
-
-static int amdgpu_pp_hw_fini(void *handle)
-{
-	int ret = 0;
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-	if (adev->powerplay.ip_funcs->hw_fini)
-		ret = adev->powerplay.ip_funcs->hw_fini(
-					adev->powerplay.pp_handle);
-
-	if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU)
-		amdgpu_ucode_fini_bo(adev);
-
-	return ret;
-}
-
-static void amdgpu_pp_late_fini(void *handle)
-{
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-	if (adev->powerplay.ip_funcs->late_fini)
-		adev->powerplay.ip_funcs->late_fini(
-			  adev->powerplay.pp_handle);
-
-	if (adev->powerplay.cgs_device)
-		amdgpu_cgs_destroy_device(adev->powerplay.cgs_device);
-}
-
-static int amdgpu_pp_suspend(void *handle)
-{
-	int ret = 0;
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-	if (adev->powerplay.ip_funcs->suspend)
-		ret = adev->powerplay.ip_funcs->suspend(
-					 adev->powerplay.pp_handle);
-	return ret;
-}
-
-static int amdgpu_pp_resume(void *handle)
-{
-	int ret = 0;
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-	if (adev->powerplay.ip_funcs->resume)
-		ret = adev->powerplay.ip_funcs->resume(
-					adev->powerplay.pp_handle);
-	return ret;
-}
-
-static int amdgpu_pp_set_clockgating_state(void *handle,
-					   enum amd_clockgating_state state)
-{
-	int ret = 0;
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-	if (adev->powerplay.ip_funcs->set_clockgating_state)
-		ret = adev->powerplay.ip_funcs->set_clockgating_state(
-				adev->powerplay.pp_handle, state);
-	return ret;
-}
-
-static int amdgpu_pp_set_powergating_state(void *handle,
-					   enum amd_powergating_state state)
-{
-	int ret = 0;
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-	if (adev->powerplay.ip_funcs->set_powergating_state)
-		ret = adev->powerplay.ip_funcs->set_powergating_state(
-				adev->powerplay.pp_handle, state);
-	return ret;
-}
-
-
-static bool amdgpu_pp_is_idle(void *handle)
-{
-	bool ret = true;
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-	if (adev->powerplay.ip_funcs->is_idle)
-		ret = adev->powerplay.ip_funcs->is_idle(
-					adev->powerplay.pp_handle);
-	return ret;
-}
-
-static int amdgpu_pp_wait_for_idle(void *handle)
-{
-	int ret = 0;
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-	if (adev->powerplay.ip_funcs->wait_for_idle)
-		ret = adev->powerplay.ip_funcs->wait_for_idle(
-					adev->powerplay.pp_handle);
-	return ret;
-}
-
-static int amdgpu_pp_soft_reset(void *handle)
-{
-	int ret = 0;
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-	if (adev->powerplay.ip_funcs->soft_reset)
-		ret = adev->powerplay.ip_funcs->soft_reset(
-					adev->powerplay.pp_handle);
-	return ret;
-}
-
-static const struct amd_ip_funcs amdgpu_pp_ip_funcs = {
-	.name = "amdgpu_powerplay",
-	.early_init = amdgpu_pp_early_init,
-	.late_init = amdgpu_pp_late_init,
-	.sw_init = amdgpu_pp_sw_init,
-	.sw_fini = amdgpu_pp_sw_fini,
-	.hw_init = amdgpu_pp_hw_init,
-	.hw_fini = amdgpu_pp_hw_fini,
-	.late_fini = amdgpu_pp_late_fini,
-	.suspend = amdgpu_pp_suspend,
-	.resume = amdgpu_pp_resume,
-	.is_idle = amdgpu_pp_is_idle,
-	.wait_for_idle = amdgpu_pp_wait_for_idle,
-	.soft_reset = amdgpu_pp_soft_reset,
-	.set_clockgating_state = amdgpu_pp_set_clockgating_state,
-	.set_powergating_state = amdgpu_pp_set_powergating_state,
-};
-
-const struct amdgpu_ip_block_version amdgpu_pp_ip_block =
-{
-	.type = AMD_IP_BLOCK_TYPE_SMC,
-	.major = 1,
-	.minor = 0,
-	.rev = 0,
-	.funcs = &amdgpu_pp_ip_funcs,
-};

@@ -105,11 +105,16 @@ amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
	int ret;
 
	ww_mutex_lock(&resv->lock, NULL);
-	ret = amdgpu_bo_create(adev, attach->dmabuf->size, PAGE_SIZE, false,
-			       AMDGPU_GEM_DOMAIN_GTT, 0, sg, resv, &bo);
+	ret = amdgpu_bo_create(adev, attach->dmabuf->size, PAGE_SIZE,
+			       AMDGPU_GEM_DOMAIN_CPU, 0, ttm_bo_type_sg,
+			       resv, &bo);
	if (ret)
		goto error;
 
+	bo->tbo.sg = sg;
+	bo->tbo.ttm->sg = sg;
+	bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
+	bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
	if (attach->dmabuf->ops != &amdgpu_dmabuf_ops)
		bo->prime_shared_count = 1;
 

@@ -62,6 +62,9 @@ static int psp_sw_init(void *handle)
 
	psp->adev = adev;
 
+	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
+		return 0;
+
	ret = psp_init_microcode(psp);
	if (ret) {
		DRM_ERROR("Failed to load psp firmware!\n");
@@ -75,6 +78,9 @@ static int psp_sw_fini(void *handle)
 {
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
+		return 0;
+
	release_firmware(adev->psp.sos_fw);
	adev->psp.sos_fw = NULL;
	release_firmware(adev->psp.asd_fw);
@@ -453,6 +459,9 @@ static int psp_suspend(void *handle)
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct psp_context *psp = &adev->psp;
 
+	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
+		return 0;
+
	ret = psp_ring_stop(psp, PSP_RING_TYPE__KM);
	if (ret) {
		DRM_ERROR("PSP ring stop failed\n");

@@ -59,9 +59,8 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
		goto out_cleanup;
	}
 
-	r = amdgpu_bo_create(adev, size, PAGE_SIZE, true,
-			     AMDGPU_GEM_DOMAIN_VRAM, 0,
-			     NULL, NULL, &vram_obj);
+	r = amdgpu_bo_create(adev, size, PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, 0,
+			     ttm_bo_type_kernel, NULL, &vram_obj);
	if (r) {
		DRM_ERROR("Failed to create VRAM object\n");
		goto out_cleanup;
@@ -80,9 +79,9 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
		void **vram_start, **vram_end;
		struct dma_fence *fence = NULL;
 
-		r = amdgpu_bo_create(adev, size, PAGE_SIZE, true,
-				     AMDGPU_GEM_DOMAIN_GTT, 0, NULL,
-				     NULL, gtt_obj + i);
+		r = amdgpu_bo_create(adev, size, PAGE_SIZE,
+				     AMDGPU_GEM_DOMAIN_GTT, 0,
+				     ttm_bo_type_kernel, NULL, gtt_obj + i);
		if (r) {
			DRM_ERROR("Failed to create GTT object %d\n", i);
			goto out_lclean;

@@ -204,6 +204,12 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
		.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM
	};
 
+	if (bo->type == ttm_bo_type_sg) {
+		placement->num_placement = 0;
+		placement->num_busy_placement = 0;
+		return;
+	}
+
	if (!amdgpu_ttm_bo_is_amdgpu_bo(bo)) {
		placement->placement = &placements;
		placement->busy_placement = &placements;
@@ -982,20 +988,20 @@ static struct ttm_backend_func amdgpu_backend_func = {
	.destroy = &amdgpu_ttm_backend_destroy,
 };
 
-static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_bo_device *bdev,
-					   unsigned long size, uint32_t page_flags)
+static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo,
+					   uint32_t page_flags)
 {
	struct amdgpu_device *adev;
	struct amdgpu_ttm_tt *gtt;
 
-	adev = amdgpu_ttm_adev(bdev);
+	adev = amdgpu_ttm_adev(bo->bdev);
 
	gtt = kzalloc(sizeof(struct amdgpu_ttm_tt), GFP_KERNEL);
	if (gtt == NULL) {
		return NULL;
	}
	gtt->ttm.ttm.func = &amdgpu_backend_func;
-	if (ttm_dma_tt_init(&gtt->ttm, bdev, size, page_flags)) {
+	if (ttm_sg_tt_init(&gtt->ttm, bo, page_flags)) {
		kfree(gtt);
		return NULL;
	}
@@ -1021,7 +1027,8 @@ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm,
 
	if (slave && ttm->sg) {
		drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
-						 gtt->ttm.dma_address, ttm->num_pages);
+						 gtt->ttm.dma_address,
+						 ttm->num_pages);
		ttm->state = tt_unbound;
		return 0;
	}
@@ -1335,11 +1342,12 @@ static int amdgpu_ttm_fw_reserve_vram_init(struct amdgpu_device *adev)
	if (adev->fw_vram_usage.size > 0 &&
		adev->fw_vram_usage.size <= vram_size) {
 
-		r = amdgpu_bo_create(adev, adev->fw_vram_usage.size,
-			PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
-			AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
-			AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, NULL, NULL,
-			&adev->fw_vram_usage.reserved_bo);
+		r = amdgpu_bo_create(adev, adev->fw_vram_usage.size, PAGE_SIZE,
+				     AMDGPU_GEM_DOMAIN_VRAM,
+				     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+				     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
+				     ttm_bo_type_kernel, NULL,
+				     &adev->fw_vram_usage.reserved_bo);
		if (r)
			goto error_create;
 

@@ -413,9 +413,9 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
		if (!entry->base.bo) {
			r = amdgpu_bo_create(adev,
					     amdgpu_vm_bo_size(adev, level),
-					     AMDGPU_GPU_PAGE_SIZE, true,
+					     AMDGPU_GPU_PAGE_SIZE,
					     AMDGPU_GEM_DOMAIN_VRAM, flags,
-					     NULL, resv, &pt);
+					     ttm_bo_type_kernel, resv, &pt);
			if (r)
				return r;
 
@@ -2409,8 +2409,8 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			AMDGPU_GEM_CREATE_SHADOW);
 
	size = amdgpu_vm_bo_size(adev, adev->vm_manager.root_level);
-	r = amdgpu_bo_create(adev, size, align, true, AMDGPU_GEM_DOMAIN_VRAM,
-			     flags, NULL, NULL, &vm->root.base.bo);
+	r = amdgpu_bo_create(adev, size, align, AMDGPU_GEM_DOMAIN_VRAM, flags,
+			     ttm_bo_type_kernel, NULL, &vm->root.base.bo);
	if (r)
		goto error_free_sched_entity;
 

@@ -34,7 +34,7 @@
 #include <linux/backlight.h>
 #include "bif/bif_4_1_d.h"
 
-static u8
+u8
 amdgpu_atombios_encoder_get_backlight_level_from_reg(struct amdgpu_device *adev)
 {
	u8 backlight_level;
@@ -48,7 +48,7 @@ amdgpu_atombios_encoder_get_backlight_level_from_reg(struct amdgpu_device *adev)
	return backlight_level;
 }
 
-static void
+void
 amdgpu_atombios_encoder_set_backlight_level_to_reg(struct amdgpu_device *adev,
						   u8 backlight_level)
 {

@@ -24,6 +24,11 @@
 #ifndef __ATOMBIOS_ENCODER_H__
 #define __ATOMBIOS_ENCODER_H__
 
+u8
+amdgpu_atombios_encoder_get_backlight_level_from_reg(struct amdgpu_device *adev);
+void
+amdgpu_atombios_encoder_set_backlight_level_to_reg(struct amdgpu_device *adev,
+						   u8 backlight_level);
 u8
 amdgpu_atombios_encoder_get_backlight_level(struct amdgpu_encoder *amdgpu_encoder);
 void

@@ -65,6 +65,8 @@ MODULE_FIRMWARE("radeon/hawaii_k_smc.bin");
 #define VOLTAGE_VID_OFFSET_SCALE1 625
 #define VOLTAGE_VID_OFFSET_SCALE2 100
 
+static const struct amd_pm_funcs ci_dpm_funcs;
+
 static const struct ci_pt_defaults defaults_hawaii_xt =
 {
	1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0xB0000,
@@ -6241,6 +6243,7 @@ static int ci_dpm_early_init(void *handle)
 {
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+	adev->powerplay.pp_funcs = &ci_dpm_funcs;
	ci_dpm_set_irq_funcs(adev);
 
	return 0;
@@ -6760,7 +6763,7 @@ static int ci_dpm_read_sensor(void *handle, int idx,
	}
 }
 
-const struct amd_ip_funcs ci_dpm_ip_funcs = {
+static const struct amd_ip_funcs ci_dpm_ip_funcs = {
	.name = "ci_dpm",
	.early_init = ci_dpm_early_init,
	.late_init = ci_dpm_late_init,
@@ -6777,7 +6780,16 @@ const struct amd_ip_funcs ci_dpm_ip_funcs = {
	.set_powergating_state = ci_dpm_set_powergating_state,
 };
 
-const struct amd_pm_funcs ci_dpm_funcs = {
+const struct amdgpu_ip_block_version ci_smu_ip_block =
+{
+	.type = AMD_IP_BLOCK_TYPE_SMC,
+	.major = 7,
+	.minor = 0,
+	.rev = 0,
+	.funcs = &ci_dpm_ip_funcs,
+};
+
+static const struct amd_pm_funcs ci_dpm_funcs = {
	.pre_set_power_state = &ci_dpm_pre_set_power_state,
	.set_power_state = &ci_dpm_set_power_state,
	.post_set_power_state = &ci_dpm_post_set_power_state,

@@ -67,7 +67,6 @@
 
 #include "amdgpu_dm.h"
 #include "amdgpu_amdkfd.h"
-#include "amdgpu_powerplay.h"
 #include "dce_virtual.h"
 
 /*
@@ -1887,10 +1886,6 @@ static int cik_common_early_init(void *handle)
		return -EINVAL;
	}
 
-	adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
-
-	amdgpu_device_get_pcie_info(adev);
-
	return 0;
 }
 
@@ -2000,7 +1995,10 @@ int cik_set_ip_blocks(struct amdgpu_device *adev)
		amdgpu_device_ip_block_add(adev, &cik_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v7_0_ip_block);
		amdgpu_device_ip_block_add(adev, &cik_ih_ip_block);
-		amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block);
+		if (amdgpu_dpm == -1)
+			amdgpu_device_ip_block_add(adev, &ci_smu_ip_block);
+		else
+			amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
 #if defined(CONFIG_DRM_AMD_DC)
@@ -2018,7 +2016,10 @@ int cik_set_ip_blocks(struct amdgpu_device *adev)
		amdgpu_device_ip_block_add(adev, &cik_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v7_0_ip_block);
		amdgpu_device_ip_block_add(adev, &cik_ih_ip_block);
-		amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block);
+		if (amdgpu_dpm == -1)
+			amdgpu_device_ip_block_add(adev, &ci_smu_ip_block);
+		else
+			amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
 #if defined(CONFIG_DRM_AMD_DC)
@@ -2036,7 +2037,7 @@ int cik_set_ip_blocks(struct amdgpu_device *adev)
		amdgpu_device_ip_block_add(adev, &cik_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v7_0_ip_block);
		amdgpu_device_ip_block_add(adev, &cik_ih_ip_block);
-		amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block);
+		amdgpu_device_ip_block_add(adev, &kv_smu_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
 #if defined(CONFIG_DRM_AMD_DC)
@@ -2055,7 +2056,7 @@ int cik_set_ip_blocks(struct amdgpu_device *adev)
		amdgpu_device_ip_block_add(adev, &cik_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v7_0_ip_block);
		amdgpu_device_ip_block_add(adev, &cik_ih_ip_block);
-		amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block);
+		amdgpu_device_ip_block_add(adev, &kv_smu_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
 #if defined(CONFIG_DRM_AMD_DC)

@@ -24,8 +24,7 @@
 #ifndef __CIK_DPM_H__
 #define __CIK_DPM_H__
 
-extern const struct amd_ip_funcs ci_dpm_ip_funcs;
-extern const struct amd_ip_funcs kv_dpm_ip_funcs;
-extern const struct amd_pm_funcs ci_dpm_funcs;
-extern const struct amd_pm_funcs kv_dpm_funcs;
+extern const struct amdgpu_ip_block_version ci_smu_ip_block;
+extern const struct amdgpu_ip_block_version kv_smu_ip_block;
 
 #endif

@@ -2862,6 +2862,11 @@ static int dce_v10_0_hw_fini(void *handle)
 
 static int dce_v10_0_suspend(void *handle)
 {
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	adev->mode_info.bl_level =
+		amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
+
	return dce_v10_0_hw_fini(handle);
 }
 
@@ -2870,6 +2875,9 @@ static int dce_v10_0_resume(void *handle)
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret;
 
+	amdgpu_atombios_encoder_set_backlight_level_to_reg(adev,
+							   adev->mode_info.bl_level);
+
	ret = dce_v10_0_hw_init(handle);
 
	/* turn on the BL */

@@ -2988,6 +2988,11 @@ static int dce_v11_0_hw_fini(void *handle)
 
 static int dce_v11_0_suspend(void *handle)
 {
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	adev->mode_info.bl_level =
+		amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
+
	return dce_v11_0_hw_fini(handle);
 }
 
@@ -2996,6 +3001,9 @@ static int dce_v11_0_resume(void *handle)
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret;
 
+	amdgpu_atombios_encoder_set_backlight_level_to_reg(adev,
+							   adev->mode_info.bl_level);
+
	ret = dce_v11_0_hw_init(handle);
 
	/* turn on the BL */

@@ -2730,6 +2730,11 @@ static int dce_v6_0_hw_fini(void *handle)
 
 static int dce_v6_0_suspend(void *handle)
 {
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	adev->mode_info.bl_level =
+		amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
+
	return dce_v6_0_hw_fini(handle);
 }
 
@@ -2738,6 +2743,9 @@ static int dce_v6_0_resume(void *handle)
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret;
 
+	amdgpu_atombios_encoder_set_backlight_level_to_reg(adev,
+							   adev->mode_info.bl_level);
+
	ret = dce_v6_0_hw_init(handle);
 
	/* turn on the BL */

@@ -2760,6 +2760,11 @@ static int dce_v8_0_hw_fini(void *handle)
 
 static int dce_v8_0_suspend(void *handle)
 {
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	adev->mode_info.bl_level =
+		amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
+
	return dce_v8_0_hw_fini(handle);
 }
 
@@ -2768,6 +2773,9 @@ static int dce_v8_0_resume(void *handle)
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret;
 
+	amdgpu_atombios_encoder_set_backlight_level_to_reg(adev,
+							   adev->mode_info.bl_level);
+
	ret = dce_v8_0_hw_init(handle);
 
	/* turn on the BL */

@@ -1261,23 +1261,23 @@ static int gfx_v9_0_sw_init(void *handle)
	adev->gfx.mec.num_queue_per_pipe = 8;
 
	/* KIQ event */
-	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_GRBM_CP, 178, &adev->gfx.kiq.irq);
+	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, 178, &adev->gfx.kiq.irq);
	if (r)
		return r;
 
	/* EOP Event */
-	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_GRBM_CP, 181, &adev->gfx.eop_irq);
+	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, 181, &adev->gfx.eop_irq);
	if (r)
		return r;
 
	/* Privileged reg */
-	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_GRBM_CP, 184,
+	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, 184,
			      &adev->gfx.priv_reg_irq);
	if (r)
		return r;
 
	/* Privileged inst */
-	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_GRBM_CP, 185,
+	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, 185,
			      &adev->gfx.priv_inst_irq);
	if (r)
		return r;
 

@@ -836,9 +836,9 @@ static int gmc_v9_0_sw_init(void *handle)
 
	spin_lock_init(&adev->gmc.invalidate_lock);
 
+	adev->gmc.vram_type = amdgpu_atomfirmware_get_vram_type(adev);
	switch (adev->asic_type) {
	case CHIP_RAVEN:
-		adev->gmc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
		if (adev->rev_id == 0x0 || adev->rev_id == 0x1) {
			amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		} else {
@@ -849,8 +849,6 @@ static int gmc_v9_0_sw_init(void *handle)
		}
		break;
	case CHIP_VEGA10:
-		/* XXX Don't know how to get VRAM type yet. */
-		adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM;
		/*
		 * To fulfill 4-level page support,
		 * vm size is 256TB (48bit), maximum size of Vega10,
@@ -863,9 +861,9 @@ static int gmc_v9_0_sw_init(void *handle)
	}
 
	/* This interrupt is VMC page fault.*/
-	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_VMC, 0,
+	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC, 0,
				&adev->gmc.vm_fault);
-	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_UTCL2, 0,
+	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UTCL2, 0,
				&adev->gmc.vm_fault);
 
	if (r)

@@ -42,6 +42,8 @@
 #define KV_MINIMUM_ENGINE_CLOCK         800
 #define SMC_RAM_END                     0x40000
 
+static const struct amd_pm_funcs kv_dpm_funcs;
+
 static void kv_dpm_set_irq_funcs(struct amdgpu_device *adev);
 static int kv_enable_nb_dpm(struct amdgpu_device *adev,
 			    bool enable);

@@ -2960,6 +2962,7 @@ static int kv_dpm_early_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+	adev->powerplay.pp_funcs = &kv_dpm_funcs;
 	kv_dpm_set_irq_funcs(adev);
 
 	return 0;

@@ -3301,7 +3304,7 @@ static int kv_dpm_read_sensor(void *handle, int idx,
 	}
 }
 
-const struct amd_ip_funcs kv_dpm_ip_funcs = {
+static const struct amd_ip_funcs kv_dpm_ip_funcs = {
 	.name = "kv_dpm",
 	.early_init = kv_dpm_early_init,
 	.late_init = kv_dpm_late_init,

@@ -3318,7 +3321,16 @@ const struct amd_ip_funcs kv_dpm_ip_funcs = {
 	.set_powergating_state = kv_dpm_set_powergating_state,
 };
 
-const struct amd_pm_funcs kv_dpm_funcs = {
+const struct amdgpu_ip_block_version kv_smu_ip_block =
+{
+	.type = AMD_IP_BLOCK_TYPE_SMC,
+	.major = 1,
+	.minor = 0,
+	.rev = 0,
+	.funcs = &kv_dpm_ip_funcs,
+};
+
+static const struct amd_pm_funcs kv_dpm_funcs = {
 	.pre_set_power_state = &kv_dpm_pre_set_power_state,
 	.set_power_state = &kv_dpm_set_power_state,
 	.post_set_power_state = &kv_dpm_post_set_power_state,

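The kv_dpm hunks show the pattern used by the "remove wrapper layer of smu ip functions" commits: the file keeps its amd_ip_funcs and amd_pm_funcs private (static) and instead exports one versioned IP-block descriptor that the SoC setup code registers directly. A minimal sketch under that assumption, with simplified stand-in types rather than the kernel's:

#include <stdio.h>

struct ip_funcs { const char *name; };
struct ip_block_version {
	int type, major, minor, rev;
	const struct ip_funcs *funcs;
};

static const struct ip_funcs dpm_ip_funcs = { .name = "kv_dpm" };

const struct ip_block_version kv_smu_block_sketch = {
	.type = 1 /* "SMC" */, .major = 1, .minor = 0, .rev = 0,
	.funcs = &dpm_ip_funcs,
};

int main(void)
{
	printf("%s v%d.%d\n", kv_smu_block_sketch.funcs->name,
	       kv_smu_block_sketch.major, kv_smu_block_sketch.minor);
	return 0;
}
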
@@ -33,56 +33,34 @@
 
 static void xgpu_ai_mailbox_send_ack(struct amdgpu_device *adev)
 {
-	u32 reg;
-	int timeout = AI_MAILBOX_TIMEDOUT;
-	u32 mask = REG_FIELD_MASK(BIF_BX_PF0_MAILBOX_CONTROL, RCV_MSG_VALID);
-
-	reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
-					     mmBIF_BX_PF0_MAILBOX_CONTROL));
-	reg = REG_SET_FIELD(reg, BIF_BX_PF0_MAILBOX_CONTROL, RCV_MSG_ACK, 1);
-	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
-				       mmBIF_BX_PF0_MAILBOX_CONTROL), reg);
-
-	/* Wait for RCV_MSG_VALID to be 0 */
-	reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
-					     mmBIF_BX_PF0_MAILBOX_CONTROL));
-	while (reg & mask) {
-		if (timeout <= 0) {
-			pr_err("RCV_MSG_VALID is not cleared\n");
-			break;
-		}
-		mdelay(1);
-		timeout -= 1;
-
-		reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
-						     mmBIF_BX_PF0_MAILBOX_CONTROL));
-	}
+	WREG8(AI_MAIBOX_CONTROL_RCV_OFFSET_BYTE, 2);
 }
 
 static void xgpu_ai_mailbox_set_valid(struct amdgpu_device *adev, bool val)
 {
-	u32 reg;
-
-	reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
-					     mmBIF_BX_PF0_MAILBOX_CONTROL));
-	reg = REG_SET_FIELD(reg, BIF_BX_PF0_MAILBOX_CONTROL,
-			    TRN_MSG_VALID, val ? 1 : 0);
-	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_CONTROL),
-		      reg);
+	WREG8(AI_MAIBOX_CONTROL_TRN_OFFSET_BYTE, val ? 1 : 0);
 }
 
+/*
+ * This peek_msg may *only* be called from the IRQ routine, because in the
+ * IRQ routine the RCV_MSG_VALID field of BIF_BX_PF0_MAILBOX_CONTROL has
+ * already been set to 1 by the host.
+ *
+ * If called outside the IRQ routine, peek_msg is not guaranteed to return
+ * the correct value, since it does not return RCV_DW0 in the case where
+ * RCV_MSG_VALID has been set by the host.
+ */
+static enum idh_event xgpu_ai_mailbox_peek_msg(struct amdgpu_device *adev)
+{
+	return RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
+				mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW0));
+}
+
+
 static int xgpu_ai_mailbox_rcv_msg(struct amdgpu_device *adev,
 				   enum idh_event event)
 {
 	u32 reg;
 	u32 mask = REG_FIELD_MASK(BIF_BX_PF0_MAILBOX_CONTROL, RCV_MSG_VALID);
 
 	if (event != IDH_FLR_NOTIFICATION_CMPL) {
 		reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
 						     mmBIF_BX_PF0_MAILBOX_CONTROL));
 		if (!(reg & mask))
 			return -ENOENT;
 	}
 
 	reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
 					     mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW0));

@@ -94,54 +72,67 @@ static int xgpu_ai_mailbox_rcv_msg(struct amdgpu_device *adev,
 	return 0;
 }
 
+static uint8_t xgpu_ai_peek_ack(struct amdgpu_device *adev) {
+	return RREG8(AI_MAIBOX_CONTROL_TRN_OFFSET_BYTE) & 2;
+}
+
 static int xgpu_ai_poll_ack(struct amdgpu_device *adev)
 {
-	int r = 0, timeout = AI_MAILBOX_TIMEDOUT;
-	u32 mask = REG_FIELD_MASK(BIF_BX_PF0_MAILBOX_CONTROL, TRN_MSG_ACK);
-	u32 reg;
+	int timeout = AI_MAILBOX_POLL_ACK_TIMEDOUT;
+	u8 reg;
+
+	do {
+		reg = RREG8(AI_MAIBOX_CONTROL_TRN_OFFSET_BYTE);
+		if (reg & 2)
+			return 0;
 
-	reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
-					     mmBIF_BX_PF0_MAILBOX_CONTROL));
-	while (!(reg & mask)) {
-		if (timeout <= 0) {
-			pr_err("Doesn't get ack from pf.\n");
-			r = -ETIME;
-			break;
-		}
-		mdelay(5);
-		timeout -= 5;
+		mdelay(5);
+		timeout -= 5;
+	} while (timeout > 1);
 
-		reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
-						     mmBIF_BX_PF0_MAILBOX_CONTROL));
-	}
+	pr_err("Doesn't get TRN_MSG_ACK from pf in %d msec\n", AI_MAILBOX_POLL_ACK_TIMEDOUT);
 
-	return r;
+	return -ETIME;
 }
 
 static int xgpu_ai_poll_msg(struct amdgpu_device *adev, enum idh_event event)
 {
-	int r = 0, timeout = AI_MAILBOX_TIMEDOUT;
-
-	r = xgpu_ai_mailbox_rcv_msg(adev, event);
-	while (r) {
-		if (timeout <= 0) {
-			pr_err("Doesn't get msg:%d from pf.\n", event);
-			r = -ETIME;
-			break;
-		}
-		mdelay(5);
-		timeout -= 5;
+	int r, timeout = AI_MAILBOX_POLL_MSG_TIMEDOUT;
+
+	do {
+		r = xgpu_ai_mailbox_rcv_msg(adev, event);
+		if (!r)
+			return 0;
 
-		r = xgpu_ai_mailbox_rcv_msg(adev, event);
-	}
+		msleep(10);
+		timeout -= 10;
+	} while (timeout > 1);
 
-	return r;
+	pr_err("Doesn't get msg:%d from pf, error=%d\n", event, r);
+
+	return -ETIME;
 }
 
 static void xgpu_ai_mailbox_trans_msg (struct amdgpu_device *adev,
 	      enum idh_request req, u32 data1, u32 data2, u32 data3) {
 	u32 reg;
 	int r;
+	uint8_t trn;
+
+	/* IMPORTANT:
+	 * Clear TRN_MSG_VALID to clear the host's RCV_MSG_VALID; with that
+	 * cleared, hw automatically clears the host's RCV_MSG_ACK, which in
+	 * turn clears the VF's TRN_MSG_ACK. Otherwise xgpu_ai_poll_ack()
+	 * below would return immediately.
+	 */
+	do {
+		xgpu_ai_mailbox_set_valid(adev, false);
+		trn = xgpu_ai_peek_ack(adev);
+		if (trn) {
+			pr_err("trn=%x ACK should not assert! wait again!\n", trn);
+			msleep(1);
+		}
+	} while (trn);
 
 	reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
 					     mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0));

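Both rewritten pollers above share one shape: retry a predicate at a fixed interval and give up once a total budget is spent. A standalone sketch of that bounded-polling loop, where check() is a hypothetical stand-in for xgpu_ai_mailbox_rcv_msg() and POSIX usleep() stands in for the kernel's msleep():

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static bool check(void) { return false; /* would query the mailbox */ }

static int poll_with_timeout(int budget_ms, int step_ms)
{
	do {
		if (check())
			return 0;
		usleep(step_ms * 1000);
		budget_ms -= step_ms;
	} while (budget_ms > 1);
	return -1; /* the kernel version returns -ETIME here */
}

int main(void)
{
	printf("%d\n", poll_with_timeout(50, 10)); /* prints -1: never acked */
	return 0;
}
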
@@ -245,15 +236,36 @@ static void xgpu_ai_mailbox_flr_work(struct work_struct *work)
 {
 	struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, flr_work);
 	struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);
+	int timeout = AI_MAILBOX_POLL_FLR_TIMEDOUT;
+	int locked;
 
-	/* wait until RCV_MSG become 3 */
-	if (xgpu_ai_poll_msg(adev, IDH_FLR_NOTIFICATION_CMPL)) {
-		pr_err("failed to receive FLR_CMPL\n");
-		return;
-	}
-
-	/* Trigger recovery due to world switch failure */
-	amdgpu_device_gpu_recover(adev, NULL, false);
+	/* Block amdgpu_gpu_recover till msg FLR COMPLETE is received,
+	 * otherwise the mailbox msg will be ruined/reset by
+	 * the VF FLR.
+	 *
+	 * We can unlock lock_reset to allow "amdgpu_job_timedout"
+	 * to run gpu_recover() after FLR_NOTIFICATION_CMPL is received,
+	 * which means the host side has finished this VF's FLR.
+	 */
+	locked = mutex_trylock(&adev->lock_reset);
+	if (locked)
+		adev->in_gpu_reset = 1;
+
+	do {
+		if (xgpu_ai_mailbox_peek_msg(adev) == IDH_FLR_NOTIFICATION_CMPL)
+			goto flr_done;
+
+		msleep(10);
+		timeout -= 10;
+	} while (timeout > 1);
+
+flr_done:
+	if (locked)
+		mutex_unlock(&adev->lock_reset);
+
+	/* Trigger recovery for world switch failure if no TDR */
+	if (amdgpu_lockup_timeout == 0)
+		amdgpu_device_gpu_recover(adev, NULL, true);
 }
 
 static int xgpu_ai_set_mailbox_rcv_irq(struct amdgpu_device *adev,

@@ -274,24 +286,22 @@ static int xgpu_ai_mailbox_rcv_irq(struct amdgpu_device *adev,
 				   struct amdgpu_irq_src *source,
 				   struct amdgpu_iv_entry *entry)
 {
-	int r;
-
-	/* trigger gpu-reset by hypervisor only if TDR disabled */
-	if (!amdgpu_gpu_recovery) {
-		/* see what event we get */
-		r = xgpu_ai_mailbox_rcv_msg(adev, IDH_FLR_NOTIFICATION);
-
-		/* sometimes the interrupt is delayed to inject to VM, in which
-		 * case the IDH_FLR_NOTIFICATION is overwritten by the VF FLR
-		 * from the GIM side, so the receive above may fail; we should
-		 * schedule the flr_work anyway
-		 */
-		if (r) {
-			DRM_ERROR("FLR_NOTIFICATION is missed\n");
-			xgpu_ai_mailbox_send_ack(adev);
-		}
-
-		schedule_work(&adev->virt.flr_work);
+	enum idh_event event = xgpu_ai_mailbox_peek_msg(adev);
+
+	switch (event) {
+	case IDH_FLR_NOTIFICATION:
+		if (amdgpu_sriov_runtime(adev))
+			schedule_work(&adev->virt.flr_work);
+		break;
+	/* READY_TO_ACCESS_GPU is fetched by kernel polling, so the IRQ can
+	 * ignore it for now since that polling thread will handle it;
+	 * other msgs like flr complete are not handled here.
+	 */
+	case IDH_CLR_MSG_BUF:
+	case IDH_FLR_NOTIFICATION_CMPL:
+	case IDH_READY_TO_ACCESS_GPU:
+	default:
+		break;
 	}
 
 	return 0;

@@ -319,11 +329,11 @@ int xgpu_ai_mailbox_add_irq_id(struct amdgpu_device *adev)
 {
 	int r;
 
-	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_BIF, 135, &adev->virt.rcv_irq);
+	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, 135, &adev->virt.rcv_irq);
 	if (r)
 		return r;
 
-	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_BIF, 138, &adev->virt.ack_irq);
+	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, 138, &adev->virt.ack_irq);
 	if (r) {
 		amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
 		return r;

@@ -24,7 +24,9 @@
 #ifndef __MXGPU_AI_H__
 #define __MXGPU_AI_H__
 
-#define AI_MAILBOX_TIMEDOUT		12000
+#define AI_MAILBOX_POLL_ACK_TIMEDOUT	500
+#define AI_MAILBOX_POLL_MSG_TIMEDOUT	12000
+#define AI_MAILBOX_POLL_FLR_TIMEDOUT	500
 
 enum idh_request {
 	IDH_REQ_GPU_INIT_ACCESS = 1,

@@ -51,4 +53,7 @@ int xgpu_ai_mailbox_add_irq_id(struct amdgpu_device *adev);
 int xgpu_ai_mailbox_get_irq(struct amdgpu_device *adev);
 void xgpu_ai_mailbox_put_irq(struct amdgpu_device *adev);
 
+#define AI_MAIBOX_CONTROL_TRN_OFFSET_BYTE SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_CONTROL) * 4
+#define AI_MAIBOX_CONTROL_RCV_OFFSET_BYTE SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_CONTROL) * 4 + 1
+
 #endif

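The two new macros encode the byte-granular access the mailbox rework relies on: a dword register index times 4 gives its byte address, and +1 addresses the next byte, so RREG8/WREG8 can touch one control byte without a read-modify-write of the whole dword. A standalone sketch of the arithmetic, with a made-up dword index standing in for SOC15_REG_OFFSET(...):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t mailbox_control_dw = 0x1234;           /* hypothetical dword index */
	uint32_t trn_byte = mailbox_control_dw * 4;     /* first byte: TRN fields */
	uint32_t rcv_byte = mailbox_control_dw * 4 + 1; /* second byte: RCV fields */

	printf("TRN byte offset %#x, RCV byte offset %#x\n", trn_byte, rcv_byte);
	return 0;
}
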
@@ -31,8 +31,6 @@
 #include "sdma0/sdma0_4_0_sh_mask.h"
 #include "sdma1/sdma1_4_0_offset.h"
 #include "sdma1/sdma1_4_0_sh_mask.h"
-#include "mmhub/mmhub_1_0_offset.h"
-#include "mmhub/mmhub_1_0_sh_mask.h"
 #include "hdp/hdp_4_0_offset.h"
 #include "sdma0/sdma0_4_1_default.h"

@@ -1172,13 +1170,13 @@ static int sdma_v4_0_sw_init(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
 	/* SDMA trap event */
-	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_SDMA0, 224,
+	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_SDMA0, 224,
 			      &adev->sdma.trap_irq);
 	if (r)
 		return r;
 
 	/* SDMA trap event */
-	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_SDMA1, 224,
+	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_SDMA1, 224,
 			      &adev->sdma.trap_irq);
 	if (r)
 		return r;

@@ -1333,7 +1331,7 @@ static int sdma_v4_0_process_trap_irq(struct amdgpu_device *adev,
 {
 	DRM_DEBUG("IH: SDMA trap\n");
 	switch (entry->client_id) {
-	case AMDGPU_IH_CLIENTID_SDMA0:
+	case SOC15_IH_CLIENTID_SDMA0:
 		switch (entry->ring_id) {
 		case 0:
 			amdgpu_fence_process(&adev->sdma.instance[0].ring);

@@ -1349,7 +1347,7 @@ static int sdma_v4_0_process_trap_irq(struct amdgpu_device *adev,
 			break;
 		}
 		break;
-	case AMDGPU_IH_CLIENTID_SDMA1:
+	case SOC15_IH_CLIENTID_SDMA1:
 		switch (entry->ring_id) {
 		case 0:
 			amdgpu_fence_process(&adev->sdma.instance[1].ring);

@@ -1399,7 +1397,7 @@ static void sdma_v4_0_update_medium_grain_clock_gating(
 		if (def != data)
 			WREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CLK_CTRL), data);
 
-		if (adev->asic_type == CHIP_VEGA10) {
+		if (adev->sdma.num_instances > 1) {
 			def = data = RREG32(SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_CLK_CTRL));
 			data &= ~(SDMA1_CLK_CTRL__SOFT_OVERRIDE7_MASK |
 				  SDMA1_CLK_CTRL__SOFT_OVERRIDE6_MASK |

@@ -1427,7 +1425,7 @@ static void sdma_v4_0_update_medium_grain_clock_gating(
 		if (def != data)
 			WREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CLK_CTRL), data);
 
-		if (adev->asic_type == CHIP_VEGA10) {
+		if (adev->sdma.num_instances > 1) {
 			def = data = RREG32(SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_CLK_CTRL));
 			data |= (SDMA1_CLK_CTRL__SOFT_OVERRIDE7_MASK |
 				 SDMA1_CLK_CTRL__SOFT_OVERRIDE6_MASK |

@@ -1458,7 +1456,7 @@ static void sdma_v4_0_update_medium_grain_light_sleep(
 			WREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_POWER_CNTL), data);
 
 		/* 1-not override: enable sdma1 mem light sleep */
-		if (adev->asic_type == CHIP_VEGA10) {
+		if (adev->sdma.num_instances > 1) {
 			def = data = RREG32(SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_POWER_CNTL));
 			data |= SDMA1_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
 			if (def != data)

@@ -1472,7 +1470,7 @@ static void sdma_v4_0_update_medium_grain_light_sleep(
 			WREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_POWER_CNTL), data);
 
 		/* 0-override: disable sdma1 mem light sleep */
-		if (adev->asic_type == CHIP_VEGA10) {
+		if (adev->sdma.num_instances > 1) {
 			def = data = RREG32(SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_POWER_CNTL));
 			data &= ~SDMA1_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
 			if (def != data)

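The four clock-gating and light-sleep hunks all make the same substitution: instead of asking "is this chip a Vega10?", the code asks for the property it actually needs, "does this part have a second SDMA instance?". A tiny sketch of that guard, with a simplified stand-in struct:

#include <stdio.h>

struct sdma_info { int num_instances; };

static void update_clock_gating(const struct sdma_info *sdma)
{
	printf("program SDMA0\n");
	if (sdma->num_instances > 1)
		printf("program SDMA1\n"); /* only where the hw exists */
}

int main(void)
{
	struct sdma_info one = { 1 }, two = { 2 };
	update_clock_gating(&one);
	update_clock_gating(&two);
	return 0;
}
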
@@ -32,7 +32,7 @@
 #include "amdgpu_vce.h"
 #include "atom.h"
 #include "amd_pcie.h"
-#include "amdgpu_powerplay.h"
+#include "si_dpm.h"
 #include "sid.h"
 #include "si_ih.h"
 #include "gfx_v6_0.h"

@@ -1983,7 +1983,7 @@ int si_set_ip_blocks(struct amdgpu_device *adev)
 		amdgpu_device_ip_block_add(adev, &si_common_ip_block);
 		amdgpu_device_ip_block_add(adev, &gmc_v6_0_ip_block);
 		amdgpu_device_ip_block_add(adev, &si_ih_ip_block);
-		amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block);
+		amdgpu_device_ip_block_add(adev, &si_smu_ip_block);
 		if (adev->enable_virtual_display)
 			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
 		else

@@ -1997,7 +1997,7 @@ int si_set_ip_blocks(struct amdgpu_device *adev)
 		amdgpu_device_ip_block_add(adev, &si_common_ip_block);
 		amdgpu_device_ip_block_add(adev, &gmc_v6_0_ip_block);
 		amdgpu_device_ip_block_add(adev, &si_ih_ip_block);
-		amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block);
+		amdgpu_device_ip_block_add(adev, &si_smu_ip_block);
 		if (adev->enable_virtual_display)
 			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
 		else

@@ -2011,7 +2011,7 @@ int si_set_ip_blocks(struct amdgpu_device *adev)
 		amdgpu_device_ip_block_add(adev, &si_common_ip_block);
 		amdgpu_device_ip_block_add(adev, &gmc_v6_0_ip_block);
 		amdgpu_device_ip_block_add(adev, &si_ih_ip_block);
-		amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block);
+		amdgpu_device_ip_block_add(adev, &si_smu_ip_block);
 		if (adev->enable_virtual_display)
 			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
 		amdgpu_device_ip_block_add(adev, &gfx_v6_0_ip_block);

@@ -67,6 +67,8 @@ MODULE_FIRMWARE("radeon/hainan_smc.bin");
 MODULE_FIRMWARE("radeon/hainan_k_smc.bin");
 MODULE_FIRMWARE("radeon/banks_k_2_smc.bin");
 
+static const struct amd_pm_funcs si_dpm_funcs;
+
 union power_info {
 	struct _ATOM_POWERPLAY_INFO info;
 	struct _ATOM_POWERPLAY_INFO_V2 info_2;

@@ -7914,6 +7916,7 @@ static int si_dpm_early_init(void *handle)
 
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+	adev->powerplay.pp_funcs = &si_dpm_funcs;
 	si_dpm_set_irq_funcs(adev);
 	return 0;
 }

@@ -8014,7 +8017,7 @@ static int si_dpm_read_sensor(void *handle, int idx,
 	}
 }
 
-const struct amd_ip_funcs si_dpm_ip_funcs = {
+static const struct amd_ip_funcs si_dpm_ip_funcs = {
 	.name = "si_dpm",
 	.early_init = si_dpm_early_init,
 	.late_init = si_dpm_late_init,

@@ -8031,7 +8034,16 @@ const struct amd_ip_funcs si_dpm_ip_funcs = {
 	.set_powergating_state = si_dpm_set_powergating_state,
 };
 
-const struct amd_pm_funcs si_dpm_funcs = {
+const struct amdgpu_ip_block_version si_smu_ip_block =
+{
+	.type = AMD_IP_BLOCK_TYPE_SMC,
+	.major = 6,
+	.minor = 0,
+	.rev = 0,
+	.funcs = &si_dpm_ip_funcs,
+};
+
+static const struct amd_pm_funcs si_dpm_funcs = {
 	.pre_set_power_state = &si_dpm_pre_set_power_state,
 	.set_power_state = &si_dpm_set_power_state,
 	.post_set_power_state = &si_dpm_post_set_power_state,

@@ -245,8 +245,7 @@ enum si_display_gap
 	SI_PM_DISPLAY_GAP_IGNORE = 3,
 };
 
-extern const struct amd_ip_funcs si_dpm_ip_funcs;
-extern const struct amd_pm_funcs si_dpm_funcs;
+extern const struct amdgpu_ip_block_version si_smu_ip_block;
 
 struct ni_leakage_coeffients
 {

@@ -57,7 +57,6 @@
 #include "uvd_v7_0.h"
 #include "vce_v4_0.h"
 #include "vcn_v1_0.h"
-#include "amdgpu_powerplay.h"
 #include "dce_virtual.h"
 #include "mxgpu_ai.h"

@@ -531,10 +530,9 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
 		amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
 		amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
 		amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
-		if (amdgpu_fw_load_type == 2 || amdgpu_fw_load_type == -1)
-			amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
-		if (!amdgpu_sriov_vf(adev))
-			amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block);
+		amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
+		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
 		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
 			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
 #if defined(CONFIG_DRM_AMD_DC)

@@ -553,7 +551,7 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
 		amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
 		amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
 		amdgpu_device_ip_block_add(adev, &psp_v10_0_ip_block);
-		amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block);
+		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
 		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
 			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
 #if defined(CONFIG_DRM_AMD_DC)

@@ -692,10 +690,6 @@ static int soc15_common_early_init(void *handle)
 		xgpu_ai_mailbox_set_irq_funcs(adev);
 	}
 
-	adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
-
-	amdgpu_device_get_pcie_info(adev);
-
 	return 0;
 }

@@ -390,13 +390,13 @@ static int uvd_v7_0_sw_init(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
 	/* UVD TRAP */
-	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_UVD, 124, &adev->uvd.irq);
+	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UVD, 124, &adev->uvd.irq);
 	if (r)
 		return r;
 
 	/* UVD ENC TRAP */
 	for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
-		r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_UVD, i + 119, &adev->uvd.irq);
+		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UVD, i + 119, &adev->uvd.irq);
 		if (r)
 			return r;
 	}

@@ -420,7 +420,7 @@ static int vce_v4_0_sw_init(void *handle)
 	unsigned size;
 	int r, i;
 
-	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_VCE0, 167, &adev->vce.irq);
+	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCE0, 167, &adev->vce.irq);
 	if (r)
 		return r;

@@ -75,13 +75,13 @@ static int vcn_v1_0_sw_init(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
 	/* VCN DEC TRAP */
-	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_VCN, 124, &adev->vcn.irq);
+	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, 124, &adev->vcn.irq);
 	if (r)
 		return r;
 
 	/* VCN ENC TRAP */
 	for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
-		r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_VCN, i + 119,
+		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, i + 119,
 				      &adev->vcn.irq);
 		if (r)
 			return r;

@@ -245,8 +245,8 @@ static bool vega10_ih_prescreen_iv(struct amdgpu_device *adev)
 	 * some faults get cleared.
 	 */
 	switch (dw0 & 0xff) {
-	case AMDGPU_IH_CLIENTID_VMC:
-	case AMDGPU_IH_CLIENTID_UTCL2:
+	case SOC15_IH_CLIENTID_VMC:
+	case SOC15_IH_CLIENTID_UTCL2:
 		break;
 	default:
 		/* Not a VM fault */

@@ -71,7 +71,6 @@
 #include "uvd_v5_0.h"
 #include "uvd_v6_0.h"
 #include "vce_v3_0.h"
-#include "amdgpu_powerplay.h"
 #if defined(CONFIG_DRM_AMD_ACP)
 #include "amdgpu_acp.h"
 #endif

@@ -1097,11 +1096,6 @@ static int vi_common_early_init(void *handle)
 		xgpu_vi_mailbox_set_irq_funcs(adev);
 	}
 
-	/* vi use smc load by default */
-	adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
-
-	amdgpu_device_get_pcie_info(adev);
-
 	return 0;
 }

@@ -1516,7 +1510,7 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
 		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
 		amdgpu_device_ip_block_add(adev, &gmc_v7_4_ip_block);
 		amdgpu_device_ip_block_add(adev, &iceland_ih_ip_block);
-		amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block);
+		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
 		if (adev->enable_virtual_display)
 			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
 		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);

@@ -1526,7 +1520,7 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
 		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
 		amdgpu_device_ip_block_add(adev, &gmc_v8_5_ip_block);
 		amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);
-		amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block);
+		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
 		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
 			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
 #if defined(CONFIG_DRM_AMD_DC)

@@ -1546,7 +1540,7 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
 		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
 		amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
 		amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);
-		amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block);
+		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
 		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
 			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
 #if defined(CONFIG_DRM_AMD_DC)

@@ -1568,7 +1562,7 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
 		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
 		amdgpu_device_ip_block_add(adev, &gmc_v8_1_ip_block);
 		amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);
-		amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block);
+		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
 		if (adev->enable_virtual_display)
 			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
 #if defined(CONFIG_DRM_AMD_DC)

@@ -1586,7 +1580,7 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
 		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
 		amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
 		amdgpu_device_ip_block_add(adev, &cz_ih_ip_block);
-		amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block);
+		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
 		if (adev->enable_virtual_display)
 			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
 #if defined(CONFIG_DRM_AMD_DC)

@@ -1607,7 +1601,7 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
 		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
 		amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
 		amdgpu_device_ip_block_add(adev, &cz_ih_ip_block);
-		amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block);
+		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
 		if (adev->enable_virtual_display)
 			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
 #if defined(CONFIG_DRM_AMD_DC)

@@ -11,7 +11,7 @@ config DRM_AMD_DC
 
 config DRM_AMD_DC_PRE_VEGA
 	bool "DC support for Polaris and older ASICs"
-	default n
+	default y
 	help
 	  Choose this option to enable the new DC support for older asics
 	  by default. This includes Polaris, Carrizo, Tonga, Bonaire,

@@ -1131,7 +1131,7 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev)
 
 	if (adev->asic_type == CHIP_VEGA10 ||
 	    adev->asic_type == CHIP_RAVEN)
-		client_id = AMDGPU_IH_CLIENTID_DCE;
+		client_id = SOC15_IH_CLIENTID_DCE;
 
 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

@@ -1231,7 +1231,7 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
 	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
 	     i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
 	     i++) {
-		r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_DCE, i, &adev->crtc_irq);
+		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
 
 		if (r) {
 			DRM_ERROR("Failed to add crtc irq id!\n");

@@ -1255,7 +1255,7 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
 	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
 	     i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
 	     i++) {
-		r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
+		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
 		if (r) {
 			DRM_ERROR("Failed to add page flip irq id!\n");
 			return r;

@@ -1276,7 +1276,7 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
 	}
 
 	/* HPD */
-	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
+	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
 			&adev->hpd_irq);
 	if (r) {
 		DRM_ERROR("Failed to add hpd irq id!\n");

@@ -1365,6 +1365,43 @@ amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
 
 #endif
 
+static int initialize_plane(struct amdgpu_display_manager *dm,
+			    struct amdgpu_mode_info *mode_info,
+			    int plane_id)
+{
+	struct amdgpu_plane *plane;
+	unsigned long possible_crtcs;
+	int ret = 0;
+
+	plane = kzalloc(sizeof(struct amdgpu_plane), GFP_KERNEL);
+	mode_info->planes[plane_id] = plane;
+
+	if (!plane) {
+		DRM_ERROR("KMS: Failed to allocate plane\n");
+		return -ENOMEM;
+	}
+	plane->base.type = mode_info->plane_type[plane_id];
+
+	/*
+	 * HACK: IGT tests expect that each plane can only have
+	 * one possible CRTC. For now, set one CRTC for each
+	 * plane that is not an underlay, but still allow multiple
+	 * CRTCs for underlay planes.
+	 */
+	possible_crtcs = 1 << plane_id;
+	if (plane_id >= dm->dc->caps.max_streams)
+		possible_crtcs = 0xff;
+
+	ret = amdgpu_dm_plane_init(dm, mode_info->planes[plane_id], possible_crtcs);
+
+	if (ret) {
+		DRM_ERROR("KMS: Failed to initialize plane\n");
+		return ret;
+	}
+
+	return ret;
+}
+
 /* In this architecture, the association
  * connector -> encoder -> crtc
  * is not really required. The crtc and connector will hold the

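The hunks that follow split plane setup into two passes: primary planes occupy indices [0, P) and overlays [P, P+O), and each group is walked from its highest index down, so the DRM plane order comes out the way DC expects. A standalone sketch of just that ordering, with hypothetical counts:

#include <stdio.h>

int main(void)
{
	int total_primary = 3, total_overlay = 2, i;

	for (i = total_overlay - 1; i >= 0; i--)
		printf("init overlay plane %d\n", total_primary + i);
	for (i = total_primary - 1; i >= 0; i--)
		printf("init primary plane %d\n", i);
	return 0;
}
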
@@ -1375,12 +1412,12 @@ amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
 {
 	struct amdgpu_display_manager *dm = &adev->dm;
-	uint32_t i;
+	int32_t i;
 	struct amdgpu_dm_connector *aconnector = NULL;
 	struct amdgpu_encoder *aencoder = NULL;
 	struct amdgpu_mode_info *mode_info = &adev->mode_info;
 	uint32_t link_cnt;
-	unsigned long possible_crtcs;
+	int32_t total_overlay_planes, total_primary_planes;
 
 	link_cnt = dm->dc->caps.max_links;
 	if (amdgpu_dm_mode_config_init(dm->adev)) {

@@ -1388,30 +1425,22 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
 		return -1;
 	}
 
-	for (i = 0; i < dm->dc->caps.max_planes; i++) {
-		struct amdgpu_plane *plane;
+	/* Identify the number of planes to be initialized */
+	total_overlay_planes = dm->dc->caps.max_slave_planes;
+	total_primary_planes = dm->dc->caps.max_planes - dm->dc->caps.max_slave_planes;
 
-		plane = kzalloc(sizeof(struct amdgpu_plane), GFP_KERNEL);
-		mode_info->planes[i] = plane;
-
-		if (!plane) {
-			DRM_ERROR("KMS: Failed to allocate plane\n");
+	/* First initialize overlay planes, index starting after primary planes */
+	for (i = (total_overlay_planes - 1); i >= 0; i--) {
+		if (initialize_plane(dm, mode_info, (total_primary_planes + i))) {
+			DRM_ERROR("KMS: Failed to initialize overlay plane\n");
 			goto fail;
 		}
-		plane->base.type = mode_info->plane_type[i];
+	}
 
-		/*
-		 * HACK: IGT tests expect that each plane can only have
-		 * one possible CRTC. For now, set one CRTC for each
-		 * plane that is not an underlay, but still allow multiple
-		 * CRTCs for underlay planes.
-		 */
-		possible_crtcs = 1 << i;
-		if (i >= dm->dc->caps.max_streams)
-			possible_crtcs = 0xff;
-
-		if (amdgpu_dm_plane_init(dm, mode_info->planes[i], possible_crtcs)) {
-			DRM_ERROR("KMS: Failed to initialize plane\n");
+	/* Initialize primary planes */
+	for (i = (total_primary_planes - 1); i >= 0; i--) {
+		if (initialize_plane(dm, mode_info, i)) {
+			DRM_ERROR("KMS: Failed to initialize primary plane\n");
 			goto fail;
 		}
 	}

@@ -1982,6 +2011,10 @@ static int fill_plane_attributes(struct amdgpu_device *adev,
 	 * every time.
 	 */
 	ret = amdgpu_dm_set_degamma_lut(crtc_state, dc_plane_state);
+	if (ret) {
+		dc_transfer_func_release(dc_plane_state->in_transfer_func);
+		dc_plane_state->in_transfer_func = NULL;
+	}
 
 	return ret;
 }

@@ -4691,8 +4724,8 @@ static int dm_update_planes_state(struct dc *dc,
 	int ret = 0;
 
 
-	/* Add new planes */
-	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
+	/* Add new planes, in reverse order as DC expects */
+	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
 		new_plane_crtc = new_plane_state->crtc;
 		old_plane_crtc = old_plane_state->crtc;
 		dm_new_plane_state = to_dm_plane_state(new_plane_state);

@@ -4737,6 +4770,7 @@ static int dm_update_planes_state(struct dc *dc,
 			*lock_and_validation_needed = true;
 
 		} else { /* Add new planes */
+			struct dc_plane_state *dc_new_plane_state;
 
 			if (drm_atomic_plane_disabling(plane->state, new_plane_state))
 				continue;

@@ -4755,34 +4789,42 @@ static int dm_update_planes_state(struct dc *dc,
 
 			WARN_ON(dm_new_plane_state->dc_state);
 
-			dm_new_plane_state->dc_state = dc_create_plane_state(dc);
+			dc_new_plane_state = dc_create_plane_state(dc);
+			if (!dc_new_plane_state)
+				return -ENOMEM;
 
 			DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
 					plane->base.id, new_plane_crtc->base.id);
 
-			if (!dm_new_plane_state->dc_state) {
-				ret = -EINVAL;
+			ret = fill_plane_attributes(
+				new_plane_crtc->dev->dev_private,
+				dc_new_plane_state,
+				new_plane_state,
+				new_crtc_state);
+			if (ret) {
+				dc_plane_state_release(dc_new_plane_state);
 				return ret;
 			}
 
-			ret = fill_plane_attributes(
-				new_plane_crtc->dev->dev_private,
-				dm_new_plane_state->dc_state,
-				new_plane_state,
-				new_crtc_state);
-			if (ret)
-				return ret;
-
+			/*
+			 * Any atomic check errors that occur after this will
+			 * not need a release. The plane state will be attached
+			 * to the stream, and therefore part of the atomic
+			 * state. It'll be released when the atomic state is
+			 * cleaned.
+			 */
 			if (!dc_add_plane_to_context(
 					dc,
 					dm_new_crtc_state->stream,
-					dm_new_plane_state->dc_state,
+					dc_new_plane_state,
 					dm_state->context)) {
 
-				ret = -EINVAL;
-				return ret;
+				dc_plane_state_release(dc_new_plane_state);
+				return -EINVAL;
 			}
 
+			dm_new_plane_state->dc_state = dc_new_plane_state;
+
 			/* Tell DC to do a full surface update every time there
 			 * is a plane change. Inefficient, but works for now.
 			 */

@@ -4812,6 +4854,9 @@ static int dm_atomic_check_plane_state_fb(struct drm_atomic_state *state,
 		return -EDEADLK;
 
 	crtc_state = drm_atomic_get_crtc_state(plane_state->state, crtc);
+	if (IS_ERR(crtc_state))
+		return PTR_ERR(crtc_state);
+
 	if (crtc->primary == plane && crtc_state->active) {
 		if (!plane_state->fb)
 			return -EINVAL;

@@ -193,6 +193,7 @@ void amdgpu_dm_set_ctm(struct dm_crtc_state *crtc)
 	struct drm_property_blob *blob = crtc->base.ctm;
 	struct dc_stream_state *stream = crtc->stream;
 	struct drm_color_ctm *ctm;
+	int64_t val;
 	int i;
 
 	if (!blob) {

@@ -206,7 +207,9 @@ void amdgpu_dm_set_ctm(struct dm_crtc_state *crtc)
 	 * DRM gives a 3x3 matrix, but DC wants 3x4. Assuming we're operating
 	 * with homogeneous coordinates, augment the matrix with 0's.
 	 *
-	 * The format provided is S31.32, which is the same as our fixed31_32.
+	 * The format provided is S31.32, using signed-magnitude representation.
+	 * Our fixed31_32 is also S31.32, but is using 2's complement. We have
+	 * to convert from signed-magnitude to 2's complement.
 	 */
 	for (i = 0; i < 12; i++) {
 		/* Skip 4th element */

@@ -214,8 +217,14 @@ void amdgpu_dm_set_ctm(struct dm_crtc_state *crtc)
 			stream->gamut_remap_matrix.matrix[i] = dal_fixed31_32_zero;
 			continue;
 		}
-		/* csc[i] = ctm[i - floor(i/4)] */
-		stream->gamut_remap_matrix.matrix[i].value = ctm->matrix[i - (i/4)];
+
+		/* gamut_remap_matrix[i] = ctm[i - floor(i/4)] */
+		val = ctm->matrix[i - (i/4)];
+		/* If negative, convert to 2's complement. */
+		if (val & (1ULL << 63))
+			val = -(val & ~(1ULL << 63));
+
+		stream->gamut_remap_matrix.matrix[i].value = val;
 	}
 }

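A quick worked check of the conversion this CTM fix performs: in DRM's signed-magnitude S31.32 format, bit 63 is the sign and the low 63 bits are the magnitude, so -1.0 is encoded as (1ULL << 63) | (1ULL << 32) and must be renegotiated into two's complement before DC's fixed31_32 math sees it. A standalone version of exactly the branch added above:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t sm = (1ULL << 63) | (1ULL << 32); /* -1.0, signed-magnitude */
	int64_t val = (int64_t)sm;

	if (val & (1ULL << 63))
		val = -(val & ~(1ULL << 63)); /* same conversion as the hunk */

	printf("integer part = %lld\n", (long long)(val >> 32)); /* prints -1 */
	return 0;
}
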
@@ -85,6 +85,9 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
 	enum ddc_result res;
 	ssize_t read_bytes;
 
+	if (WARN_ON(msg->size > 16))
+		return -E2BIG;
+
 	switch (msg->request & ~DP_AUX_I2C_MOT) {
 	case DP_AUX_NATIVE_READ:
 		read_bytes = dal_ddc_service_read_dpcd_data(

@@ -60,7 +60,8 @@ static const struct dc_log_type_info log_type_info_tbl[] = {
 	{LOG_EVENT_LINK_LOSS, "LinkLoss"},
 	{LOG_EVENT_UNDERFLOW, "Underflow"},
 	{LOG_IF_TRACE, "InterfaceTrace"},
-	{LOG_DTN, "DTN"}
+	{LOG_DTN, "DTN"},
+	{LOG_PROFILING, "Profiling"}
 };

@@ -44,7 +44,7 @@
 
 #include "bios_parser_common.h"
 #define LAST_RECORD_TYPE 0xff
+#define SMU9_SYSPLL0_ID  0
 
 struct i2c_id_config_access {
 	uint8_t bfI2C_LineMux:4;

@@ -1220,7 +1220,7 @@ static unsigned int bios_parser_get_smu_clock_info(
 	if (!bp->cmd_tbl.get_smu_clock_info)
 		return BP_RESULT_FAILURE;
 
-	return bp->cmd_tbl.get_smu_clock_info(bp);
+	return bp->cmd_tbl.get_smu_clock_info(bp, 0);
 }
 
 static enum bp_result bios_parser_program_crtc_timing(

@@ -1376,7 +1376,7 @@ static enum bp_result get_firmware_info_v3_1(
 	if (bp->cmd_tbl.get_smu_clock_info != NULL) {
 		/* VBIOS gives in 10KHz */
 		info->smu_gpu_pll_output_freq =
-			bp->cmd_tbl.get_smu_clock_info(bp) * 10;
+			bp->cmd_tbl.get_smu_clock_info(bp, SMU9_SYSPLL0_ID) * 10;
 	}
 
 	return BP_RESULT_OK;

@@ -796,7 +796,7 @@ static enum bp_result set_dce_clock_v2_1(
 ******************************************************************************
 *****************************************************************************/
 
-static unsigned int get_smu_clock_info_v3_1(struct bios_parser *bp);
+static unsigned int get_smu_clock_info_v3_1(struct bios_parser *bp, uint8_t id);
 
 static void init_get_smu_clock_info(struct bios_parser *bp)
 {

@@ -805,12 +805,13 @@ static void init_get_smu_clock_info(struct bios_parser *bp)
 
 }
 
-static unsigned int get_smu_clock_info_v3_1(struct bios_parser *bp)
+static unsigned int get_smu_clock_info_v3_1(struct bios_parser *bp, uint8_t id)
 {
 	struct atom_get_smu_clock_info_parameters_v3_1 smu_input = {0};
 	struct atom_get_smu_clock_info_output_parameters_v3_1 smu_output;
 
 	smu_input.command = GET_SMU_CLOCK_INFO_V3_1_GET_PLLVCO_FREQ;
+	smu_input.syspll_id = id;
 
 	/* Get Specific Clock */
 	if (EXEC_BIOS_CMD_TABLE(getsmuclockinfo, smu_input)) {

@@ -96,7 +96,7 @@ struct cmd_tbl {
 		struct bios_parser *bp,
 		struct bp_set_dce_clock_parameters *bp_params);
 	unsigned int (*get_smu_clock_info)(
-		struct bios_parser *bp);
+		struct bios_parser *bp, uint8_t id);
 
 };

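Taken together, the three bios_parser hunks thread one new parameter through: callers of the clock-info table now name which system PLL they want rather than implicitly reading PLL0. A standalone sketch of the call-site shape, where the function below is a hypothetical stand-in for the table callback and the frequency is invented:

#include <stdint.h>
#include <stdio.h>

#define SMU9_SYSPLL0_ID 0

static unsigned int get_smu_clock_info(uint8_t syspll_id)
{
	return syspll_id == SMU9_SYSPLL0_ID ? 70000 : 0; /* fake 10KHz units */
}

int main(void)
{
	/* VBIOS reports in 10KHz, so scale by 10 as get_firmware_info_v3_1 does */
	printf("pll0 = %u kHz\n", get_smu_clock_info(SMU9_SYSPLL0_ID) * 10);
	return 0;
}
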
@@ -85,7 +85,6 @@ static void calculate_bandwidth(
 	const uint32_t s_mid5 = 5;
 	const uint32_t s_mid6 = 6;
 	const uint32_t s_high = 7;
-	const uint32_t bus_efficiency = 1;
 	const uint32_t dmif_chunk_buff_margin = 1;
 
 	uint32_t max_chunks_fbc_mode;

@@ -592,7 +591,12 @@ static void calculate_bandwidth(
 	/* 1 = use channel 0 and 1*/
 	/* 2 = use channel 0,1,2,3*/
 	if ((fbc_enabled == 1 && lpt_enabled == 1)) {
-		data->dram_efficiency = bw_int_to_fixed(1);
+		if (vbios->memory_type == bw_def_hbm)
+			data->dram_efficiency = bw_frc_to_fixed(5, 10);
+		else
+			data->dram_efficiency = bw_int_to_fixed(1);
+
 		if (dceip->low_power_tiling_mode == 0) {
 			data->number_of_dram_channels = 1;
 		}

@@ -607,7 +611,10 @@ static void calculate_bandwidth(
 		}
 	}
 	else {
-		data->dram_efficiency = bw_frc_to_fixed(8, 10);
+		if (vbios->memory_type == bw_def_hbm)
+			data->dram_efficiency = bw_frc_to_fixed(5, 10);
+		else
+			data->dram_efficiency = bw_frc_to_fixed(8, 10);
 	}
 	/*memory request size and latency hiding:*/
 	/*request size is normally 64 byte, 2-line interleaved, with full latency hiding*/

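The efficiency change above derates HBM to 50% (bw_frc_to_fixed(5, 10)) where GDDR-style memory keeps 80-100%, and the derating feeds directly into effective bandwidth: yclk times channel width in bytes times channel count times efficiency. A worked example with invented, illustrative numbers only:

#include <stdio.h>

int main(void)
{
	double yclk_mhz = 500.0;           /* memory clock */
	double channel_width_bits = 1024;  /* HBM-style wide interface */
	double channels = 4;
	double efficiency_hbm = 0.5;       /* bw_frc_to_fixed(5, 10) */

	double gbps = yclk_mhz * 1e6 * (channel_width_bits / 8.0) * channels
		      * efficiency_hbm / 1e9;
	printf("effective bandwidth ~ %.1f GB/s\n", gbps); /* ~128.0 */
	return 0;
}
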
@@ -1171,9 +1178,9 @@ static void calculate_bandwidth(
 	}
 	for (i = 0; i <= 2; i++) {
 		for (j = 0; j <= 7; j++) {
-			data->dmif_burst_time[i][j] = bw_max3(data->dmif_total_page_close_open_time, bw_div(data->total_display_reads_required_dram_access_data, (bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[i]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels)))), bw_div(data->total_display_reads_required_data, (bw_mul(bw_mul(sclk[j], vbios->data_return_bus_width), bw_int_to_fixed(bus_efficiency)))));
+			data->dmif_burst_time[i][j] = bw_max3(data->dmif_total_page_close_open_time, bw_div(data->total_display_reads_required_dram_access_data, (bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[i]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels)))), bw_div(data->total_display_reads_required_data, (bw_mul(bw_mul(sclk[j], vbios->data_return_bus_width), bw_frc_to_fixed(dceip->percent_of_ideal_port_bw_received_after_urgent_latency, 100)))));
 			if (data->d1_display_write_back_dwb_enable == 1) {
-				data->mcifwr_burst_time[i][j] = bw_max3(data->mcifwr_total_page_close_open_time, bw_div(data->total_display_writes_required_dram_access_data, (bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[i]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_wrchannels)))), bw_div(data->total_display_writes_required_data, (bw_mul(bw_mul(sclk[j], vbios->data_return_bus_width), bw_int_to_fixed(bus_efficiency)))));
+				data->mcifwr_burst_time[i][j] = bw_max3(data->mcifwr_total_page_close_open_time, bw_div(data->total_display_writes_required_dram_access_data, (bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[i]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_wrchannels)))), bw_div(data->total_display_writes_required_data, (bw_mul(sclk[j], vbios->data_return_bus_width))));
 			}
 		}
 	}

@@ -1258,6 +1265,16 @@ static void calculate_bandwidth(
 	/* / (dispclk - display bw)*/
 	/*the minimum latency hiding is the minimum for all pipes of one screen line time, plus one more line time if doing lb prefetch, plus the dmif data buffer size equivalent in time, minus the urgent latency.*/
 	/*the minimum latency hiding is further limited by the cursor. the cursor latency hiding is the number of lines of the cursor buffer, minus one if the downscaling is less than two, or minus three if it is more*/
+
+	/*initialize variables*/
+	number_of_displays_enabled = 0;
+	number_of_displays_enabled_with_margin = 0;
+	for (k = 0; k <= maximum_number_of_surfaces - 1; k++) {
+		if (data->enable[k]) {
+			number_of_displays_enabled = number_of_displays_enabled + 1;
+		}
+		data->display_pstate_change_enable[k] = 0;
+	}
 	for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
 		if (data->enable[i]) {
 			if ((bw_equ(dceip->stutter_and_dram_clock_state_change_gated_before_cursor, bw_int_to_fixed(0)) && bw_mtn(data->cursor_width_pixels[i], bw_int_to_fixed(0)))) {

@@ -1276,7 +1293,10 @@ static void calculate_bandwidth(
 	for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
 		if (data->enable[i]) {
 			if (dceip->graphics_lb_nodownscaling_multi_line_prefetching == 1 && (bw_equ(data->vsr[i], bw_int_to_fixed(1)) || (bw_leq(data->vsr[i], bw_frc_to_fixed(8, 10)) && bw_leq(data->v_taps[i], bw_int_to_fixed(2)) && data->lb_bpc[i] == 8)) && surface_type[i] == bw_def_graphics) {
-				data->minimum_latency_hiding[i] = bw_sub(bw_div(bw_mul((bw_div((bw_add(bw_sub(data->lb_partitions[i], bw_int_to_fixed(1)), bw_div(bw_div(data->data_buffer_size[i], bw_int_to_fixed(data->bytes_per_pixel[i])), data->source_width_pixels[i]))), data->vsr[i])), data->h_total[i]), data->pixel_rate[i]), data->total_dmifmc_urgent_latency);
+				if (number_of_displays_enabled > 2)
+					data->minimum_latency_hiding[i] = bw_sub(bw_div(bw_mul((bw_div((bw_add(bw_sub(data->lb_partitions[i], bw_int_to_fixed(2)), bw_div(bw_div(data->data_buffer_size[i], bw_int_to_fixed(data->bytes_per_pixel[i])), data->source_width_pixels[i]))), data->vsr[i])), data->h_total[i]), data->pixel_rate[i]), data->total_dmifmc_urgent_latency);
+				else
+					data->minimum_latency_hiding[i] = bw_sub(bw_div(bw_mul((bw_div((bw_add(bw_sub(data->lb_partitions[i], bw_int_to_fixed(1)), bw_div(bw_div(data->data_buffer_size[i], bw_int_to_fixed(data->bytes_per_pixel[i])), data->source_width_pixels[i]))), data->vsr[i])), data->h_total[i]), data->pixel_rate[i]), data->total_dmifmc_urgent_latency);
 			}
 			else {
 				data->minimum_latency_hiding[i] = bw_sub(bw_div(bw_mul((bw_div((bw_add(bw_int_to_fixed(1 + data->line_buffer_prefetch[i]), bw_div(bw_div(data->data_buffer_size[i], bw_int_to_fixed(data->bytes_per_pixel[i])), data->source_width_pixels[i]))), data->vsr[i])), data->h_total[i]), data->pixel_rate[i]), data->total_dmifmc_urgent_latency);

@@ -1338,24 +1358,15 @@ static void calculate_bandwidth(
 	for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
 		if (data->enable[i]) {
 			if (dceip->graphics_lb_nodownscaling_multi_line_prefetching == 1) {
-				data->maximum_latency_hiding[i] = bw_add(data->minimum_latency_hiding[i], bw_mul(bw_frc_to_fixed(8, 10), data->total_dmifmc_urgent_latency));
+				data->maximum_latency_hiding[i] = bw_add(data->minimum_latency_hiding[i], bw_mul(bw_frc_to_fixed(5, 10), data->total_dmifmc_urgent_latency));
 			}
 			else {
 				/*maximum_latency_hiding(i) = minimum_latency_hiding(i) + 1 / vsr(i) * h_total(i) / pixel_rate(i) + 0.5 * total_dmifmc_urgent_latency*/
-				data->maximum_latency_hiding[i] = bw_add(data->minimum_latency_hiding[i], bw_mul(bw_frc_to_fixed(8, 10), data->total_dmifmc_urgent_latency));
+				data->maximum_latency_hiding[i] = bw_add(data->minimum_latency_hiding[i], bw_mul(bw_frc_to_fixed(5, 10), data->total_dmifmc_urgent_latency));
 			}
 			data->maximum_latency_hiding_with_cursor[i] = bw_min2(data->maximum_latency_hiding[i], data->cursor_latency_hiding[i]);
 		}
 	}
-	/*initialize variables*/
-	number_of_displays_enabled = 0;
-	number_of_displays_enabled_with_margin = 0;
-	for (k = 0; k <= maximum_number_of_surfaces - 1; k++) {
-		if (data->enable[k]) {
-			number_of_displays_enabled = number_of_displays_enabled + 1;
-		}
-		data->display_pstate_change_enable[k] = 0;
-	}
 	for (i = 0; i <= 2; i++) {
 		for (j = 0; j <= 7; j++) {
 			data->min_dram_speed_change_margin[i][j] = bw_int_to_fixed(9999);

@@ -1370,10 +1381,11 @@ static void calculate_bandwidth(
 				/*determine the minimum dram clock change margin for each set of clock frequencies*/
 				data->min_dram_speed_change_margin[i][j] = bw_min2(data->min_dram_speed_change_margin[i][j], data->dram_speed_change_margin);
 				/*compute the maximum clock frequency required for the dram clock change at each set of clock frequencies*/
-				data->dispclk_required_for_dram_speed_change[i][j] = bw_max3(data->dispclk_required_for_dram_speed_change[i][j], bw_div(bw_div(bw_mul(data->src_pixels_for_first_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]))), bw_div(bw_div(bw_mul(data->src_pixels_for_last_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_add(bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]), data->active_time[k]))));
-				if ((bw_ltn(data->dispclk_required_for_dram_speed_change[i][j], vbios->high_voltage_max_dispclk))) {
+				data->dispclk_required_for_dram_speed_change_pipe[i][j] = bw_max2(bw_div(bw_div(bw_mul(data->src_pixels_for_first_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]))), bw_div(bw_div(bw_mul(data->src_pixels_for_last_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_add(bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]), data->active_time[k]))));
+				if ((bw_ltn(data->dispclk_required_for_dram_speed_change_pipe[i][j], vbios->high_voltage_max_dispclk))) {
 					data->display_pstate_change_enable[k] = 1;
 					data->num_displays_with_margin[i][j] = data->num_displays_with_margin[i][j] + 1;
+					data->dispclk_required_for_dram_speed_change[i][j] = bw_max2(data->dispclk_required_for_dram_speed_change[i][j], data->dispclk_required_for_dram_speed_change_pipe[i][j]);
 				}
 			}
 		}

@@ -1383,10 +1395,11 @@ static void calculate_bandwidth(
 				/*determine the minimum dram clock change margin for each display pipe*/
 				data->min_dram_speed_change_margin[i][j] = bw_min2(data->min_dram_speed_change_margin[i][j], data->dram_speed_change_margin);
 				/*compute the maximum clock frequency required for the dram clock change at each set of clock frequencies*/
-				data->dispclk_required_for_dram_speed_change[i][j] = bw_max3(data->dispclk_required_for_dram_speed_change[i][j], bw_div(bw_div(bw_mul(data->src_pixels_for_first_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_sub(bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]), data->mcifwr_burst_time[i][j]))), bw_div(bw_div(bw_mul(data->src_pixels_for_last_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_add(bw_sub(bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]), data->mcifwr_burst_time[i][j]), data->active_time[k]))));
-				if ((bw_ltn(data->dispclk_required_for_dram_speed_change[i][j], vbios->high_voltage_max_dispclk))) {
+				data->dispclk_required_for_dram_speed_change_pipe[i][j] = bw_max2(bw_div(bw_div(bw_mul(data->src_pixels_for_first_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_sub(bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]), data->mcifwr_burst_time[i][j]))), bw_div(bw_div(bw_mul(data->src_pixels_for_last_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_add(bw_sub(bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]), data->mcifwr_burst_time[i][j]), data->active_time[k]))));
+				if ((bw_ltn(data->dispclk_required_for_dram_speed_change_pipe[i][j], vbios->high_voltage_max_dispclk))) {
 					data->display_pstate_change_enable[k] = 1;
 					data->num_displays_with_margin[i][j] = data->num_displays_with_margin[i][j] + 1;
+					data->dispclk_required_for_dram_speed_change[i][j] = bw_max2(data->dispclk_required_for_dram_speed_change[i][j], data->dispclk_required_for_dram_speed_change_pipe[i][j]);
 				}
 			}
 		}

@@ -1420,7 +1433,7 @@ static void calculate_bandwidth(
 		data->displays_with_same_mode[i] = bw_int_to_fixed(0);
 		if (data->enable[i] == 1 && data->display_pstate_change_enable[i] == 0 && bw_mtn(data->v_blank_dram_speed_change_margin[i], bw_int_to_fixed(0))) {
 			for (j = 0; j <= maximum_number_of_surfaces - 1; j++) {
-				if ((data->enable[j] == 1 && bw_equ(data->source_width_rounded_up_to_chunks[i], data->source_width_rounded_up_to_chunks[j]) && bw_equ(data->source_height_rounded_up_to_chunks[i], data->source_height_rounded_up_to_chunks[j]) && bw_equ(data->vsr[i], data->vsr[j]) && bw_equ(data->hsr[i], data->hsr[j]) && bw_equ(data->pixel_rate[i], data->pixel_rate[j]))) {
+				if ((i == j || data->display_synchronization_enabled) && (data->enable[j] == 1 && bw_equ(data->source_width_rounded_up_to_chunks[i], data->source_width_rounded_up_to_chunks[j]) && bw_equ(data->source_height_rounded_up_to_chunks[i], data->source_height_rounded_up_to_chunks[j]) && bw_equ(data->vsr[i], data->vsr[j]) && bw_equ(data->hsr[i], data->hsr[j]) && bw_equ(data->pixel_rate[i], data->pixel_rate[j]))) {
 					data->displays_with_same_mode[i] = bw_add(data->displays_with_same_mode[i], bw_int_to_fixed(1));
 				}
 			}

@@ -1435,7 +1448,7 @@ static void calculate_bandwidth(
 	/*aligned displays with the same timing.*/
 	/*the display(s) with the negative margin can be switched in the v_blank region while the other*/
 	/*displays are in v_blank or v_active.*/
-	if ((number_of_displays_enabled_with_margin + number_of_aligned_displays_with_no_margin == number_of_displays_enabled && bw_mtn(data->min_dram_speed_change_margin[high][s_high], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[high][s_high], bw_int_to_fixed(9999)) && bw_ltn(data->dispclk_required_for_dram_speed_change[high][s_high], vbios->high_voltage_max_dispclk))) {
+	if (number_of_displays_enabled_with_margin > 0 && (number_of_displays_enabled_with_margin + number_of_aligned_displays_with_no_margin) == number_of_displays_enabled && bw_mtn(data->min_dram_speed_change_margin[high][s_high], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[high][s_high], bw_int_to_fixed(9999)) && bw_ltn(data->dispclk_required_for_dram_speed_change[high][s_high], vbios->high_voltage_max_dispclk)) {
 		data->nbp_state_change_enable = bw_def_yes;
 	}
 	else {

@ -1448,6 +1461,25 @@ static void calculate_bandwidth(
|
|||
else {
|
||||
nbp_state_change_enable_blank = bw_def_no;
|
||||
}
|
||||
|
||||
/*average bandwidth*/
|
||||
/*the average bandwidth with no compression is the vertical active time is the source width times the bytes per pixel divided by the line time, multiplied by the vertical scale ratio and the ratio of bytes per request divided by the useful bytes per request.*/
|
||||
/*the average bandwidth with compression is the same, divided by the compression ratio*/
|
||||
for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
|
||||
if (data->enable[i]) {
|
||||
data->average_bandwidth_no_compression[i] = bw_div(bw_mul(bw_mul(bw_div(bw_mul(data->source_width_rounded_up_to_chunks[i], bw_int_to_fixed(data->bytes_per_pixel[i])), (bw_div(data->h_total[i], data->pixel_rate[i]))), data->vsr[i]), data->bytes_per_request[i]), data->useful_bytes_per_request[i]);
|
||||
data->average_bandwidth[i] = bw_div(data->average_bandwidth_no_compression[i], data->compression_rate[i]);
|
||||
}
|
||||
}
|
||||
data->total_average_bandwidth_no_compression = bw_int_to_fixed(0);
|
||||
data->total_average_bandwidth = bw_int_to_fixed(0);
|
||||
for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
|
||||
if (data->enable[i]) {
|
||||
data->total_average_bandwidth_no_compression = bw_add(data->total_average_bandwidth_no_compression, data->average_bandwidth_no_compression[i]);
|
||||
data->total_average_bandwidth = bw_add(data->total_average_bandwidth, data->average_bandwidth[i]);
|
||||
}
|
||||
}

/*required yclk(pclk)*/
/*yclk requirement only makes sense if the dmif and mcifwr data total page close-open time is less than the time for data transfer and the total pte requests fit in the scatter-gather saw queue size*/
/*if that is the case, the yclk requirement is the maximum of the ones required by dmif and mcifwr, and the high/low yclk(pclk) is chosen accordingly*/
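/*
 * Sketch of the yclk derating used throughout the ladder below (plain
 * doubles, illustrative names): the deliverable DRAM bandwidth at a
 * given yclk is the ideal channel bandwidth derated by the dram
 * efficiency factor. A yclk level is acceptable only if this exceeds
 * the required bandwidth and the blackout/p-state margins still hold.
 */
static double deliverable_dram_bandwidth(double yclk_mhz,
		double channel_width_bits, double num_channels,
		double dram_efficiency)
{
	return dram_efficiency * yclk_mhz * (channel_width_bits / 8.0) *
			num_channels;
}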

@@ -1497,17 +1529,20 @@ static void calculate_bandwidth(
}
else {
data->required_dram_bandwidth_gbyte_per_second = bw_div(bw_max2(data->dmif_required_dram_bandwidth, data->mcifwr_required_dram_bandwidth), bw_int_to_fixed(1000));
if (bw_ltn(bw_mul(data->required_dram_bandwidth_gbyte_per_second, bw_int_to_fixed(1000)), bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[low]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels))) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[low][s_high], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[low][s_high], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[low][s_high], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[low][s_high], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[low][s_high], vbios->high_voltage_max_dispclk))) && (data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[low][s_high], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[low][s_high], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[low][s_high], vbios->high_voltage_max_dispclk) && data->num_displays_with_margin[low][s_high] == number_of_displays_enabled_with_margin))) {
if (bw_ltn(data->total_average_bandwidth_no_compression, bw_mul(bw_mul(bw_mul(bw_frc_to_fixed(dceip->max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation, 100),yclk[low]),bw_div(bw_int_to_fixed(vbios->dram_channel_width_in_bits),bw_int_to_fixed(8))),bw_int_to_fixed(vbios->number_of_dram_channels)))
&& bw_ltn(bw_mul(data->required_dram_bandwidth_gbyte_per_second, bw_int_to_fixed(1000)), bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[low]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels))) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[low][s_high], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[low][s_high], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[low][s_high], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[low][s_high], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[low][s_high], vbios->high_voltage_max_dispclk))) && (!data->increase_voltage_to_support_mclk_switch || data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[low][s_high], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[low][s_high], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[low][s_high], vbios->high_voltage_max_dispclk) && data->num_displays_with_margin[low][s_high] == number_of_displays_enabled_with_margin))) {
yclk_message = bw_fixed_to_int(vbios->low_yclk);
data->y_clk_level = low;
data->dram_bandwidth = bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[low]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels));
}
else if (bw_ltn(bw_mul(data->required_dram_bandwidth_gbyte_per_second, bw_int_to_fixed(1000)), bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[mid]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels))) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[mid][s_high], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[mid][s_high], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[mid][s_high], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[mid][s_high], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[mid][s_high], vbios->high_voltage_max_dispclk))) && (data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[mid][s_high], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[mid][s_high], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[mid][s_high], vbios->high_voltage_max_dispclk) && data->num_displays_with_margin[mid][s_high] == number_of_displays_enabled_with_margin))) {
else if (bw_ltn(data->total_average_bandwidth_no_compression, bw_mul(bw_mul(bw_mul(bw_frc_to_fixed(dceip->max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation, 100),yclk[mid]),bw_div(bw_int_to_fixed(vbios->dram_channel_width_in_bits),bw_int_to_fixed(8))),bw_int_to_fixed(vbios->number_of_dram_channels)))
&& bw_ltn(bw_mul(data->required_dram_bandwidth_gbyte_per_second, bw_int_to_fixed(1000)), bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[mid]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels))) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[mid][s_high], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[mid][s_high], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[mid][s_high], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[mid][s_high], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[mid][s_high], vbios->high_voltage_max_dispclk))) && (!data->increase_voltage_to_support_mclk_switch || data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[mid][s_high], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[mid][s_high], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[mid][s_high], vbios->high_voltage_max_dispclk) && data->num_displays_with_margin[mid][s_high] == number_of_displays_enabled_with_margin))) {
yclk_message = bw_fixed_to_int(vbios->mid_yclk);
data->y_clk_level = mid;
data->dram_bandwidth = bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[mid]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels));
}
else if (bw_ltn(bw_mul(data->required_dram_bandwidth_gbyte_per_second, bw_int_to_fixed(1000)), bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[high]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels)))) {
else if (bw_ltn(data->total_average_bandwidth_no_compression, bw_mul(bw_mul(bw_mul(bw_frc_to_fixed(dceip->max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation, 100),yclk[high]),bw_div(bw_int_to_fixed(vbios->dram_channel_width_in_bits),bw_int_to_fixed(8))),bw_int_to_fixed(vbios->number_of_dram_channels)))
&& bw_ltn(bw_mul(data->required_dram_bandwidth_gbyte_per_second, bw_int_to_fixed(1000)), bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[high]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels)))) {
yclk_message = bw_fixed_to_int(vbios->high_yclk);
data->y_clk_level = high;
data->dram_bandwidth = bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[high]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels));

@@ -1523,8 +1558,8 @@ static void calculate_bandwidth(
/*if that is the case, the sclk requirement is the maximum of the ones required by dmif and mcifwr, and the high/mid/low sclk is chosen accordingly, unless that choice results in forsaking dram speed/nb p-state change.*/
/*the dmif and mcifwr sclk required is the one that allows the transfer of all pipe's data buffer size through the sclk bus in the time for data transfer*/
/*for dmif, pte and cursor requests have to be included.*/
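/*
 * Sketch of the two required-sclk expressions below (plain doubles,
 * illustrative names): the data that must move, over the time available
 * to move it, over the effective bus width. For the read (dmif) path
 * the bus width is further derated by the post-urgent-latency port
 * efficiency, expressed as a percentage.
 */
static double required_sclk(double total_data_bytes,
		double transfer_time_us, double bus_width_bytes,
		double port_efficiency_percent)
{
	return total_data_bytes / transfer_time_us /
			(bus_width_bytes * port_efficiency_percent / 100.0);
}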
data->dmif_required_sclk = bw_div(bw_div(data->total_display_reads_required_data, data->display_reads_time_for_data_transfer), (bw_mul(vbios->data_return_bus_width, bw_int_to_fixed(bus_efficiency))));
data->mcifwr_required_sclk = bw_div(bw_div(data->total_display_writes_required_data, data->display_writes_time_for_data_transfer), (bw_mul(vbios->data_return_bus_width, bw_int_to_fixed(bus_efficiency))));
data->dmif_required_sclk = bw_div(bw_div(data->total_display_reads_required_data, data->display_reads_time_for_data_transfer), (bw_mul(vbios->data_return_bus_width, bw_frc_to_fixed(dceip->percent_of_ideal_port_bw_received_after_urgent_latency, 100))));
data->mcifwr_required_sclk = bw_div(bw_div(data->total_display_writes_required_data, data->display_writes_time_for_data_transfer), vbios->data_return_bus_width);
if (bw_mtn(data->scatter_gather_total_pte_requests, dceip->maximum_total_outstanding_pte_requests_allowed_by_saw)) {
data->required_sclk = bw_int_to_fixed(9999);
sclk_message = bw_def_exceeded_allowed_outstanding_pte_req_queue_size;

@@ -1537,42 +1572,56 @@ static void calculate_bandwidth(
}
else {
data->required_sclk = bw_max2(data->dmif_required_sclk, data->mcifwr_required_sclk);
if (bw_ltn(data->required_sclk, sclk[s_low]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_low], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_low], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_low], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_low], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_low], vbios->high_voltage_max_dispclk))) && (data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_low], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_low], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_low], vbios->low_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_low] == number_of_displays_enabled_with_margin))) {
if (bw_ltn(data->total_average_bandwidth_no_compression, bw_mul(bw_mul(bw_frc_to_fixed(dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation, 100),sclk[low]),vbios->data_return_bus_width))
&& bw_ltn(data->required_sclk, sclk[s_low]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_low], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_low], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_low], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_low], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_low], vbios->high_voltage_max_dispclk))) && (!data->increase_voltage_to_support_mclk_switch || data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_low], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_low], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_low], vbios->low_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_low] == number_of_displays_enabled_with_margin))) {
sclk_message = bw_def_low;
data->sclk_level = s_low;
data->required_sclk = vbios->low_sclk;
}
else if (bw_ltn(data->required_sclk, sclk[s_mid1]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid1], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid1], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid1], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid1], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid1], vbios->high_voltage_max_dispclk))) && (data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid1], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid1], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid1], vbios->mid_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid1] == number_of_displays_enabled_with_margin))) {
else if (bw_ltn(data->total_average_bandwidth_no_compression, bw_mul(bw_mul(bw_frc_to_fixed(dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation, 100),sclk[mid]),vbios->data_return_bus_width))
&& bw_ltn(data->required_sclk, sclk[s_mid1]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid1], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid1], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid1], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid1], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid1], vbios->high_voltage_max_dispclk))) && (!data->increase_voltage_to_support_mclk_switch || data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid1], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid1], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid1], vbios->mid_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid1] == number_of_displays_enabled_with_margin))) {
sclk_message = bw_def_mid;
data->sclk_level = s_mid1;
data->required_sclk = vbios->mid1_sclk;
}
else if (bw_ltn(data->required_sclk, sclk[s_mid2]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid2], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid2], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid2], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid2], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid2], vbios->high_voltage_max_dispclk))) && (data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid2], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid2], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid2], vbios->mid_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid2] == number_of_displays_enabled_with_margin))) {
else if (bw_ltn(data->total_average_bandwidth_no_compression, bw_mul(bw_mul(bw_frc_to_fixed(dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation, 100),sclk[s_mid2]),vbios->data_return_bus_width))
&& bw_ltn(data->required_sclk, sclk[s_mid2]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid2], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid2], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid2], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid2], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid2], vbios->high_voltage_max_dispclk))) && (!data->increase_voltage_to_support_mclk_switch || data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid2], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid2], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid2], vbios->mid_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid2] == number_of_displays_enabled_with_margin))) {
sclk_message = bw_def_mid;
data->sclk_level = s_mid2;
data->required_sclk = vbios->mid2_sclk;
}
else if (bw_ltn(data->required_sclk, sclk[s_mid3]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid3], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid3], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid3], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid3], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid3], vbios->high_voltage_max_dispclk))) && (data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid3], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid3], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid3], vbios->mid_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid3] == number_of_displays_enabled_with_margin))) {
else if (bw_ltn(data->total_average_bandwidth_no_compression, bw_mul(bw_mul(bw_frc_to_fixed(dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation, 100),sclk[s_mid3]),vbios->data_return_bus_width))
&& bw_ltn(data->required_sclk, sclk[s_mid3]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid3], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid3], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid3], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid3], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid3], vbios->high_voltage_max_dispclk))) && (!data->increase_voltage_to_support_mclk_switch || data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid3], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid3], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid3], vbios->mid_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid3] == number_of_displays_enabled_with_margin))) {
sclk_message = bw_def_mid;
data->sclk_level = s_mid3;
data->required_sclk = vbios->mid3_sclk;
}
else if (bw_ltn(data->required_sclk, sclk[s_mid4]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid4], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid4], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid4], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid4], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid4], vbios->high_voltage_max_dispclk))) && (data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid4], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid4], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid4], vbios->mid_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid4] == number_of_displays_enabled_with_margin))) {
else if (bw_ltn(data->total_average_bandwidth_no_compression, bw_mul(bw_mul(bw_frc_to_fixed(dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation, 100),sclk[s_mid4]),vbios->data_return_bus_width))
&& bw_ltn(data->required_sclk, sclk[s_mid4]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid4], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid4], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid4], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid4], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid4], vbios->high_voltage_max_dispclk))) && (!data->increase_voltage_to_support_mclk_switch || data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid4], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid4], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid4], vbios->mid_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid4] == number_of_displays_enabled_with_margin))) {
sclk_message = bw_def_mid;
data->sclk_level = s_mid4;
data->required_sclk = vbios->mid4_sclk;
}
else if (bw_ltn(data->required_sclk, sclk[s_mid5]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid5], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid5], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid5], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid5], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid5], vbios->high_voltage_max_dispclk))) && (data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid5], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid5], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid5], vbios->mid_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid5] == number_of_displays_enabled_with_margin))) {
else if (bw_ltn(data->total_average_bandwidth_no_compression, bw_mul(bw_mul(bw_frc_to_fixed(dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation, 100),sclk[s_mid5]),vbios->data_return_bus_width))
&& bw_ltn(data->required_sclk, sclk[s_mid5]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid5], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid5], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid5], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid5], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid5], vbios->high_voltage_max_dispclk))) && (!data->increase_voltage_to_support_mclk_switch || data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid5], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid5], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid5], vbios->mid_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid5] == number_of_displays_enabled_with_margin))) {
sclk_message = bw_def_mid;
data->sclk_level = s_mid5;
data->required_sclk = vbios->mid5_sclk;
}
else if (bw_ltn(data->required_sclk, sclk[s_mid6]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid6], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid6], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid6], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid6], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid6], vbios->high_voltage_max_dispclk))) && (data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid6], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid6], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid6], vbios->high_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid6] == number_of_displays_enabled_with_margin))) {
else if (bw_ltn(data->total_average_bandwidth_no_compression, bw_mul(bw_mul(bw_frc_to_fixed(dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation, 100),sclk[s_mid6]),vbios->data_return_bus_width))
&& bw_ltn(data->required_sclk, sclk[s_mid6]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid6], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid6], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid6], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid6], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid6], vbios->high_voltage_max_dispclk))) && (!data->increase_voltage_to_support_mclk_switch || data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid6], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid6], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid6], vbios->high_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid6] == number_of_displays_enabled_with_margin))) {
sclk_message = bw_def_mid;
data->sclk_level = s_mid6;
data->required_sclk = vbios->mid6_sclk;
}
else if (bw_ltn(data->required_sclk, sclk[s_high])) {
else if (bw_ltn(data->total_average_bandwidth_no_compression, bw_mul(bw_mul(bw_frc_to_fixed(dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation, 100),sclk[s_high]),vbios->data_return_bus_width))
&& bw_ltn(data->required_sclk, sclk[s_high])) {
sclk_message = bw_def_high;
data->sclk_level = s_high;
data->required_sclk = vbios->high_sclk;
}
else if (bw_meq(data->total_average_bandwidth_no_compression, bw_mul(bw_mul(bw_frc_to_fixed(dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation, 100),sclk[s_high]),vbios->data_return_bus_width))
&& bw_ltn(data->required_sclk, sclk[s_high])) {
sclk_message = bw_def_high;
data->sclk_level = s_high;
data->required_sclk = vbios->high_sclk;

@@ -1681,7 +1730,7 @@ static void calculate_bandwidth(
data->total_dispclk_required_with_ramping_with_request_bandwidth = bw_max2(data->total_dispclk_required_with_ramping_with_request_bandwidth, data->dispclk_required_for_blackout_duration[data->y_clk_level][data->sclk_level]);
data->total_dispclk_required_without_ramping_with_request_bandwidth = bw_max2(data->total_dispclk_required_without_ramping_with_request_bandwidth, data->dispclk_required_for_blackout_duration[data->y_clk_level][data->sclk_level]);
}
if (data->nbp_state_change_enable == bw_def_yes) {
if (data->nbp_state_change_enable == bw_def_yes && data->increase_voltage_to_support_mclk_switch) {
data->total_dispclk_required_with_ramping_with_request_bandwidth = bw_max2(data->total_dispclk_required_with_ramping_with_request_bandwidth, data->dispclk_required_for_dram_speed_change[data->y_clk_level][data->sclk_level]);
data->total_dispclk_required_without_ramping_with_request_bandwidth = bw_max2(data->total_dispclk_required_without_ramping_with_request_bandwidth, data->dispclk_required_for_dram_speed_change[data->y_clk_level][data->sclk_level]);
}

@@ -1861,23 +1910,6 @@ static void calculate_bandwidth(
else {
data->mcifwrdram_access_efficiency = bw_int_to_fixed(0);
}
/*average bandwidth*/
/*during the vertical active time, the average bandwidth with no compression is the source width times the bytes per pixel divided by the line time, multiplied by the vertical scale ratio and the ratio of bytes per request divided by the useful bytes per request.*/
/*the average bandwidth with compression is the same, divided by the compression ratio*/
for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
if (data->enable[i]) {
data->average_bandwidth_no_compression[i] = bw_div(bw_mul(bw_mul(bw_div(bw_mul(data->source_width_rounded_up_to_chunks[i], bw_int_to_fixed(data->bytes_per_pixel[i])), (bw_div(data->h_total[i], data->pixel_rate[i]))), data->vsr[i]), data->bytes_per_request[i]), data->useful_bytes_per_request[i]);
data->average_bandwidth[i] = bw_div(data->average_bandwidth_no_compression[i], data->compression_rate[i]);
}
}
data->total_average_bandwidth_no_compression = bw_int_to_fixed(0);
data->total_average_bandwidth = bw_int_to_fixed(0);
for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
if (data->enable[i]) {
data->total_average_bandwidth_no_compression = bw_add(data->total_average_bandwidth_no_compression, data->average_bandwidth_no_compression[i]);
data->total_average_bandwidth = bw_add(data->total_average_bandwidth, data->average_bandwidth[i]);
}
}
/*stutter efficiency*/
/*the stutter efficiency is the frame-average time in self-refresh divided by the frame-average stutter cycle duration. only applies if the display write-back is not enabled.*/
/*the frame-average stutter cycle used is the minimum for all pipes of the frame-average data buffer size in time, times the compression rate*/
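/*
 * Sketch of the stutter-efficiency definition in the comments above
 * (plain doubles, illustrative names): the burst time is how long the
 * DMIF buffers take to refill, and efficiency is the fraction of the
 * whole stutter cycle actually spent in self-refresh.
 */
static double stutter_efficiency(double buffer_bytes,
		double refill_bytes_per_us, double exit_latency_us,
		double self_refresh_duration_us)
{
	double burst_time_us = buffer_bytes / refill_bytes_per_us;
	double cycle_us = self_refresh_duration_us + exit_latency_us +
			burst_time_us;

	return self_refresh_duration_us / cycle_us;
}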

@@ -1905,7 +1937,7 @@ static void calculate_bandwidth(
data->total_stutter_dmif_buffer_size = bw_fixed_to_int(bw_add(data->stutter_dmif_buffer_size[i], bw_int_to_fixed(data->total_stutter_dmif_buffer_size)));
}
}
data->stutter_burst_time = bw_div(bw_int_to_fixed(data->total_stutter_dmif_buffer_size), bw_min2(bw_mul(data->dram_bandwidth, data->dmifdram_access_efficiency), bw_mul(sclk[data->sclk_level], bw_int_to_fixed(32))));
data->stutter_burst_time = bw_div(bw_int_to_fixed(data->total_stutter_dmif_buffer_size), bw_mul(sclk[data->sclk_level], vbios->data_return_bus_width));
data->num_stutter_bursts = data->total_bytes_requested / data->min_stutter_dmif_buffer_size;
data->total_stutter_cycle_duration = bw_add(bw_add(data->min_stutter_refresh_duration, vbios->stutter_self_refresh_exit_latency), data->stutter_burst_time);
data->time_in_self_refresh = data->min_stutter_refresh_duration;

@@ -1957,7 +1989,7 @@ static void calculate_bandwidth(
for (i = 1; i <= 5; i++) {
data->display_reads_time_for_data_transfer_and_urgent_latency = bw_sub(data->min_read_buffer_size_in_time, bw_mul(data->total_dmifmc_urgent_trips, bw_int_to_fixed(i)));
if (pipe_check == bw_def_ok && (bw_mtn(data->display_reads_time_for_data_transfer_and_urgent_latency, data->dmif_total_page_close_open_time))) {
data->dmif_required_sclk_for_urgent_latency[i] = bw_div(bw_div(data->total_display_reads_required_data, data->display_reads_time_for_data_transfer_and_urgent_latency), (bw_mul(vbios->data_return_bus_width, bw_int_to_fixed(bus_efficiency))));
data->dmif_required_sclk_for_urgent_latency[i] = bw_div(bw_div(data->total_display_reads_required_data, data->display_reads_time_for_data_transfer_and_urgent_latency), (bw_mul(vbios->data_return_bus_width, bw_frc_to_fixed(dceip->percent_of_ideal_port_bw_received_after_urgent_latency, 100))));
}
else {
data->dmif_required_sclk_for_urgent_latency[i] = bw_int_to_fixed(bw_def_na);

@@ -2036,6 +2068,9 @@ void bw_calcs_init(struct bw_calcs_dceip *bw_dceip,
vbios.blackout_duration = bw_int_to_fixed(0); /* us */
vbios.maximum_blackout_recovery_time = bw_int_to_fixed(0);

dceip.max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation = 100;
dceip.max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation = 100;
dceip.percent_of_ideal_port_bw_received_after_urgent_latency = 100;
dceip.large_cursor = false;
dceip.dmif_request_buffer_size = bw_int_to_fixed(768);
dceip.dmif_pipe_en_fbc_chunk_tracker = false;

@@ -2146,6 +2181,9 @@ void bw_calcs_init(struct bw_calcs_dceip *bw_dceip,
vbios.blackout_duration = bw_int_to_fixed(0); /* us */
vbios.maximum_blackout_recovery_time = bw_int_to_fixed(0);

dceip.max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation = 100;
dceip.max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation = 100;
dceip.percent_of_ideal_port_bw_received_after_urgent_latency = 100;
dceip.large_cursor = false;
dceip.dmif_request_buffer_size = bw_int_to_fixed(768);
dceip.dmif_pipe_en_fbc_chunk_tracker = false;

@@ -2259,6 +2297,9 @@ void bw_calcs_init(struct bw_calcs_dceip *bw_dceip,
vbios.blackout_duration = bw_int_to_fixed(0); /* us */
vbios.maximum_blackout_recovery_time = bw_int_to_fixed(0);

dceip.max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation = 100;
dceip.max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation = 100;
dceip.percent_of_ideal_port_bw_received_after_urgent_latency = 100;
dceip.large_cursor = false;
dceip.dmif_request_buffer_size = bw_int_to_fixed(768);
dceip.dmif_pipe_en_fbc_chunk_tracker = false;

@@ -2369,6 +2410,9 @@ void bw_calcs_init(struct bw_calcs_dceip *bw_dceip,
vbios.blackout_duration = bw_int_to_fixed(0); /* us */
vbios.maximum_blackout_recovery_time = bw_int_to_fixed(0);

dceip.max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation = 100;
dceip.max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation = 100;
dceip.percent_of_ideal_port_bw_received_after_urgent_latency = 100;
dceip.large_cursor = false;
dceip.dmif_request_buffer_size = bw_int_to_fixed(768);
dceip.dmif_pipe_en_fbc_chunk_tracker = false;

@@ -2479,6 +2523,9 @@ void bw_calcs_init(struct bw_calcs_dceip *bw_dceip,
vbios.blackout_duration = bw_int_to_fixed(0); /* us */
vbios.maximum_blackout_recovery_time = bw_int_to_fixed(0);

dceip.max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation = 100;
dceip.max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation = 100;
dceip.percent_of_ideal_port_bw_received_after_urgent_latency = 100;
dceip.large_cursor = false;
dceip.dmif_request_buffer_size = bw_int_to_fixed(2304);
dceip.dmif_pipe_en_fbc_chunk_tracker = true;

@@ -2597,6 +2644,7 @@ static void populate_initial_data(
data->graphics_tiling_mode = bw_def_tiled;
data->underlay_micro_tile_mode = bw_def_display_micro_tiling;
data->graphics_micro_tile_mode = bw_def_display_micro_tiling;
data->increase_voltage_to_support_mclk_switch = true;

/* Pipes with underlay first */
for (i = 0; i < pipe_count; i++) {

@@ -983,8 +983,6 @@ bool dcn_validate_bandwidth(
context->bw.dcn.calc_clk.fclk_khz = (int)(bw_consumed * 1000000 / 32);
}

context->bw.dcn.calc_clk.dram_ccm_us = (int)(v->dram_clock_change_margin);
context->bw.dcn.calc_clk.min_active_dram_ccm_us = (int)(v->min_active_dram_clock_change_margin);
context->bw.dcn.calc_clk.dcfclk_deep_sleep_khz = (int)(v->dcf_clk_deep_sleep * 1000);
context->bw.dcn.calc_clk.dcfclk_khz = (int)(v->dcfclk * 1000);

@@ -998,7 +996,26 @@ bool dcn_validate_bandwidth(
dc->debug.min_disp_clk_khz;
}

context->bw.dcn.calc_clk.max_dppclk_khz = context->bw.dcn.calc_clk.dispclk_khz / v->dispclk_dppclk_ratio;
context->bw.dcn.calc_clk.dppclk_khz = context->bw.dcn.calc_clk.dispclk_khz / v->dispclk_dppclk_ratio;

switch (v->voltage_level) {
case 0:
context->bw.dcn.calc_clk.max_supported_dppclk_khz =
(int)(dc->dcn_soc->max_dppclk_vmin0p65 * 1000);
break;
case 1:
context->bw.dcn.calc_clk.max_supported_dppclk_khz =
(int)(dc->dcn_soc->max_dppclk_vmid0p72 * 1000);
break;
case 2:
context->bw.dcn.calc_clk.max_supported_dppclk_khz =
(int)(dc->dcn_soc->max_dppclk_vnom0p8 * 1000);
break;
default:
context->bw.dcn.calc_clk.max_supported_dppclk_khz =
(int)(dc->dcn_soc->max_dppclk_vmax0p9 * 1000);
break;
}

for (i = 0, input_idx = 0; i < pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];

@@ -42,6 +42,7 @@
#include "dmcu.h"
#include "dpp.h"
#include "timing_generator.h"
#include "abm.h"
#include "virtual/virtual_link_encoder.h"

#include "link_hwss.h"

@@ -802,6 +803,8 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
if (!dcb->funcs->is_accelerated_mode(dcb))
dc->hwss.enable_accelerated_mode(dc, context);

dc->hwss.set_bandwidth(dc, context, false);

/* re-program planes for existing stream, in case we need to
* free up plane resource for later use
*/

@@ -870,6 +873,9 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c

dc_enable_stereo(dc, context, dc_streams, context->stream_count);

/* pplib is notified if disp_num changed */
dc->hwss.set_bandwidth(dc, context, true);

dc_release_state(dc->current_state);

dc->current_state = context;

@@ -1104,9 +1110,6 @@ static enum surface_update_type get_plane_info_update_type(const struct dc_surfa
if (u->plane_info->input_tf != u->surface->input_tf)
update_flags->bits.input_tf_change = 1;

if (u->plane_info->sdr_white_level != u->surface->sdr_white_level)
update_flags->bits.output_tf_change = 1;

if (u->plane_info->horizontal_mirror != u->surface->horizontal_mirror)
update_flags->bits.horizontal_mirror_change = 1;

@@ -1361,6 +1364,17 @@ static void commit_planes_for_stream(struct dc *dc,

dc->hwss.apply_ctx_for_surface(
dc, pipe_ctx->stream, stream_status->plane_count, context);

if (stream_update && stream_update->abm_level && pipe_ctx->stream_res.abm) {
if (pipe_ctx->stream_res.tg->funcs->is_blanked) {
// if otg funcs defined, check if blanked before programming
if (!pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg))
pipe_ctx->stream_res.abm->funcs->set_abm_level(
pipe_ctx->stream_res.abm, stream->abm_level);
} else
pipe_ctx->stream_res.abm->funcs->set_abm_level(
pipe_ctx->stream_res.abm, stream->abm_level);
}
}
}

@@ -36,26 +36,22 @@
#include "hw_sequencer.h"

#include "resource.h"
#define DC_LOGGER \
logger

#define SURFACE_TRACE(...) do {\
if (dc->debug.surface_trace) \
dm_logger_write(logger, \
LOG_IF_TRACE, \
##__VA_ARGS__); \
DC_LOG_IF_TRACE(__VA_ARGS__); \
} while (0)

#define TIMING_TRACE(...) do {\
if (dc->debug.timing_trace) \
dm_logger_write(logger, \
LOG_SYNC, \
##__VA_ARGS__); \
DC_LOG_SYNC(__VA_ARGS__); \
} while (0)

#define CLOCK_TRACE(...) do {\
if (dc->debug.clock_trace) \
dm_logger_write(logger, \
LOG_BANDWIDTH_CALCS, \
##__VA_ARGS__); \
DC_LOG_BANDWIDTH_CALCS(__VA_ARGS__); \
} while (0)

void pre_surface_trace(

@@ -362,25 +358,19 @@ void context_clock_trace(
struct dal_logger *logger = core_dc->ctx->logger;

CLOCK_TRACE("Current: dispclk_khz:%d max_dppclk_khz:%d dcfclk_khz:%d\n"
"dcfclk_deep_sleep_khz:%d fclk_khz:%d socclk_khz:%d\n"
"dram_ccm_us:%d min_active_dram_ccm_us:%d\n",
"dcfclk_deep_sleep_khz:%d fclk_khz:%d socclk_khz:%d\n",
context->bw.dcn.calc_clk.dispclk_khz,
context->bw.dcn.calc_clk.max_dppclk_khz,
context->bw.dcn.calc_clk.dppclk_khz,
context->bw.dcn.calc_clk.dcfclk_khz,
context->bw.dcn.calc_clk.dcfclk_deep_sleep_khz,
context->bw.dcn.calc_clk.fclk_khz,
context->bw.dcn.calc_clk.socclk_khz,
context->bw.dcn.calc_clk.dram_ccm_us,
context->bw.dcn.calc_clk.min_active_dram_ccm_us);
context->bw.dcn.calc_clk.socclk_khz);
CLOCK_TRACE("Calculated: dispclk_khz:%d max_dppclk_khz:%d dcfclk_khz:%d\n"
"dcfclk_deep_sleep_khz:%d fclk_khz:%d socclk_khz:%d\n"
"dram_ccm_us:%d min_active_dram_ccm_us:%d\n",
"dcfclk_deep_sleep_khz:%d fclk_khz:%d socclk_khz:%d\n",
context->bw.dcn.calc_clk.dispclk_khz,
context->bw.dcn.calc_clk.max_dppclk_khz,
context->bw.dcn.calc_clk.dppclk_khz,
context->bw.dcn.calc_clk.dcfclk_khz,
context->bw.dcn.calc_clk.dcfclk_deep_sleep_khz,
context->bw.dcn.calc_clk.fclk_khz,
context->bw.dcn.calc_clk.dram_ccm_us,
context->bw.dcn.calc_clk.min_active_dram_ccm_us);
context->bw.dcn.calc_clk.fclk_khz);
#endif
}

@@ -1960,6 +1960,13 @@ bool dc_link_set_backlight_level(const struct dc_link *link, uint32_t level,
(abm->funcs->set_backlight_level == NULL))
return false;

if (stream) {
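/* bl_pwm_level == 0 means the backlight is currently off, so there
 * is nothing to ramp from; program the new level directly */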
|
||||
if (stream->bl_pwm_level == 0)
|
||||
frame_ramp = 0;
|
||||
|
||||
((struct dc_stream_state *)stream)->bl_pwm_level = level;
|
||||
}
|
||||
|
||||
use_smooth_brightness = dmcu->funcs->is_dmcu_initialized(dmcu);
|
||||
|
||||
DC_LOG_BACKLIGHT("New Backlight level: %d (0x%X)\n", level, level);
|
||||
|
|
|
@ -1124,6 +1124,7 @@ bool dc_add_plane_to_context(
|
|||
ASSERT(tail_pipe);
|
||||
|
||||
free_pipe->stream_res.tg = tail_pipe->stream_res.tg;
|
||||
free_pipe->stream_res.abm = tail_pipe->stream_res.abm;
|
||||
free_pipe->stream_res.opp = tail_pipe->stream_res.opp;
|
||||
free_pipe->stream_res.stream_enc = tail_pipe->stream_res.stream_enc;
|
||||
free_pipe->stream_res.audio = tail_pipe->stream_res.audio;
|
||||
|
@ -1736,6 +1737,10 @@ enum dc_status resource_map_pool_resources(
|
|||
pipe_ctx->stream_res.audio, true);
|
||||
}
|
||||
|
||||
/* Add ABM to the resource if on EDP */
|
||||
if (pipe_ctx->stream && dc_is_embedded_signal(pipe_ctx->stream->signal))
|
||||
pipe_ctx->stream_res.abm = pool->abm;
|
||||
|
||||
for (i = 0; i < context->stream_count; i++)
|
||||
if (context->streams[i] == stream) {
|
||||
context->stream_status[i].primary_otg_inst = pipe_ctx->stream_res.tg->inst;
|
||||
|
|
|
@ -198,8 +198,7 @@ bool dc_stream_set_cursor_attributes(
|
|||
for (i = 0; i < MAX_PIPES; i++) {
|
||||
struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
|
||||
|
||||
if (pipe_ctx->stream != stream || (!pipe_ctx->plane_res.xfm &&
|
||||
!pipe_ctx->plane_res.dpp) || !pipe_ctx->plane_res.ipp)
|
||||
if (pipe_ctx->stream != stream)
|
||||
continue;
|
||||
if (pipe_ctx->top_pipe && pipe_ctx->plane_state != pipe_ctx->top_pipe->plane_state)
|
||||
continue;
|
||||
|
|
|
@ -38,7 +38,7 @@
|
|||
#include "inc/compressor.h"
|
||||
#include "dml/display_mode_lib.h"
|
||||
|
||||
#define DC_VER "3.1.37"
|
||||
#define DC_VER "3.1.38"
|
||||
|
||||
#define MAX_SURFACES 3
|
||||
#define MAX_STREAMS 6
|
||||
|
@ -186,13 +186,12 @@ enum wm_report_mode {
|
|||
|
||||
struct dc_clocks {
|
||||
int dispclk_khz;
|
||||
int max_dppclk_khz;
|
||||
int max_supported_dppclk_khz;
|
||||
int dppclk_khz;
|
||||
int dcfclk_khz;
|
||||
int socclk_khz;
|
||||
int dcfclk_deep_sleep_khz;
|
||||
int fclk_khz;
|
||||
int dram_ccm_us;
|
||||
int min_active_dram_ccm_us;
|
||||
};
|
||||
|
||||
struct dc_debug {
|
||||
|
@ -447,6 +446,7 @@ union surface_update_flags {
|
|||
|
||||
struct dc_plane_state {
|
||||
struct dc_plane_address address;
|
||||
struct dc_plane_flip_time time;
|
||||
struct scaling_taps scaling_quality;
|
||||
struct rect src_rect;
|
||||
struct rect dst_rect;
|
||||
|
@ -557,6 +557,7 @@ struct dc_transfer_func *dc_create_transfer_func(void);
|
|||
*/
|
||||
struct dc_flip_addrs {
|
||||
struct dc_plane_address address;
|
||||
unsigned int flip_timestamp_in_us;
|
||||
bool flip_immediate;
|
||||
/* TODO: add flip duration for FreeSync */
|
||||
};
|
||||
|
|
|
@ -692,8 +692,18 @@ struct crtc_trigger_info {
|
|||
enum trigger_delay delay;
|
||||
};
|
||||
|
||||
struct dc_crtc_timing {
|
||||
enum vrr_state {
|
||||
VRR_STATE_OFF = 0,
|
||||
VRR_STATE_VARIABLE,
|
||||
VRR_STATE_FIXED,
|
||||
};
|
||||
|
||||
struct dc_crtc_timing_adjust {
|
||||
uint32_t v_total_min;
|
||||
uint32_t v_total_max;
|
||||
};
|
||||
|
||||
struct dc_crtc_timing {
|
||||
uint32_t h_total;
|
||||
uint32_t h_border_left;
|
||||
uint32_t h_addressable;
|
||||
|
|
|
@ -48,6 +48,8 @@ struct dc_stream_status {
|
|||
struct dc_stream_state {
|
||||
struct dc_sink *sink;
|
||||
struct dc_crtc_timing timing;
|
||||
struct dc_crtc_timing_adjust timing_adjust;
|
||||
struct vrr_params vrr_params;
|
||||
|
||||
struct rect src; /* composition area */
|
||||
struct rect dst; /* stream addressable area */
|
||||
|
@ -74,6 +76,10 @@ struct dc_stream_state {
|
|||
unsigned char psr_version;
|
||||
/* TODO: CEA VIC */
|
||||
|
||||
/* DMCU info */
|
||||
unsigned int abm_level;
|
||||
unsigned int bl_pwm_level;
|
||||
|
||||
/* from core_stream struct */
|
||||
struct dc_context *ctx;
|
||||
|
||||
|
@ -106,6 +112,7 @@ struct dc_stream_update {
|
|||
struct dc_transfer_func *out_transfer_func;
|
||||
struct dc_hdr_static_metadata *hdr_static_metadata;
|
||||
enum color_transfer_func color_output_tf;
|
||||
unsigned int *abm_level;
|
||||
};
|
||||
|
||||
bool dc_is_stream_unchanged(
|
||||
|
|
|
@ -521,6 +521,24 @@ struct audio_info {
|
|||
struct audio_mode modes[DC_MAX_AUDIO_DESC_COUNT];
|
||||
};
|
||||
|
||||
struct vrr_params {
|
||||
enum vrr_state state;
|
||||
uint32_t window_min;
|
||||
uint32_t window_max;
|
||||
uint32_t inserted_frame_duration_in_us;
|
||||
uint32_t frames_to_insert;
|
||||
uint32_t frame_counter;
|
||||
};
|
||||
|
||||
#define DC_PLANE_UPDATE_TIMES_MAX 10
|
||||
|
||||
struct dc_plane_flip_time {
|
||||
unsigned int time_elapsed_in_us[DC_PLANE_UPDATE_TIMES_MAX];
|
||||
unsigned int index;
|
||||
unsigned int prev_update_time_in_us;
|
||||
};
|
||||
|
||||
// Will combine with vrr_params at some point.
|
||||
struct freesync_context {
|
||||
bool supported;
|
||||
bool enabled;
|
||||
|
|
|
@ -46,6 +46,23 @@
|
|||
SR(SMU_INTERRUPT_CONTROL), \
|
||||
SR(DC_DMCU_SCRATCH)
|
||||
|
||||
#define DMCU_DCE80_REG_LIST() \
|
||||
SR(DMCU_CTRL), \
|
||||
SR(DMCU_STATUS), \
|
||||
SR(DMCU_RAM_ACCESS_CTRL), \
|
||||
SR(DMCU_IRAM_WR_CTRL), \
|
||||
SR(DMCU_IRAM_WR_DATA), \
|
||||
SR(MASTER_COMM_DATA_REG1), \
|
||||
SR(MASTER_COMM_DATA_REG2), \
|
||||
SR(MASTER_COMM_DATA_REG3), \
|
||||
SR(MASTER_COMM_CMD_REG), \
|
||||
SR(MASTER_COMM_CNTL_REG), \
|
||||
SR(DMCU_IRAM_RD_CTRL), \
|
||||
SR(DMCU_IRAM_RD_DATA), \
|
||||
SR(DMCU_INTERRUPT_TO_UC_EN_MASK), \
|
||||
SR(SMU_INTERRUPT_CONTROL), \
|
||||
SR(DC_DMCU_SCRATCH)
|
||||
|
||||
#define DMCU_DCE110_COMMON_REG_LIST() \
|
||||
DMCU_COMMON_REG_LIST_DCE_BASE(), \
|
||||
SR(DCI_MEM_PWR_STATUS)
|
||||
|
@ -83,6 +100,24 @@
|
|||
STATIC_SCREEN4_INT_TO_UC_EN, mask_sh), \
|
||||
DMCU_SF(SMU_INTERRUPT_CONTROL, DC_SMU_INT_ENABLE, mask_sh)
|
||||
|
||||
#define DMCU_MASK_SH_LIST_DCE80(mask_sh) \
|
||||
DMCU_SF(DMCU_CTRL, \
|
||||
DMCU_ENABLE, mask_sh), \
|
||||
DMCU_SF(DMCU_STATUS, \
|
||||
UC_IN_STOP_MODE, mask_sh), \
|
||||
DMCU_SF(DMCU_STATUS, \
|
||||
UC_IN_RESET, mask_sh), \
|
||||
DMCU_SF(DMCU_RAM_ACCESS_CTRL, \
|
||||
IRAM_HOST_ACCESS_EN, mask_sh), \
|
||||
DMCU_SF(DMCU_RAM_ACCESS_CTRL, \
|
||||
IRAM_WR_ADDR_AUTO_INC, mask_sh), \
|
||||
DMCU_SF(DMCU_RAM_ACCESS_CTRL, \
|
||||
IRAM_RD_ADDR_AUTO_INC, mask_sh), \
|
||||
DMCU_SF(MASTER_COMM_CMD_REG, \
|
||||
MASTER_COMM_CMD_REG_BYTE0, mask_sh), \
|
||||
DMCU_SF(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, mask_sh), \
|
||||
DMCU_SF(SMU_INTERRUPT_CONTROL, DC_SMU_INT_ENABLE, mask_sh)
|
||||
|
||||
#define DMCU_MASK_SH_LIST_DCE110(mask_sh) \
|
||||
DMCU_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh), \
|
||||
DMCU_SF(DCI_MEM_PWR_STATUS, \
|
||||
|
|
|
@ -56,7 +56,7 @@ void dce_pipe_control_lock(struct dc *dc,
|
|||
if (lock && pipe->stream_res.tg->funcs->is_blanked(pipe->stream_res.tg))
|
||||
return;
|
||||
|
||||
val = REG_GET_4(BLND_V_UPDATE_LOCK[pipe->pipe_idx],
|
||||
val = REG_GET_4(BLND_V_UPDATE_LOCK[pipe->stream_res.tg->inst],
|
||||
BLND_DCP_GRPH_V_UPDATE_LOCK, &dcp_grph,
|
||||
BLND_SCL_V_UPDATE_LOCK, &scl,
|
||||
BLND_BLND_V_UPDATE_LOCK, &blnd,
|
||||
|
@ -67,19 +67,19 @@ void dce_pipe_control_lock(struct dc *dc,
|
|||
blnd = lock_val;
|
||||
update_lock_mode = lock_val;
|
||||
|
||||
REG_SET_2(BLND_V_UPDATE_LOCK[pipe->pipe_idx], val,
|
||||
REG_SET_2(BLND_V_UPDATE_LOCK[pipe->stream_res.tg->inst], val,
|
||||
BLND_DCP_GRPH_V_UPDATE_LOCK, dcp_grph,
|
||||
BLND_SCL_V_UPDATE_LOCK, scl);
|
||||
|
||||
if (hws->masks->BLND_BLND_V_UPDATE_LOCK != 0)
|
||||
REG_SET_2(BLND_V_UPDATE_LOCK[pipe->pipe_idx], val,
|
||||
REG_SET_2(BLND_V_UPDATE_LOCK[pipe->stream_res.tg->inst], val,
|
||||
BLND_BLND_V_UPDATE_LOCK, blnd,
|
||||
BLND_V_UPDATE_LOCK_MODE, update_lock_mode);
|
||||
|
||||
if (hws->wa.blnd_crtc_trigger) {
|
||||
if (!lock) {
|
||||
uint32_t value = REG_READ(CRTC_H_BLANK_START_END[pipe->pipe_idx]);
|
||||
REG_WRITE(CRTC_H_BLANK_START_END[pipe->pipe_idx], value);
|
||||
uint32_t value = REG_READ(CRTC_H_BLANK_START_END[pipe->stream_res.tg->inst]);
|
||||
REG_WRITE(CRTC_H_BLANK_START_END[pipe->stream_res.tg->inst], value);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -190,6 +190,7 @@
|
|||
SR(D2VGA_CONTROL), \
|
||||
SR(D3VGA_CONTROL), \
|
||||
SR(D4VGA_CONTROL), \
|
||||
SR(VGA_TEST_CONTROL), \
|
||||
SR(DC_IP_REQUEST_CNTL), \
|
||||
BL_REG_LIST()
|
||||
|
||||
|
@ -261,6 +262,7 @@ struct dce_hwseq_registers {
|
|||
uint32_t D2VGA_CONTROL;
|
||||
uint32_t D3VGA_CONTROL;
|
||||
uint32_t D4VGA_CONTROL;
|
||||
uint32_t VGA_TEST_CONTROL;
|
||||
/* MMHUB registers. read only. temporary hack */
|
||||
uint32_t VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32;
|
||||
uint32_t VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32;
|
||||
|
@ -327,6 +329,8 @@ struct dce_hwseq_registers {
|
|||
HWSEQ_DCE10_MASK_SH_LIST(mask_sh),\
|
||||
SF(DCFEV_CLOCK_CONTROL, DCFEV_CLOCK_ENABLE, mask_sh),\
|
||||
HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_BLON, mask_sh),\
|
||||
HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_DIGON, mask_sh),\
|
||||
HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_DIGON_OVRD, mask_sh),\
|
||||
HWS_SF(, LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, mask_sh),\
|
||||
HWSEQ_PIXEL_RATE_MASK_SH_LIST(mask_sh, CRTC0_)
|
||||
|
||||
|
@ -403,7 +407,15 @@ struct dce_hwseq_registers {
|
|||
HWS_SF(, DOMAIN6_PG_STATUS, DOMAIN6_PGFSM_PWR_STATUS, mask_sh), \
|
||||
HWS_SF(, DOMAIN7_PG_STATUS, DOMAIN7_PGFSM_PWR_STATUS, mask_sh), \
|
||||
HWS_SF(, DC_IP_REQUEST_CNTL, IP_REQUEST_EN, mask_sh), \
|
||||
HWS_SF(, D1VGA_CONTROL, D1VGA_MODE_ENABLE, mask_sh),\
|
||||
HWS_SF(, D2VGA_CONTROL, D2VGA_MODE_ENABLE, mask_sh),\
|
||||
HWS_SF(, D3VGA_CONTROL, D3VGA_MODE_ENABLE, mask_sh),\
|
||||
HWS_SF(, D4VGA_CONTROL, D4VGA_MODE_ENABLE, mask_sh),\
|
||||
HWS_SF(, VGA_TEST_CONTROL, VGA_TEST_ENABLE, mask_sh),\
|
||||
HWS_SF(, VGA_TEST_CONTROL, VGA_TEST_RENDER_START, mask_sh),\
|
||||
HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_BLON, mask_sh), \
|
||||
HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_DIGON, mask_sh), \
|
||||
HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_DIGON_OVRD, mask_sh), \
|
||||
HWS_SF(, LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, mask_sh)
|
||||
|
||||
#define HWSEQ_REG_FIELD_LIST(type) \
|
||||
|
@ -436,7 +448,9 @@ struct dce_hwseq_registers {
|
|||
type ENABLE_L1_TLB;\
|
||||
type SYSTEM_ACCESS_MODE;\
|
||||
type LVTMA_BLON;\
|
||||
type LVTMA_PWRSEQ_TARGET_STATE_R;
|
||||
type LVTMA_PWRSEQ_TARGET_STATE_R;\
|
||||
type LVTMA_DIGON;\
|
||||
type LVTMA_DIGON_OVRD;
|
||||
|
||||
#define HWSEQ_DCN_REG_FIELD_LIST(type) \
|
||||
type HUBP_VTG_SEL; \
|
||||
|
@ -483,7 +497,13 @@ struct dce_hwseq_registers {
|
|||
type DCFCLK_GATE_DIS; \
|
||||
type DCHUBBUB_GLOBAL_TIMER_REFDIV; \
|
||||
type DENTIST_DPPCLK_WDIVIDER; \
|
||||
type DENTIST_DISPCLK_WDIVIDER;
|
||||
type DENTIST_DISPCLK_WDIVIDER; \
|
||||
type VGA_TEST_ENABLE; \
|
||||
type VGA_TEST_RENDER_START; \
|
||||
type D1VGA_MODE_ENABLE; \
|
||||
type D2VGA_MODE_ENABLE; \
|
||||
type D3VGA_MODE_ENABLE; \
|
||||
type D4VGA_MODE_ENABLE;
|
||||
|
||||
struct dce_hwseq_shift {
|
||||
HWSEQ_REG_FIELD_LIST(uint8_t)
|
||||
|
|
|
@ -767,8 +767,7 @@ void dce110_link_encoder_construct(
|
|||
bp_cap_info.DP_HBR3_EN;
|
||||
enc110->base.features.flags.bits.HDMI_6GB_EN = bp_cap_info.HDMI_6GB_EN;
|
||||
} else {
|
||||
dm_logger_write(enc110->base.ctx->logger, LOG_WARNING,
|
||||
"%s: Failed to get encoder_cap_info from VBIOS with error code %d!\n",
|
||||
DC_LOG_WARNING("%s: Failed to get encoder_cap_info from VBIOS with error code %d!\n",
|
||||
__func__,
|
||||
result);
|
||||
}
|
||||
|
|
|
@@ -51,6 +51,9 @@
#include "dce/dce_10_0_d.h"
#include "dce/dce_10_0_sh_mask.h"

#include "dce/dce_dmcu.h"
#include "dce/dce_abm.h"

#ifndef mmMC_HUB_RDREQ_DMIF_LIMIT
#include "gmc/gmc_8_2_d.h"
#include "gmc/gmc_8_2_sh_mask.h"

@@ -320,7 +323,29 @@ static const struct dce110_clk_src_mask cs_mask = {
CS_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
};

static const struct dce_dmcu_registers dmcu_regs = {
DMCU_DCE110_COMMON_REG_LIST()
};

static const struct dce_dmcu_shift dmcu_shift = {
DMCU_MASK_SH_LIST_DCE110(__SHIFT)
};

static const struct dce_dmcu_mask dmcu_mask = {
DMCU_MASK_SH_LIST_DCE110(_MASK)
};

static const struct dce_abm_registers abm_regs = {
ABM_DCE110_COMMON_REG_LIST()
};

static const struct dce_abm_shift abm_shift = {
ABM_MASK_SH_LIST_DCE110(__SHIFT)
};

static const struct dce_abm_mask abm_mask = {
ABM_MASK_SH_LIST_DCE110(_MASK)
};

#define DCFE_MEM_PWR_CTRL_REG_BASE 0x1b03

@@ -622,6 +647,12 @@ static void destruct(struct dce110_resource_pool *pool)
if (pool->base.display_clock != NULL)
dce_disp_clk_destroy(&pool->base.display_clock);

if (pool->base.abm != NULL)
dce_abm_destroy(&pool->base.abm);

if (pool->base.dmcu != NULL)
dce_dmcu_destroy(&pool->base.dmcu);

if (pool->base.irqs != NULL)
dal_irq_service_destroy(&pool->base.irqs);
}

@@ -829,6 +860,25 @@ static bool construct(
goto res_create_fail;
}

pool->base.dmcu = dce_dmcu_create(ctx,
&dmcu_regs,
&dmcu_shift,
&dmcu_mask);
if (pool->base.dmcu == NULL) {
dm_error("DC: failed to create dmcu!\n");
BREAK_TO_DEBUGGER();
goto res_create_fail;
}

pool->base.abm = dce_abm_create(ctx,
&abm_regs,
&abm_shift,
&abm_mask);
if (pool->base.abm == NULL) {
dm_error("DC: failed to create abm!\n");
BREAK_TO_DEBUGGER();
goto res_create_fail;
}

/* get static clock information for PPLIB or firmware, save
* max_clock_state
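
The DMCU/ABM bring-up above follows the usual construct/destruct discipline in these resource files: create each sub-object in order, bail to a single cleanup label on a NULL result, and let the destructor tolerate a partially built pool. A minimal standalone C sketch of that idiom (all names here are illustrative, not the driver's):

#include <stdlib.h>

struct pool { void *dmcu; void *abm; };

static void pool_destruct(struct pool *p)
{
	free(p->abm);   /* free(NULL) is a no-op, like the != NULL guards above */
	free(p->dmcu);
}

static int pool_construct(struct pool *p)
{
	p->dmcu = NULL;
	p->abm = NULL;

	p->dmcu = malloc(16);
	if (!p->dmcu)
		goto fail;
	p->abm = malloc(16);
	if (!p->abm)
		goto fail;
	return 0;
fail:
	pool_destruct(p);   /* safe on a partially built pool */
	return -1;
}

int main(void)
{
	struct pool p;

	if (pool_construct(&p))
		return 1;
	pool_destruct(&p);
	return 0;
}
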
@@ -737,10 +737,14 @@ static bool is_panel_backlight_on(struct dce_hwseq *hws)

static bool is_panel_powered_on(struct dce_hwseq *hws)
{
uint32_t value;
uint32_t pwr_seq_state, dig_on, dig_on_ovrd;

REG_GET(LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, &value);
return value == 1;

REG_GET(LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, &pwr_seq_state);

REG_GET_2(LVTMA_PWRSEQ_CNTL, LVTMA_DIGON, &dig_on, LVTMA_DIGON_OVRD, &dig_on_ovrd);

return (pwr_seq_state == 1) || (dig_on == 1 && dig_on_ovrd == 1);
}

static enum bp_result link_transmitter_control(

@@ -1002,8 +1006,10 @@ void dce110_unblank_stream(struct pipe_ctx *pipe_ctx,
if (dc_is_dp_signal(pipe_ctx->stream->signal))
pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(pipe_ctx->stream_res.stream_enc, &params);

if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP)
if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
link->dc->hwss.edp_backlight_control(link, true);
stream->bl_pwm_level = 0;
}
}
void dce110_blank_stream(struct pipe_ctx *pipe_ctx)
{

@@ -1128,7 +1134,7 @@ static void build_audio_output(
static void get_surface_visual_confirm_color(const struct pipe_ctx *pipe_ctx,
struct tg_color *color)
{
uint32_t color_value = MAX_TG_COLOR_VALUE * (4 - pipe_ctx->pipe_idx) / 4;
uint32_t color_value = MAX_TG_COLOR_VALUE * (4 - pipe_ctx->stream_res.tg->inst) / 4;

switch (pipe_ctx->plane_res.scl_data.format) {
case PIXEL_FORMAT_ARGB8888:

@@ -2106,9 +2112,6 @@ enum dc_status dce110_apply_ctx_to_hw(
return status;
}

/* pplib is notified if disp_num changed */
dc->hwss.set_bandwidth(dc, context, true);

/* to save power */
apply_min_clocks(dc, context, &clocks_state, false);

@@ -2936,15 +2939,18 @@ void dce110_set_cursor_attribute(struct pipe_ctx *pipe_ctx)
{
struct dc_cursor_attributes *attributes = &pipe_ctx->stream->cursor_attributes;

if (pipe_ctx->plane_res.ipp->funcs->ipp_cursor_set_attributes)
if (pipe_ctx->plane_res.ipp &&
pipe_ctx->plane_res.ipp->funcs->ipp_cursor_set_attributes)
pipe_ctx->plane_res.ipp->funcs->ipp_cursor_set_attributes(
pipe_ctx->plane_res.ipp, attributes);

if (pipe_ctx->plane_res.mi->funcs->set_cursor_attributes)
if (pipe_ctx->plane_res.mi &&
pipe_ctx->plane_res.mi->funcs->set_cursor_attributes)
pipe_ctx->plane_res.mi->funcs->set_cursor_attributes(
pipe_ctx->plane_res.mi, attributes);

if (pipe_ctx->plane_res.xfm->funcs->set_cursor_attributes)
if (pipe_ctx->plane_res.xfm &&
pipe_ctx->plane_res.xfm->funcs->set_cursor_attributes)
pipe_ctx->plane_res.xfm->funcs->set_cursor_attributes(
pipe_ctx->plane_res.xfm, attributes);
}

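
The cursor-attribute hunk above is a plain defensive-dereference fix: a plane resource can be absent on some pipe configurations, so both the resource pointer and its hook are now tested before the call. A minimal self-contained sketch of the guard, with illustrative types:

#include <stddef.h>

struct funcs { void (*set_attr)(void *res, const void *attr); };
struct resource { const struct funcs *funcs; };

static void set_attr_safe(struct resource *res, const void *attr)
{
	/* check the resource first, then the optional hook */
	if (res && res->funcs && res->funcs->set_attr)
		res->funcs->set_attr(res, attr);
}

int main(void)
{
	set_attr_safe(NULL, NULL);   /* safe no-op instead of a NULL deref */
	return 0;
}
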
@@ -53,6 +53,8 @@

#include "reg_helper.h"

#include "dce/dce_dmcu.h"
#include "dce/dce_abm.h"
/* TODO remove this include */

#ifndef mmMC_HUB_RDREQ_DMIF_LIMIT

@@ -364,6 +366,29 @@ static const struct resource_caps res_cap_83 = {
.num_pll = 2,
};

static const struct dce_dmcu_registers dmcu_regs = {
DMCU_DCE80_REG_LIST()
};

static const struct dce_dmcu_shift dmcu_shift = {
DMCU_MASK_SH_LIST_DCE80(__SHIFT)
};

static const struct dce_dmcu_mask dmcu_mask = {
DMCU_MASK_SH_LIST_DCE80(_MASK)
};
static const struct dce_abm_registers abm_regs = {
ABM_DCE110_COMMON_REG_LIST()
};

static const struct dce_abm_shift abm_shift = {
ABM_MASK_SH_LIST_DCE110(__SHIFT)
};

static const struct dce_abm_mask abm_mask = {
ABM_MASK_SH_LIST_DCE110(_MASK)
};

#define CTX ctx
#define REG(reg) mm ## reg

@@ -643,6 +668,12 @@ static void destruct(struct dce110_resource_pool *pool)
}
}

if (pool->base.abm != NULL)
dce_abm_destroy(&pool->base.abm);

if (pool->base.dmcu != NULL)
dce_dmcu_destroy(&pool->base.dmcu);

if (pool->base.dp_clock_source != NULL)
dce80_clock_source_destroy(&pool->base.dp_clock_source);

@@ -850,7 +881,25 @@ static bool dce80_construct(
goto res_create_fail;
}

pool->base.dmcu = dce_dmcu_create(ctx,
&dmcu_regs,
&dmcu_shift,
&dmcu_mask);
if (pool->base.dmcu == NULL) {
dm_error("DC: failed to create dmcu!\n");
BREAK_TO_DEBUGGER();
goto res_create_fail;
}

pool->base.abm = dce_abm_create(ctx,
&abm_regs,
&abm_shift,
&abm_mask);
if (pool->base.abm == NULL) {
dm_error("DC: failed to create abm!\n");
BREAK_TO_DEBUGGER();
goto res_create_fail;
}
if (dm_pp_get_static_clocks(ctx, &static_clk_info))
pool->base.display_clock->max_clks_state =
static_clk_info.max_clocks_state;

@@ -1016,6 +1065,25 @@ static bool dce81_construct(
goto res_create_fail;
}

pool->base.dmcu = dce_dmcu_create(ctx,
&dmcu_regs,
&dmcu_shift,
&dmcu_mask);
if (pool->base.dmcu == NULL) {
dm_error("DC: failed to create dmcu!\n");
BREAK_TO_DEBUGGER();
goto res_create_fail;
}

pool->base.abm = dce_abm_create(ctx,
&abm_regs,
&abm_shift,
&abm_mask);
if (pool->base.abm == NULL) {
dm_error("DC: failed to create abm!\n");
BREAK_TO_DEBUGGER();
goto res_create_fail;
}

if (dm_pp_get_static_clocks(ctx, &static_clk_info))
pool->base.display_clock->max_clks_state =

@@ -1178,6 +1246,25 @@ static bool dce83_construct(
goto res_create_fail;
}

pool->base.dmcu = dce_dmcu_create(ctx,
&dmcu_regs,
&dmcu_shift,
&dmcu_mask);
if (pool->base.dmcu == NULL) {
dm_error("DC: failed to create dmcu!\n");
BREAK_TO_DEBUGGER();
goto res_create_fail;
}

pool->base.abm = dce_abm_create(ctx,
&abm_regs,
&abm_shift,
&abm_mask);
if (pool->base.abm == NULL) {
dm_error("DC: failed to create abm!\n");
BREAK_TO_DEBUGGER();
goto res_create_fail;
}

if (dm_pp_get_static_clocks(ctx, &static_clk_info))
pool->base.display_clock->max_clks_state =

@@ -464,6 +464,7 @@ static const struct dpp_funcs dcn10_dpp_funcs = {
.set_cursor_attributes = dpp1_set_cursor_attributes,
.set_cursor_position = dpp1_set_cursor_position,
.dpp_dppclk_control = dpp1_dppclk_control,
.dpp_set_hdr_multiplier = dpp1_set_hdr_multiplier,
};

static struct dpp_caps dcn10_dpp_cap = {

@@ -113,7 +113,8 @@
SRI(CURSOR0_CONTROL, CNVC_CUR, id), \
SRI(CURSOR0_COLOR0, CNVC_CUR, id), \
SRI(CURSOR0_COLOR1, CNVC_CUR, id), \
SRI(DPP_CONTROL, DPP_TOP, id)
SRI(DPP_CONTROL, DPP_TOP, id), \
SRI(CM_HDR_MULT_COEF, CM, id)

@@ -308,7 +309,8 @@
TF_SF(CNVC_CUR0_CURSOR0_CONTROL, CUR0_ENABLE, mask_sh), \
TF_SF(CNVC_CUR0_CURSOR0_COLOR0, CUR0_COLOR0, mask_sh), \
TF_SF(CNVC_CUR0_CURSOR0_COLOR1, CUR0_COLOR1, mask_sh), \
TF_SF(DPP_TOP0_DPP_CONTROL, DPP_CLOCK_ENABLE, mask_sh)
TF_SF(DPP_TOP0_DPP_CONTROL, DPP_CLOCK_ENABLE, mask_sh), \
TF_SF(CM0_CM_HDR_MULT_COEF, CM_HDR_MULT_COEF, mask_sh)

#define TF_REG_LIST_SH_MASK_DCN10(mask_sh)\
TF_REG_LIST_SH_MASK_DCN(mask_sh),\

@@ -1012,7 +1014,8 @@
type CUR0_COLOR0; \
type CUR0_COLOR1; \
type DPPCLK_RATE_CONTROL; \
type DPP_CLOCK_ENABLE;
type DPP_CLOCK_ENABLE; \
type CM_HDR_MULT_COEF;

struct dcn_dpp_shift {
TF_REG_FIELD_LIST(uint8_t)

@@ -1258,7 +1261,8 @@ struct dcn_dpp_mask {
uint32_t CURSOR0_CONTROL; \
uint32_t CURSOR0_COLOR0; \
uint32_t CURSOR0_COLOR1; \
uint32_t DPP_CONTROL;
uint32_t DPP_CONTROL; \
uint32_t CM_HDR_MULT_COEF;

struct dcn_dpp_registers {
DPP_COMMON_REG_VARIABLE_LIST

@@ -1414,6 +1418,10 @@ void dpp1_dppclk_control(
bool dppclk_div,
bool enable);

void dpp1_set_hdr_multiplier(
struct dpp *dpp_base,
uint32_t multiplier);

void dpp1_construct(struct dcn10_dpp *dpp1,
struct dc_context *ctx,
uint32_t inst,

@@ -804,3 +804,12 @@ void dpp1_program_input_lut(
REG_UPDATE(CM_IGAM_CONTROL, CM_IGAM_LUT_MODE, rama_occupied ? 3 : 2);
REG_GET(CM_IGAM_CONTROL, CM_IGAM_LUT_MODE, &ram_num);
}

void dpp1_set_hdr_multiplier(
struct dpp *dpp_base,
uint32_t multiplier)
{
struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);

REG_UPDATE(CM_HDR_MULT_COEF, CM_HDR_MULT_COEF, multiplier);
}

@@ -220,10 +220,34 @@ static void enable_power_gating_plane(
static void disable_vga(
struct dce_hwseq *hws)
{
unsigned int in_vga1_mode = 0;
unsigned int in_vga2_mode = 0;
unsigned int in_vga3_mode = 0;
unsigned int in_vga4_mode = 0;

REG_GET(D1VGA_CONTROL, D1VGA_MODE_ENABLE, &in_vga1_mode);
REG_GET(D2VGA_CONTROL, D2VGA_MODE_ENABLE, &in_vga2_mode);
REG_GET(D3VGA_CONTROL, D3VGA_MODE_ENABLE, &in_vga3_mode);
REG_GET(D4VGA_CONTROL, D4VGA_MODE_ENABLE, &in_vga4_mode);

if (in_vga1_mode == 0 && in_vga2_mode == 0 &&
in_vga3_mode == 0 && in_vga4_mode == 0)
return;

REG_WRITE(D1VGA_CONTROL, 0);
REG_WRITE(D2VGA_CONTROL, 0);
REG_WRITE(D3VGA_CONTROL, 0);
REG_WRITE(D4VGA_CONTROL, 0);

/* HW Engineer's Notes:
* During switch from vga->extended, if we set the VGA_TEST_ENABLE and
* then hit the VGA_TEST_RENDER_START, then the DCHUBP timing gets updated correctly.
*
* Then vBIOS will have it poll for the VGA_TEST_RENDER_DONE and unset
* VGA_TEST_ENABLE, to leave it in the same state as before.
*/
REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_ENABLE, 1);
REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_RENDER_START, 1);
}

static void dpp_pg_control(
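
The reworked disable_vga() is a read/early-out/write/kick sequence: read all four DxVGA_MODE_ENABLE bits, return if none is set, clear the controls, then start one VGA test render so the DCHUBP timing is re-latched, per the HW note above. A self-contained model of that control flow, with a fake register file standing in for the real D1..D4 VGA_CONTROL and VGA_TEST_CONTROL registers (all names below are stand-ins, not the DC REG_* macros):

#include <stdio.h>

static unsigned int vga_control[4] = { 1, 0, 0, 0 };  /* D1 still in VGA mode */
static unsigned int vga_test_control;

static void disable_vga_model(void)
{
	unsigned int any = 0;
	int i;

	for (i = 0; i < 4; i++)
		any |= vga_control[i] & 1;    /* DxVGA_MODE_ENABLE bit */
	if (!any)
		return;                       /* already out of VGA mode */

	for (i = 0; i < 4; i++)
		vga_control[i] = 0;

	/* start one test render so display timing gets re-latched */
	vga_test_control |= 1u << 0;          /* "VGA_TEST_ENABLE" */
	vga_test_control |= 1u << 1;          /* "VGA_TEST_RENDER_START" */
}

int main(void)
{
	disable_vga_model();
	printf("VGA_TEST_CONTROL = 0x%x\n", vga_test_control);
	return 0;
}
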
@@ -1685,16 +1709,22 @@ static void update_dchubp_dpp(
union plane_size size = plane_state->plane_size;

/* depends on DML calculation, DPP clock value may change dynamically */
/* If the requested max dpp clk is lower than the current dispclk, there is
* no need to divide by 2
*/
if (plane_state->update_flags.bits.full_update) {
bool should_divided_by_2 = context->bw.dcn.calc_clk.dppclk_khz <=
context->bw.dcn.cur_clk.dispclk_khz / 2;

dpp->funcs->dpp_dppclk_control(
dpp,
context->bw.dcn.calc_clk.max_dppclk_khz <
context->bw.dcn.calc_clk.dispclk_khz,
should_divided_by_2,
true);

dc->current_state->bw.dcn.cur_clk.max_dppclk_khz =
context->bw.dcn.calc_clk.max_dppclk_khz;
context->bw.dcn.cur_clk.max_dppclk_khz = context->bw.dcn.calc_clk.max_dppclk_khz;
dc->current_state->bw.dcn.cur_clk.dppclk_khz =
should_divided_by_2 ?
context->bw.dcn.cur_clk.dispclk_khz / 2 :
context->bw.dcn.cur_clk.dispclk_khz;
}

/* TODO: Need input parameter to tell current DCHUB pipe tie to which OTG
@@ -1780,14 +1810,62 @@ static void update_dchubp_dpp(
hubp->funcs->set_blank(hubp, false);
}

static void dcn10_otg_blank(
struct dc *dc,
struct stream_resource stream_res,
struct dc_stream_state *stream,
bool blank)
{
enum dc_color_space color_space;
struct tg_color black_color = {0};

/* program otg blank color */
color_space = stream->output_color_space;
color_space_to_black_color(dc, color_space, &black_color);

if (stream_res.tg->funcs->set_blank_color)
stream_res.tg->funcs->set_blank_color(
stream_res.tg,
&black_color);

if (!blank) {
if (stream_res.tg->funcs->set_blank)
stream_res.tg->funcs->set_blank(stream_res.tg, blank);
if (stream_res.abm)
stream_res.abm->funcs->set_abm_level(stream_res.abm, stream->abm_level);
} else if (blank) {
if (stream_res.abm)
stream_res.abm->funcs->set_abm_immediate_disable(stream_res.abm);
if (stream_res.tg->funcs->set_blank)
stream_res.tg->funcs->set_blank(stream_res.tg, blank);
}
}

static void set_hdr_multiplier(struct pipe_ctx *pipe_ctx)
{
struct fixed31_32 multiplier = dal_fixed31_32_from_fraction(
pipe_ctx->plane_state->sdr_white_level, 80);
uint32_t hw_mult = 0x1f000; // 1.0 default multiplier
struct custom_float_format fmt;

fmt.exponenta_bits = 6;
fmt.mantissa_bits = 12;
fmt.sign = true;

if (pipe_ctx->plane_state->sdr_white_level > 80)
convert_to_custom_float_format(multiplier, &fmt, &hw_mult);

pipe_ctx->plane_res.dpp->funcs->dpp_set_hdr_multiplier(
pipe_ctx->plane_res.dpp, hw_mult);
}

static void program_all_pipe_in_tree(
struct dc *dc,
struct pipe_ctx *pipe_ctx,
struct dc_state *context)
{

if (pipe_ctx->top_pipe == NULL) {
bool blank = !is_pipe_tree_visible(pipe_ctx);

pipe_ctx->stream_res.tg->dlg_otg_param.vready_offset = pipe_ctx->pipe_dlg_param.vready_offset;
pipe_ctx->stream_res.tg->dlg_otg_param.vstartup_start = pipe_ctx->pipe_dlg_param.vstartup_start;

@@ -1798,10 +1876,8 @@ static void program_all_pipe_in_tree(
pipe_ctx->stream_res.tg->funcs->program_global_sync(
pipe_ctx->stream_res.tg);

if (pipe_ctx->stream_res.tg->funcs->set_blank)
pipe_ctx->stream_res.tg->funcs->set_blank(
pipe_ctx->stream_res.tg,
!is_pipe_tree_visible(pipe_ctx));
dcn10_otg_blank(dc, pipe_ctx->stream_res,
pipe_ctx->stream, blank);
}

if (pipe_ctx->plane_state != NULL) {

@@ -1810,6 +1886,8 @@ static void program_all_pipe_in_tree(

update_dchubp_dpp(dc, pipe_ctx, context);

set_hdr_multiplier(pipe_ctx);

if (pipe_ctx->plane_state->update_flags.bits.full_update ||
pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
pipe_ctx->plane_state->update_flags.bits.gamma_change)
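
set_hdr_multiplier() above encodes sdr_white_level/80 into the CM_HDR_MULT_COEF register as a sign/6-bit-exponent/12-bit-mantissa custom float. Assuming the usual implicit-one normalization and an exponent bias of 2^(6-1) - 1 = 31, the 0x1f000 default decodes to exactly 1.0 (exponent field 0x1F, mantissa 0). A standalone re-derivation under those assumptions (the kernel uses convert_to_custom_float_format(); the encoder below is a simplified stand-in that ignores the sign bit and denormals):

#include <stdio.h>
#include <stdint.h>

static uint32_t encode_custom_float_6_12(double v)
{
	const int bias = 31;            /* assumed: 2^(exp_bits - 1) - 1 */
	int exp = 0;
	uint32_t mant;

	/* normalize v into [1.0, 2.0) */
	while (v >= 2.0) { v /= 2.0; exp++; }
	while (v < 1.0)  { v *= 2.0; exp--; }

	mant = (uint32_t)((v - 1.0) * 4096.0 + 0.5);    /* 12-bit mantissa */
	return ((uint32_t)(exp + bias) << 12) | mant;   /* sign bit left 0 */
}

int main(void)
{
	/* 1.0 -> exponent field 31, mantissa 0 -> 0x1F000, matching the
	 * default hw_mult in set_hdr_multiplier()
	 */
	printf("80/80  -> 0x%05X\n", (unsigned)encode_custom_float_6_12(80.0 / 80.0));
	/* sdr_white_level = 160 gives multiplier 160/80 = 2.0 -> 0x20000 */
	printf("160/80 -> 0x%05X\n", (unsigned)encode_custom_float_6_12(160.0 / 80.0));
	return 0;
}
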
@@ -1836,16 +1914,10 @@ static void dcn10_pplib_apply_display_requirements(
{
struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg;

pp_display_cfg->all_displays_in_sync = false;/*todo*/
pp_display_cfg->nb_pstate_switch_disable = false;
pp_display_cfg->min_engine_clock_khz = context->bw.dcn.cur_clk.dcfclk_khz;
pp_display_cfg->min_memory_clock_khz = context->bw.dcn.cur_clk.fclk_khz;
pp_display_cfg->min_engine_clock_deep_sleep_khz = context->bw.dcn.cur_clk.dcfclk_deep_sleep_khz;
pp_display_cfg->min_dcfc_deep_sleep_clock_khz = context->bw.dcn.cur_clk.dcfclk_deep_sleep_khz;
pp_display_cfg->avail_mclk_switch_time_us =
context->bw.dcn.cur_clk.dram_ccm_us > 0 ? context->bw.dcn.cur_clk.dram_ccm_us : 0;
pp_display_cfg->avail_mclk_switch_time_in_disp_active_us =
context->bw.dcn.cur_clk.min_active_dram_ccm_us > 0 ? context->bw.dcn.cur_clk.min_active_dram_ccm_us : 0;
pp_display_cfg->min_dcfclock_khz = context->bw.dcn.cur_clk.dcfclk_khz;
pp_display_cfg->disp_clk_khz = context->bw.dcn.cur_clk.dispclk_khz;
dce110_fill_display_configs(context, pp_display_cfg);
@@ -1908,29 +1980,23 @@ static void dcn10_apply_ctx_for_surface(
{
int i;
struct timing_generator *tg;
struct output_pixel_processor *opp;
bool removed_pipe[4] = { false };
unsigned int ref_clk_mhz = dc->res_pool->ref_clock_inKhz/1000;
bool program_water_mark = false;
struct dc_context *ctx = dc->ctx;

struct pipe_ctx *top_pipe_to_program =
find_top_pipe_for_stream(dc, context, stream);

if (!top_pipe_to_program)
return;

opp = top_pipe_to_program->stream_res.opp;

tg = top_pipe_to_program->stream_res.tg;

dcn10_pipe_control_lock(dc, top_pipe_to_program, true);

if (num_planes == 0) {

/* OTG blank before remove all front end */
if (tg->funcs->set_blank)
tg->funcs->set_blank(tg, true);
dcn10_otg_blank(dc, top_pipe_to_program->stream_res, top_pipe_to_program->stream, true);
}

/* Disconnect unused mpcc */
@@ -2056,6 +2122,101 @@ static void dcn10_apply_ctx_for_surface(
*/
}

static inline bool should_set_clock(bool decrease_allowed, int calc_clk, int cur_clk)
{
return ((decrease_allowed && calc_clk < cur_clk) || calc_clk > cur_clk);
}

static int determine_dppclk_threshold(struct dc *dc, struct dc_state *context)
{
bool request_dpp_div = context->bw.dcn.calc_clk.dispclk_khz >
context->bw.dcn.calc_clk.dppclk_khz;
bool dispclk_increase = context->bw.dcn.calc_clk.dispclk_khz >
context->bw.dcn.cur_clk.dispclk_khz;
int disp_clk_threshold = context->bw.dcn.calc_clk.max_supported_dppclk_khz;
bool cur_dpp_div = context->bw.dcn.cur_clk.dispclk_khz >
context->bw.dcn.cur_clk.dppclk_khz;

/* increase clock, looking for div is 0 for current, request div is 1*/
if (dispclk_increase) {
/* already divided by 2, no need to reach target clk with 2 steps*/
if (cur_dpp_div)
return context->bw.dcn.calc_clk.dispclk_khz;

/* request disp clk is lower than maximum supported dpp clk,
* no need to reach target clk with two steps.
*/
if (context->bw.dcn.calc_clk.dispclk_khz <= disp_clk_threshold)
return context->bw.dcn.calc_clk.dispclk_khz;

/* target dpp clk not request divided by 2, still within threshold */
if (!request_dpp_div)
return context->bw.dcn.calc_clk.dispclk_khz;

} else {
/* decrease clock, looking for current dppclk divided by 2,
* request dppclk not divided by 2.
*/

/* current dpp clk not divided by 2, no need to ramp*/
if (!cur_dpp_div)
return context->bw.dcn.calc_clk.dispclk_khz;

/* current disp clk is lower than current maximum dpp clk,
* no need to ramp
*/
if (context->bw.dcn.cur_clk.dispclk_khz <= disp_clk_threshold)
return context->bw.dcn.calc_clk.dispclk_khz;

/* request dpp clk need to be divided by 2 */
if (request_dpp_div)
return context->bw.dcn.calc_clk.dispclk_khz;
}

return disp_clk_threshold;
}

static void ramp_up_dispclk_with_dpp(struct dc *dc, struct dc_state *context)
{
int i;
bool request_dpp_div = context->bw.dcn.calc_clk.dispclk_khz >
context->bw.dcn.calc_clk.dppclk_khz;

int dispclk_to_dpp_threshold = determine_dppclk_threshold(dc, context);

/* set disp clk to dpp clk threshold */
dc->res_pool->display_clock->funcs->set_clock(
dc->res_pool->display_clock,
dispclk_to_dpp_threshold);

/* update request dpp clk division option */
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];

if (!pipe_ctx->plane_state)
continue;

pipe_ctx->plane_res.dpp->funcs->dpp_dppclk_control(
pipe_ctx->plane_res.dpp,
request_dpp_div,
true);
}

/* If target clk not same as dppclk threshold, set to target clock */
if (dispclk_to_dpp_threshold != context->bw.dcn.calc_clk.dispclk_khz) {
dc->res_pool->display_clock->funcs->set_clock(
dc->res_pool->display_clock,
context->bw.dcn.calc_clk.dispclk_khz);
}

context->bw.dcn.cur_clk.dispclk_khz =
context->bw.dcn.calc_clk.dispclk_khz;
context->bw.dcn.cur_clk.dppclk_khz =
context->bw.dcn.calc_clk.dppclk_khz;
context->bw.dcn.cur_clk.max_supported_dppclk_khz =
context->bw.dcn.calc_clk.max_supported_dppclk_khz;
}

static void dcn10_set_bandwidth(
struct dc *dc,
struct dc_state *context,

@@ -2073,32 +2234,32 @@ static void dcn10_set_bandwidth(
if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
return;

if (decrease_allowed || context->bw.dcn.calc_clk.dispclk_khz
> dc->current_state->bw.dcn.cur_clk.dispclk_khz) {
dc->res_pool->display_clock->funcs->set_clock(
dc->res_pool->display_clock,
context->bw.dcn.calc_clk.dispclk_khz);
context->bw.dcn.cur_clk.dispclk_khz =
context->bw.dcn.calc_clk.dispclk_khz;
}
if (decrease_allowed || context->bw.dcn.calc_clk.dcfclk_khz
> dc->current_state->bw.dcn.cur_clk.dcfclk_khz) {
if (should_set_clock(
decrease_allowed,
context->bw.dcn.calc_clk.dcfclk_khz,
dc->current_state->bw.dcn.cur_clk.dcfclk_khz)) {
context->bw.dcn.cur_clk.dcfclk_khz =
context->bw.dcn.calc_clk.dcfclk_khz;
smu_req.hard_min_dcefclk_khz =
context->bw.dcn.calc_clk.dcfclk_khz;
}
if (decrease_allowed || context->bw.dcn.calc_clk.fclk_khz
> dc->current_state->bw.dcn.cur_clk.fclk_khz) {

if (should_set_clock(
decrease_allowed,
context->bw.dcn.calc_clk.dcfclk_deep_sleep_khz,
dc->current_state->bw.dcn.cur_clk.dcfclk_deep_sleep_khz)) {
context->bw.dcn.cur_clk.dcfclk_deep_sleep_khz =
context->bw.dcn.calc_clk.dcfclk_deep_sleep_khz;
}

if (should_set_clock(
decrease_allowed,
context->bw.dcn.calc_clk.fclk_khz,
dc->current_state->bw.dcn.cur_clk.fclk_khz)) {
context->bw.dcn.cur_clk.fclk_khz =
context->bw.dcn.calc_clk.fclk_khz;
smu_req.hard_min_fclk_khz = context->bw.dcn.calc_clk.fclk_khz;
}
if (decrease_allowed || context->bw.dcn.calc_clk.dcfclk_deep_sleep_khz
> dc->current_state->bw.dcn.cur_clk.dcfclk_deep_sleep_khz) {
context->bw.dcn.cur_clk.dcfclk_deep_sleep_khz =
context->bw.dcn.calc_clk.dcfclk_deep_sleep_khz;
}

smu_req.display_count = context->stream_count;

@@ -2107,17 +2268,17 @@ static void dcn10_set_bandwidth(

*smu_req_cur = smu_req;

/* Decrease in freq is increase in period so opposite comparison for dram_ccm */
if (decrease_allowed || context->bw.dcn.calc_clk.dram_ccm_us
< dc->current_state->bw.dcn.cur_clk.dram_ccm_us) {
context->bw.dcn.cur_clk.dram_ccm_us =
context->bw.dcn.calc_clk.dram_ccm_us;
}
if (decrease_allowed || context->bw.dcn.calc_clk.min_active_dram_ccm_us
< dc->current_state->bw.dcn.cur_clk.min_active_dram_ccm_us) {
context->bw.dcn.cur_clk.min_active_dram_ccm_us =
context->bw.dcn.calc_clk.min_active_dram_ccm_us;
/* make sure dcf clk is before dpp clk to
* make sure we have enough voltage to run dpp clk
*/
if (should_set_clock(
decrease_allowed,
context->bw.dcn.calc_clk.dispclk_khz,
dc->current_state->bw.dcn.cur_clk.dispclk_khz)) {

ramp_up_dispclk_with_dpp(dc, context);
}

dcn10_pplib_apply_display_requirements(dc, context);

if (dc->debug.sanity_checks) {
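
The repeated `decrease_allowed || calc > cur` conditions in dcn10_set_bandwidth() collapse into the new should_set_clock() helper: raising a clock is always allowed, lowering it only when the caller opts in. Restated standalone so the truth table is easy to check:

#include <stdbool.h>
#include <stdio.h>

static bool should_set_clock(bool decrease_allowed, int calc_clk, int cur_clk)
{
	return (decrease_allowed && calc_clk < cur_clk) || calc_clk > cur_clk;
}

int main(void)
{
	printf("%d\n", should_set_clock(false, 300000, 600000)); /* 0: decrease blocked */
	printf("%d\n", should_set_clock(true,  300000, 600000)); /* 1: decrease allowed */
	printf("%d\n", should_set_clock(false, 600000, 300000)); /* 1: increase always ok */
	printf("%d\n", should_set_clock(false, 300000, 300000)); /* 0: no change needed */
	return 0;
}
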
@@ -440,7 +440,11 @@ static const struct dc_debug debug_defaults_drv = {
.timing_trace = false,
.clock_trace = true,

.min_disp_clk_khz = 300000,
/* raven smu doesn't allow 0 disp clk,
* smu min disp clk limit is 50Mhz
* keep min disp clk 100Mhz avoid smu hang
*/
.min_disp_clk_khz = 100000,

.disable_pplib_clock_request = true,
.disable_pplib_wm_range = false,

@@ -963,6 +967,7 @@ static struct pipe_ctx *dcn10_acquire_idle_pipe_for_layer(

idle_pipe->stream = head_pipe->stream;
idle_pipe->stream_res.tg = head_pipe->stream_res.tg;
idle_pipe->stream_res.abm = head_pipe->stream_res.abm;
idle_pipe->stream_res.opp = head_pipe->stream_res.opp;

idle_pipe->plane_res.hubp = pool->hubps[idle_pipe->pipe_idx];

@@ -24,6 +24,7 @@
*/

#include "display_rq_dlg_helpers.h"
#include "dml_logger.h"

void print__rq_params_st(struct display_mode_lib *mode_lib, display_rq_params_st rq_param)
{

@@ -31,8 +31,6 @@
#include "display_mode_structs.h"
#include "display_mode_enums.h"

#define dml_print(str, ...) {dm_logger_write(mode_lib->logger, LOG_DML, str, ##__VA_ARGS__); }
#define DTRACE(str, ...) {dm_logger_write(mode_lib->logger, LOG_DML, str, ##__VA_ARGS__); }

double dml_round(double a);

@@ -28,6 +28,7 @@

#include "dml_common_defs.h"
#include "../calcs/dcn_calc_math.h"
#include "dml_logger.h"

static inline double dml_min(double a, double b)
{

@@ -1,5 +1,5 @@
/*
* Copyright 2015 Advanced Micro Devices, Inc.
* Copyright 2018 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),

@@ -23,11 +23,16 @@
*
*/

#ifndef __AMDGPU_POWERPLAY_H__
#define __AMDGPU_POWERPLAY_H__

#include "amd_shared.h"
#ifndef __DML_LOGGER_H_
#define __DML_LOGGER_H_

#define DC_LOGGER \
mode_lib->logger

#define dml_print(str, ...) {DC_LOG_DML(str, ##__VA_ARGS__); }
#define DTRACE(str, ...) {DC_LOG_DML(str, ##__VA_ARGS__); }

#endif

extern const struct amdgpu_ip_block_version amdgpu_pp_ip_block;

#endif /* __AMDGPU_POWERPLAY_H__ */

@@ -194,6 +194,8 @@ struct stream_resource {

struct pixel_clk_params pix_clk_params;
struct encoder_info_frame encoder_info_frame;

struct abm *abm;
};

struct plane_resource {

@@ -130,6 +130,9 @@ enum bw_defines {

struct bw_calcs_dceip {
enum bw_calcs_version version;
uint32_t percent_of_ideal_port_bw_received_after_urgent_latency;
uint32_t max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation;
uint32_t max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation;
bool large_cursor;
uint32_t cursor_max_outstanding_group_num;
bool dmif_pipe_en_fbc_chunk_tracker;

@@ -230,6 +233,7 @@ struct bw_calcs_vbios {

struct bw_calcs_data {
/* data for all displays */
bool display_synchronization_enabled;
uint32_t number_of_displays;
enum bw_defines underlay_surface_type;
enum bw_defines panning_and_bezel_adjustment;

@@ -241,6 +245,7 @@ struct bw_calcs_data {
bool d1_display_write_back_dwb_enable;
enum bw_defines d1_underlay_mode;

bool increase_voltage_to_support_mclk_switch;
bool cpup_state_change_enable;
bool cpuc_state_change_enable;
bool nbp_state_change_enable;

@@ -449,6 +454,7 @@ struct bw_calcs_data {
struct bw_fixed dram_speed_change_line_source_transfer_time[maximum_number_of_surfaces][3][8];
struct bw_fixed min_dram_speed_change_margin[3][8];
struct bw_fixed dispclk_required_for_dram_speed_change[3][8];
struct bw_fixed dispclk_required_for_dram_speed_change_pipe[3][8];
struct bw_fixed blackout_duration_margin[3][8];
struct bw_fixed dispclk_required_for_blackout_duration[3][8];
struct bw_fixed dispclk_required_for_blackout_recovery[3][8];

@@ -132,6 +132,9 @@ struct dpp_funcs {
const struct dc_cursor_mi_param *param,
uint32_t width
);
void (*dpp_set_hdr_multiplier)(
struct dpp *dpp_base,
uint32_t multiplier);

void (*dpp_dppclk_control)(
struct dpp *dpp_base,

@@ -98,6 +98,7 @@ enum dc_log_type {
LOG_EVENT_UNDERFLOW,
LOG_IF_TRACE,
LOG_PERF_TRACE,
LOG_PROFILING,

LOG_SECTION_TOTAL_COUNT
};

@@ -1267,7 +1267,8 @@ bool mod_color_calculate_curve(enum dc_transfer_func_predefined trans,
bool ret = false;
struct pwl_float_data_ex *rgb_regamma = NULL;

if (trans == TRANSFER_FUNCTION_UNITY) {
if (trans == TRANSFER_FUNCTION_UNITY ||
trans == TRANSFER_FUNCTION_LINEAR) {
points->end_exponent = 0;
points->x_point_at_y1_red = 1;
points->x_point_at_y1_green = 1;

@@ -1337,7 +1338,8 @@ bool mod_color_calculate_degamma_curve(enum dc_transfer_func_predefined trans,
bool ret = false;
struct pwl_float_data_ex *rgb_degamma = NULL;

if (trans == TRANSFER_FUNCTION_UNITY) {
if (trans == TRANSFER_FUNCTION_UNITY ||
trans == TRANSFER_FUNCTION_LINEAR) {

for (i = 0; i <= MAX_HW_POINTS ; i++) {
points->red[i] = coordinates_x[i].x;

@@ -33,7 +33,7 @@
/* Refresh rate ramp at a fixed rate of 65 Hz/second */
#define STATIC_SCREEN_RAMP_DELTA_REFRESH_RATE_PER_FRAME ((1000 / 60) * 65)
/* Number of elements in the render times cache array */
#define RENDER_TIMES_MAX_COUNT 20
#define RENDER_TIMES_MAX_COUNT 10
/* Threshold to exit BTR (to avoid frequent enter-exits at the lower limit) */
#define BTR_EXIT_MARGIN 2000
/* Number of consecutive frames to check before entering/exiting fixed refresh*/

@@ -46,13 +46,15 @@

#define FREESYNC_NO_STATIC_FOR_INTERNAL_REGKEY "DalFreeSyncNoStaticForInternal"

#define FREESYNC_DEFAULT_REGKEY "LCDFreeSyncDefault"

struct gradual_static_ramp {
bool ramp_is_active;
bool ramp_direction_is_up;
unsigned int ramp_current_frame_duration_in_ns;
};

struct time_cache {
struct freesync_time {
/* video (48Hz feature) related */
unsigned int update_duration_in_ns;

@@ -64,6 +66,9 @@ struct time_cache {

unsigned int render_times_index;
unsigned int render_times[RENDER_TIMES_MAX_COUNT];

unsigned int min_window;
unsigned int max_window;
};

struct below_the_range {

@@ -98,11 +103,14 @@ struct freesync_state {
bool static_screen;
bool video;

unsigned int vmin;
unsigned int vmax;

struct freesync_time time;

unsigned int nominal_refresh_rate_in_micro_hz;
bool windowed_fullscreen;

struct time_cache time;

struct gradual_static_ramp static_ramp;
struct below_the_range btr;
struct fixed_refresh fixed_refresh;

@@ -119,14 +127,16 @@ struct freesync_entity {
struct freesync_registry_options {
bool drr_external_supported;
bool drr_internal_supported;
bool lcd_freesync_default_set;
int lcd_freesync_default_value;
};

struct core_freesync {
struct mod_freesync public;
struct dc *dc;
struct freesync_registry_options opts;
struct freesync_entity *map;
int num_entities;
struct freesync_registry_options opts;
};

#define MOD_FREESYNC_TO_CORE(mod_freesync)\

@@ -146,7 +156,7 @@ struct mod_freesync *mod_freesync_create(struct dc *dc)
goto fail_alloc_context;

core_freesync->map = kzalloc(sizeof(struct freesync_entity) * MOD_FREESYNC_MAX_CONCURRENT_STREAMS,
GFP_KERNEL);
GFP_KERNEL);

if (core_freesync->map == NULL)
goto fail_alloc_map;

@@ -183,6 +193,16 @@ struct mod_freesync *mod_freesync_create(struct dc *dc)
(data & 1) ? false : true;
}

if (dm_read_persistent_data(dc->ctx, NULL, NULL,
FREESYNC_DEFAULT_REGKEY,
&data, sizeof(data), &flag)) {
core_freesync->opts.lcd_freesync_default_set = true;
core_freesync->opts.lcd_freesync_default_value = data;
} else {
core_freesync->opts.lcd_freesync_default_set = false;
core_freesync->opts.lcd_freesync_default_value = 0;
}

return &core_freesync->public;

fail_construct:
@@ -288,6 +308,18 @@ bool mod_freesync_add_stream(struct mod_freesync *mod_freesync,
core_freesync->map[core_freesync->num_entities].user_enable.
enable_for_video =
(persistent_freesync_enable & 4) ? true : false;
/* If this is a FreeSync display and LCDFreeSyncDefault is set, use it as
* the default and write it back to userenable */
} else if (caps->supported && (core_freesync->opts.lcd_freesync_default_set)) {
core_freesync->map[core_freesync->num_entities].user_enable.enable_for_gaming =
(core_freesync->opts.lcd_freesync_default_value & 1) ? true : false;
core_freesync->map[core_freesync->num_entities].user_enable.enable_for_static =
(core_freesync->opts.lcd_freesync_default_value & 2) ? true : false;
core_freesync->map[core_freesync->num_entities].user_enable.enable_for_video =
(core_freesync->opts.lcd_freesync_default_value & 4) ? true : false;
dm_write_persistent_data(dc->ctx, stream->sink,
FREESYNC_REGISTRY_NAME,
"userenable", &core_freesync->opts.lcd_freesync_default_value,
sizeof(int), &flag);
} else {
core_freesync->map[core_freesync->num_entities].user_enable.
enable_for_gaming = false;
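
From the & 1 / & 2 / & 4 tests in this hunk, the persistent userenable value packs three enables into its low bits: bit 0 gaming, bit 1 static screen, bit 2 video. A small self-contained decoder under that inferred layout:

#include <stdbool.h>
#include <stdio.h>

struct user_enable { bool gaming, static_screen, video; };

static struct user_enable decode_user_enable(int value)
{
	struct user_enable ue = {
		.gaming        = value & 1,   /* bit 0 */
		.static_screen = value & 2,   /* bit 1 */
		.video         = value & 4,   /* bit 2 */
	};
	return ue;
}

int main(void)
{
	struct user_enable ue = decode_user_enable(5);  /* gaming + video */
	printf("gaming=%d static=%d video=%d\n",
	       ue.gaming, ue.static_screen, ue.video);
	return 0;
}
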
@@ -330,6 +362,25 @@ bool mod_freesync_remove_stream(struct mod_freesync *mod_freesync,
return true;
}

static void adjust_vmin_vmax(struct core_freesync *core_freesync,
struct dc_stream_state **streams,
int num_streams,
int map_index,
unsigned int v_total_min,
unsigned int v_total_max)
{
if (num_streams == 0 || streams == NULL || num_streams > 1)
return;

core_freesync->map[map_index].state.vmin = v_total_min;
core_freesync->map[map_index].state.vmax = v_total_max;

dc_stream_adjust_vmin_vmax(core_freesync->dc, streams,
num_streams, v_total_min,
v_total_max);
}


static void update_stream_freesync_context(struct core_freesync *core_freesync,
struct dc_stream_state *stream)
{
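
adjust_vmin_vmax() wraps dc_stream_adjust_vmin_vmax() so that every programming path also caches vmin/vmax in the per-stream freesync state, which the new mod_freesync_get_settings() at the end of this file reads back. The shape of the wrapper, reduced to a runnable sketch with illustrative types:

#include <stdio.h>

struct fs_state { unsigned int vmin, vmax; };

static void program_vtotal(unsigned int vmin, unsigned int vmax)
{
	(void)vmin; (void)vmax;   /* stands in for dc_stream_adjust_vmin_vmax() */
}

static void adjust(struct fs_state *s, unsigned int vmin, unsigned int vmax)
{
	s->vmin = vmin;           /* cache first, so queries see what was programmed */
	s->vmax = vmax;
	program_vtotal(vmin, vmax);
}

int main(void)
{
	struct fs_state s = {0};

	adjust(&s, 1080, 1600);
	printf("cached window: %u..%u\n", s.vmin, s.vmax);
	return 0;
}
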
@@ -588,9 +639,10 @@ static bool set_freesync_on_streams(struct core_freesync *core_freesync,
update_stream_freesync_context(core_freesync,
streams[stream_idx]);

dc_stream_adjust_vmin_vmax(core_freesync->dc, streams,
num_streams, v_total_min,
v_total_max);
adjust_vmin_vmax(core_freesync, streams,
num_streams, map_index,
v_total_min,
v_total_max);

return true;

@@ -613,9 +665,10 @@ static bool set_freesync_on_streams(struct core_freesync *core_freesync,
core_freesync,
streams[stream_idx]);

dc_stream_adjust_vmin_vmax(
core_freesync->dc, streams,
num_streams, v_total_nominal,
adjust_vmin_vmax(
core_freesync, streams,
num_streams, map_index,
v_total_nominal,
v_total_nominal);
}
return true;

@@ -632,9 +685,10 @@ static bool set_freesync_on_streams(struct core_freesync *core_freesync,
core_freesync,
streams[stream_idx]);

dc_stream_adjust_vmin_vmax(core_freesync->dc, streams,
num_streams, v_total_nominal,
v_total_nominal);
adjust_vmin_vmax(core_freesync, streams,
num_streams, map_index,
v_total_nominal,
v_total_nominal);

/* Reset the cached variables */
reset_freesync_state_variables(state);

@@ -650,9 +704,10 @@ static bool set_freesync_on_streams(struct core_freesync *core_freesync,
* not support freesync because a former stream has
* been programmed
*/
adjust_vmin_vmax(core_freesync, streams,
num_streams, map_index,
v_total_nominal,
v_total_nominal);
/* Reset the cached variables */
reset_freesync_state_variables(state);
}
@@ -769,8 +824,9 @@ void mod_freesync_handle_v_update(struct mod_freesync *mod_freesync,
vmin = inserted_frame_v_total;

/* Program V_TOTAL */
dc_stream_adjust_vmin_vmax(core_freesync->dc, streams,
num_streams, vmin, vmax);
adjust_vmin_vmax(core_freesync, streams,
num_streams, index,
vmin, vmax);
}

if (state->btr.frame_counter > 0)

@@ -804,9 +860,10 @@ void mod_freesync_handle_v_update(struct mod_freesync *mod_freesync,
update_stream_freesync_context(core_freesync, streams[0]);

/* Program static screen ramp values */
dc_stream_adjust_vmin_vmax(core_freesync->dc, streams,
num_streams, v_total,
v_total);
adjust_vmin_vmax(core_freesync, streams,
num_streams, index,
v_total,
v_total);

triggers.overlay_update = true;
triggers.surface_update = true;

@@ -1063,9 +1120,9 @@ bool mod_freesync_override_min_max(struct mod_freesync *mod_freesync,
max_refresh);

/* Program vtotal min/max */
dc_stream_adjust_vmin_vmax(core_freesync->dc, &streams, 1,
state->freesync_range.vmin,
state->freesync_range.vmax);
adjust_vmin_vmax(core_freesync, &streams, 1, index,
state->freesync_range.vmin,
state->freesync_range.vmax);
}

if (min_refresh != 0 &&

@@ -1399,11 +1456,9 @@ static void apply_fixed_refresh(struct core_freesync *core_freesync,
} else {

vmin = state->freesync_range.vmin;

vmax = vmin;

dc_stream_adjust_vmin_vmax(core_freesync->dc, &stream,
1, vmin, vmax);
adjust_vmin_vmax(core_freesync, &stream, map_index,
1, vmin, vmax);
}
}

@@ -1457,3 +1512,43 @@ void mod_freesync_pre_update_plane_addresses(struct mod_freesync *mod_freesync,

}
}

void mod_freesync_get_settings(struct mod_freesync *mod_freesync,
struct dc_stream_state **streams, int num_streams,
unsigned int *v_total_min, unsigned int *v_total_max,
unsigned int *event_triggers,
unsigned int *window_min, unsigned int *window_max,
unsigned int *lfc_mid_point_in_us,
unsigned int *inserted_frames,
unsigned int *inserted_duration_in_us)
{
unsigned int stream_index, map_index;
struct core_freesync *core_freesync = NULL;

if (mod_freesync == NULL)
return;

core_freesync = MOD_FREESYNC_TO_CORE(mod_freesync);

for (stream_index = 0; stream_index < num_streams; stream_index++) {

map_index = map_index_from_stream(core_freesync,
streams[stream_index]);

if (core_freesync->map[map_index].caps->supported) {
struct freesync_state state =
core_freesync->map[map_index].state;
*v_total_min = state.vmin;
*v_total_max = state.vmax;
*event_triggers = 0;
*window_min = state.time.min_window;
*window_max = state.time.max_window;
*lfc_mid_point_in_us = state.btr.mid_point_in_us;
*inserted_frames = state.btr.frames_to_insert;
*inserted_duration_in_us =
state.btr.inserted_frame_duration_in_us;
}

}
}

@@ -164,4 +164,13 @@ void mod_freesync_pre_update_plane_addresses(struct mod_freesync *mod_freesync,
struct dc_stream_state **streams, int num_streams,
unsigned int curr_time_stamp);

void mod_freesync_get_settings(struct mod_freesync *mod_freesync,
struct dc_stream_state **streams, int num_streams,
unsigned int *v_total_min, unsigned int *v_total_max,
unsigned int *event_triggers,
unsigned int *window_min, unsigned int *window_max,
unsigned int *lfc_mid_point_in_us,
unsigned int *inserted_frames,
unsigned int *inserted_duration_in_us);

#endif

|
|||
/*
|
||||
* Copyright 2015 Advanced Micro Devices, Inc.
|
||||
* Copyright 2016 Advanced Micro Devices, Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
|
@ -19,29 +19,47 @@
|
|||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
* Authors: AMD
|
||||
*
|
||||
*/
|
||||
#ifndef PP_ASICBLOCKS_H
|
||||
#define PP_ASICBLOCKS_H
|
||||
|
||||
#ifndef MODULES_INC_MOD_STATS_H_
|
||||
#define MODULES_INC_MOD_STATS_H_
|
||||
|
||||
enum PHM_AsicBlock {
|
||||
PHM_AsicBlock_GFX,
|
||||
PHM_AsicBlock_UVD_MVC,
|
||||
PHM_AsicBlock_UVD,
|
||||
PHM_AsicBlock_UVD_HD,
|
||||
PHM_AsicBlock_UVD_SD,
|
||||
PHM_AsicBlock_Count
|
||||
#include "dm_services.h"
|
||||
|
||||
struct mod_stats {
|
||||
int dummy;
|
||||
};
|
||||
|
||||
enum PHM_ClockGateSetting {
|
||||
PHM_ClockGateSetting_StaticOn,
|
||||
PHM_ClockGateSetting_StaticOff,
|
||||
PHM_ClockGateSetting_Dynamic
|
||||
struct mod_stats_caps {
|
||||
bool dummy;
|
||||
};
|
||||
|
||||
struct phm_asic_blocks {
|
||||
bool gfx : 1;
|
||||
bool uvd : 1;
|
||||
};
|
||||
struct mod_stats *mod_stats_create(struct dc *dc);
|
||||
|
||||
#endif
|
||||
void mod_stats_destroy(struct mod_stats *mod_stats);
|
||||
|
||||
bool mod_stats_init(struct mod_stats *mod_stats);
|
||||
|
||||
void mod_stats_dump(struct mod_stats *mod_stats);
|
||||
|
||||
void mod_stats_reset_data(struct mod_stats *mod_stats);
|
||||
|
||||
void mod_stats_update_flip(struct mod_stats *mod_stats,
|
||||
unsigned long timestamp_in_ns);
|
||||
|
||||
void mod_stats_update_vupdate(struct mod_stats *mod_stats,
|
||||
unsigned long timestamp_in_ns);
|
||||
|
||||
void mod_stats_update_freesync(struct mod_stats *mod_stats,
|
||||
unsigned int v_total_min,
|
||||
unsigned int v_total_max,
|
||||
unsigned int event_triggers,
|
||||
unsigned int window_min,
|
||||
unsigned int window_max,
|
||||
unsigned int lfc_mid_point_in_us,
|
||||
unsigned int inserted_frames,
|
||||
unsigned int inserted_frame_duration_in_us);
|
||||
|
||||
#endif /* MODULES_INC_MOD_STATS_H_ */
|
|
@@ -0,0 +1,334 @@
/*
* Copyright 2016 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/

#include "mod_stats.h"
#include "dm_services.h"
#include "dc.h"
#include "core_types.h"

#define DAL_STATS_ENABLE_REGKEY "DalStatsEnable"
#define DAL_STATS_ENABLE_REGKEY_DEFAULT 0x00000001
#define DAL_STATS_ENABLE_REGKEY_ENABLED 0x00000001

#define DAL_STATS_ENTRIES_REGKEY "DalStatsEntries"
#define DAL_STATS_ENTRIES_REGKEY_DEFAULT 0x00350000
#define DAL_STATS_ENTRIES_REGKEY_MAX 0x01000000

#define MOD_STATS_NUM_VSYNCS 5

struct stats_time_cache {
unsigned long flip_timestamp_in_ns;
unsigned long vupdate_timestamp_in_ns;

unsigned int render_time_in_us;
unsigned int avg_render_time_in_us_last_ten;
unsigned int v_sync_time_in_us[MOD_STATS_NUM_VSYNCS];
unsigned int num_vsync_between_flips;

unsigned int flip_to_vsync_time_in_us;
unsigned int vsync_to_flip_time_in_us;

unsigned int min_window;
unsigned int max_window;
unsigned int v_total_min;
unsigned int v_total_max;
unsigned int event_triggers;

unsigned int lfc_mid_point_in_us;
unsigned int num_frames_inserted;
unsigned int inserted_duration_in_us;

unsigned int flags;
};

struct core_stats {
struct mod_stats public;
struct dc *dc;

struct stats_time_cache *time;
unsigned int index;

bool enabled;
unsigned int entries;
};

#define MOD_STATS_TO_CORE(mod_stats)\
container_of(mod_stats, struct core_stats, public)

bool mod_stats_init(struct mod_stats *mod_stats)
{
bool result = false;
struct core_stats *core_stats = NULL;
struct dc *dc = NULL;

if (mod_stats == NULL)
return false;

core_stats = MOD_STATS_TO_CORE(mod_stats);
dc = core_stats->dc;

return result;
}

struct mod_stats *mod_stats_create(struct dc *dc)
{
struct core_stats *core_stats = NULL;
struct persistent_data_flag flag;
unsigned int reg_data;
int i = 0;

core_stats = kzalloc(sizeof(struct core_stats), GFP_KERNEL);

if (core_stats == NULL)
goto fail_alloc_context;

if (dc == NULL)
goto fail_construct;

core_stats->dc = dc;

core_stats->enabled = DAL_STATS_ENABLE_REGKEY_DEFAULT;
if (dm_read_persistent_data(dc->ctx, NULL, NULL,
DAL_STATS_ENABLE_REGKEY,
&reg_data, sizeof(unsigned int), &flag))
core_stats->enabled = reg_data;

core_stats->entries = DAL_STATS_ENTRIES_REGKEY_DEFAULT;
if (dm_read_persistent_data(dc->ctx, NULL, NULL,
DAL_STATS_ENTRIES_REGKEY,
&reg_data, sizeof(unsigned int), &flag)) {
if (reg_data > DAL_STATS_ENTRIES_REGKEY_MAX)
core_stats->entries = DAL_STATS_ENTRIES_REGKEY_MAX;
else
core_stats->entries = reg_data;
}

core_stats->time = kzalloc(sizeof(struct stats_time_cache) * core_stats->entries,
GFP_KERNEL);

if (core_stats->time == NULL)
goto fail_construct;

/* Purposely leave index 0 unused so we don't need special logic to
* handle calculation cases that depend on previous flip data.
*/
core_stats->index = 1;

return &core_stats->public;

fail_construct:
kfree(core_stats);

fail_alloc_context:
return NULL;
}

void mod_stats_destroy(struct mod_stats *mod_stats)
{
if (mod_stats != NULL) {
struct core_stats *core_stats = MOD_STATS_TO_CORE(mod_stats);

if (core_stats->time != NULL)
kfree(core_stats->time);

kfree(core_stats);
}
}

void mod_stats_dump(struct mod_stats *mod_stats)
{
struct dc *dc = NULL;
struct dal_logger *logger = NULL;
struct core_stats *core_stats = NULL;
struct stats_time_cache *time = NULL;
unsigned int index = 0;

if (mod_stats == NULL)
return;

core_stats = MOD_STATS_TO_CORE(mod_stats);
dc = core_stats->dc;
logger = dc->ctx->logger;
time = core_stats->time;

//LogEntry* pLog = GetLog()->Open(LogMajor_ISR, LogMinor_ISR_FreeSyncSW);

//if (!pLog->IsDummyEntry())
{
dm_logger_write(logger, LOG_PROFILING, "==Display Caps==\n");
dm_logger_write(logger, LOG_PROFILING, "\n");
dm_logger_write(logger, LOG_PROFILING, "\n");

dm_logger_write(logger, LOG_PROFILING, "==Stats==\n");
dm_logger_write(logger, LOG_PROFILING,
"render avgRender minWindow midPoint maxWindow vsyncToFlip flipToVsync #vsyncBetweenFlip #frame insertDuration vTotalMin vTotalMax eventTrigs vSyncTime1 vSyncTime2 vSyncTime3 vSyncTime4 vSyncTime5 flags\n");

for (int i = 0; i < core_stats->index && i < core_stats->entries; i++) {
dm_logger_write(logger, LOG_PROFILING,
"%u %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u\n",
time[i].render_time_in_us,
time[i].avg_render_time_in_us_last_ten,
time[i].min_window,
time[i].lfc_mid_point_in_us,
time[i].max_window,
time[i].vsync_to_flip_time_in_us,
time[i].flip_to_vsync_time_in_us,
time[i].num_vsync_between_flips,
time[i].num_frames_inserted,
time[i].inserted_duration_in_us,
time[i].v_total_min,
time[i].v_total_max,
time[i].event_triggers,
time[i].v_sync_time_in_us[0],
time[i].v_sync_time_in_us[1],
time[i].v_sync_time_in_us[2],
time[i].v_sync_time_in_us[3],
time[i].v_sync_time_in_us[4],
time[i].flags);
}
}
//GetLog()->Close(pLog);
//GetLog()->UnSetLogMask(LogMajor_ISR, LogMinor_ISR_FreeSyncSW);
}

void mod_stats_reset_data(struct mod_stats *mod_stats)
{
struct core_stats *core_stats = NULL;
struct stats_time_cache *time = NULL;
unsigned int index = 0;

if (mod_stats == NULL)
return;

core_stats = MOD_STATS_TO_CORE(mod_stats);

memset(core_stats->time, 0,
sizeof(struct stats_time_cache) * core_stats->entries);

core_stats->index = 0;
}

void mod_stats_update_flip(struct mod_stats *mod_stats,
unsigned long timestamp_in_ns)
{
struct core_stats *core_stats = NULL;
struct stats_time_cache *time = NULL;
unsigned int index = 0;

if (mod_stats == NULL)
return;

core_stats = MOD_STATS_TO_CORE(mod_stats);

if (core_stats->index >= core_stats->entries)
return;

time = core_stats->time;
index = core_stats->index;

time[index].flip_timestamp_in_ns = timestamp_in_ns;
time[index].render_time_in_us =
timestamp_in_ns - time[index - 1].flip_timestamp_in_ns;

if (index >= 10) {
for (unsigned int i = 0; i < 10; i++)
time[index].avg_render_time_in_us_last_ten +=
time[index - i].render_time_in_us;
time[index].avg_render_time_in_us_last_ten /= 10;
}

if (time[index].num_vsync_between_flips > 0)
time[index].vsync_to_flip_time_in_us =
timestamp_in_ns - time[index].vupdate_timestamp_in_ns;
else
time[index].vsync_to_flip_time_in_us =
timestamp_in_ns - time[index - 1].vupdate_timestamp_in_ns;

core_stats->index++;
}

void mod_stats_update_vupdate(struct mod_stats *mod_stats,
unsigned long timestamp_in_ns)
{
struct core_stats *core_stats = NULL;
struct stats_time_cache *time = NULL;
unsigned int index = 0;

if (mod_stats == NULL)
return;

core_stats = MOD_STATS_TO_CORE(mod_stats);

if (core_stats->index >= core_stats->entries)
return;

time = core_stats->time;
index = core_stats->index;

time[index].vupdate_timestamp_in_ns = timestamp_in_ns;
if (time[index].num_vsync_between_flips < MOD_STATS_NUM_VSYNCS)
time[index].v_sync_time_in_us[time[index].num_vsync_between_flips] =
timestamp_in_ns - time[index - 1].vupdate_timestamp_in_ns;
time[index].flip_to_vsync_time_in_us =
timestamp_in_ns - time[index - 1].flip_timestamp_in_ns;

time[index].num_vsync_between_flips++;
}

void mod_stats_update_freesync(struct mod_stats *mod_stats,
unsigned int v_total_min,
unsigned int v_total_max,
unsigned int event_triggers,
unsigned int window_min,
unsigned int window_max,
unsigned int lfc_mid_point_in_us,
unsigned int inserted_frames,
unsigned int inserted_duration_in_us)
{
struct core_stats *core_stats = NULL;
struct stats_time_cache *time = NULL;
unsigned int index = 0;

if (mod_stats == NULL)
return;

core_stats = MOD_STATS_TO_CORE(mod_stats);

if (core_stats->index >= core_stats->entries)
return;

time = core_stats->time;
index = core_stats->index;

time[index].v_total_min = v_total_min;
time[index].v_total_max = v_total_max;
time[index].event_triggers = event_triggers;
time[index].min_window = window_min;
time[index].max_window = window_max;
time[index].lfc_mid_point_in_us = lfc_mid_point_in_us;
time[index].num_frames_inserted = inserted_frames;
time[index].inserted_duration_in_us = inserted_duration_in_us;
}
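
mod_stats_create() above deliberately starts the entry index at 1 so that mod_stats_update_flip() and mod_stats_update_vupdate() can always read time[index - 1] without a special first-sample case, as the in-code comment notes. A runnable toy model of that indexing (field names are illustrative):

#include <stdio.h>

#define ENTRIES 8

struct sample { unsigned long flip_ns; unsigned long delta_ns; };

int main(void)
{
	struct sample time[ENTRIES] = {0};
	unsigned int index = 1;                 /* slot 0 left unused on purpose */
	unsigned long stamps[] = { 1000, 17666, 34333 };

	for (unsigned int i = 0; i < 3 && index < ENTRIES; i++, index++) {
		time[index].flip_ns = stamps[i];
		/* index - 1 is always valid, even for the first real sample */
		time[index].delta_ns = stamps[i] - time[index - 1].flip_ns;
	}
	printf("delta[2] = %lu ns\n", time[2].delta_ns);
	return 0;
}
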
@@ -24,8 +24,7 @@
#ifndef __KGD_PP_INTERFACE_H__
#define __KGD_PP_INTERFACE_H__

extern const struct amd_ip_funcs pp_ip_funcs;
extern const struct amd_pm_funcs pp_dpm_funcs;
extern const struct amdgpu_ip_block_version pp_smu_ip_block;

struct amd_vce_state {
/* vce clocks */

@ -0,0 +1,70 @@
/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef __SOC15_IH_CLIENTID_H__
#define __SOC15_IH_CLIENTID_H__

/*
 * vega10+ IH clients
 */
enum soc15_ih_clientid {
	SOC15_IH_CLIENTID_IH = 0x00,
	SOC15_IH_CLIENTID_ACP = 0x01,
	SOC15_IH_CLIENTID_ATHUB = 0x02,
	SOC15_IH_CLIENTID_BIF = 0x03,
	SOC15_IH_CLIENTID_DCE = 0x04,
	SOC15_IH_CLIENTID_ISP = 0x05,
	SOC15_IH_CLIENTID_PCIE0 = 0x06,
	SOC15_IH_CLIENTID_RLC = 0x07,
	SOC15_IH_CLIENTID_SDMA0 = 0x08,
	SOC15_IH_CLIENTID_SDMA1 = 0x09,
	SOC15_IH_CLIENTID_SE0SH = 0x0a,
	SOC15_IH_CLIENTID_SE1SH = 0x0b,
	SOC15_IH_CLIENTID_SE2SH = 0x0c,
	SOC15_IH_CLIENTID_SE3SH = 0x0d,
	SOC15_IH_CLIENTID_SYSHUB = 0x0e,
	SOC15_IH_CLIENTID_THM = 0x0f,
	SOC15_IH_CLIENTID_UVD = 0x10,
	SOC15_IH_CLIENTID_VCE0 = 0x11,
	SOC15_IH_CLIENTID_VMC = 0x12,
	SOC15_IH_CLIENTID_XDMA = 0x13,
	SOC15_IH_CLIENTID_GRBM_CP = 0x14,
	SOC15_IH_CLIENTID_ATS = 0x15,
	SOC15_IH_CLIENTID_ROM_SMUIO = 0x16,
	SOC15_IH_CLIENTID_DF = 0x17,
	SOC15_IH_CLIENTID_VCE1 = 0x18,
	SOC15_IH_CLIENTID_PWR = 0x19,
	SOC15_IH_CLIENTID_UTCL2 = 0x1b,
	SOC15_IH_CLIENTID_EA = 0x1c,
	SOC15_IH_CLIENTID_UTCL2LOG = 0x1d,
	SOC15_IH_CLIENTID_MP0 = 0x1e,
	SOC15_IH_CLIENTID_MP1 = 0x1f,

	SOC15_IH_CLIENTID_MAX,

	SOC15_IH_CLIENTID_VCN = SOC15_IH_CLIENTID_UVD
};

#endif

@ -24,191 +24,191 @@
#define MAX_INSTANCE 5
#define MAX_SEGMENT 5

struct IP_BASE_INSTANCE
{
	unsigned int segment[MAX_SEGMENT];
};

struct IP_BASE
{
	struct IP_BASE_INSTANCE instance[MAX_INSTANCE];
};


static const struct IP_BASE NBIF_BASE = { { { { 0x00000000, 0x00000014, 0x00000D20, 0x00010400, 0 } },
		{ { 0, 0, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } } } };
static const struct IP_BASE NBIO_BASE = { { { { 0x00000000, 0x00000014, 0x00000D20, 0x00010400, 0 } },
		{ { 0, 0, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } } } };
static const struct IP_BASE DCE_BASE = { { { { 0x00000012, 0x000000C0, 0x000034C0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } } } };
static const struct IP_BASE DCN_BASE = { { { { 0x00000012, 0x000000C0, 0x000034C0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } } } };
static const struct IP_BASE MP0_BASE = { { { { 0x00016000, 0, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } } } };
static const struct IP_BASE MP1_BASE = { { { { 0x00016000, 0, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } } } };
static const struct IP_BASE MP2_BASE = { { { { 0x00016000, 0, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } } } };
static const struct IP_BASE DF_BASE = { { { { 0x00007000, 0, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } } } };
static const struct IP_BASE UVD_BASE = { { { { 0x00007800, 0x00007E00, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } } } }; //note: GLN does not use the first segment
static const struct IP_BASE VCN_BASE = { { { { 0x00007800, 0x00007E00, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } } } }; //note: GLN does not use the first segment
static const struct IP_BASE DBGU_BASE = { { { { 0x00000180, 0x000001A0, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } } } }; // not exist
static const struct IP_BASE DBGU_NBIO_BASE = { { { { 0x000001C0, 0, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } } } }; // not exist
static const struct IP_BASE DBGU_IO_BASE = { { { { 0x000001E0, 0, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } } } }; // not exist
static const struct IP_BASE DFX_DAP_BASE = { { { { 0x000005A0, 0, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } } } }; // not exist
static const struct IP_BASE DFX_BASE = { { { { 0x00000580, 0, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } } } }; // this file does not contain registers
static const struct IP_BASE ISP_BASE = { { { { 0x00018000, 0, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } } } }; // not exist
static const struct IP_BASE SYSTEMHUB_BASE = { { { { 0x00000EA0, 0, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } } } }; // not exist
static const struct IP_BASE L2IMU_BASE = { { { { 0x00007DC0, 0, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } } } };
static const struct IP_BASE IOHC_BASE = { { { { 0x00010000, 0, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } } } };
static const struct IP_BASE ATHUB_BASE = { { { { 0x00000C20, 0, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } } } };
static const struct IP_BASE VCE_BASE = { { { { 0x00007E00, 0x00048800, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } } } };
static const struct IP_BASE GC_BASE = { { { { 0x00002000, 0x0000A000, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } } } };
static const struct IP_BASE MMHUB_BASE = { { { { 0x0001A000, 0, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } } } };
static const struct IP_BASE RSMU_BASE = { { { { 0x00012000, 0, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } } } };
static const struct IP_BASE HDP_BASE = { { { { 0x00000F20, 0, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } } } };
static const struct IP_BASE OSSSYS_BASE = { { { { 0x000010A0, 0, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } } } };
static const struct IP_BASE SDMA0_BASE = { { { { 0x00001260, 0, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } } } };
static const struct IP_BASE SDMA1_BASE = { { { { 0x00001460, 0, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } } } };
static const struct IP_BASE XDMA_BASE = { { { { 0x00003400, 0, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } } } };
static const struct IP_BASE UMC_BASE = { { { { 0x00014000, 0, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } } } };
static const struct IP_BASE THM_BASE = { { { { 0x00016600, 0, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } } } };
static const struct IP_BASE SMUIO_BASE = { { { { 0x00016800, 0, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } } } };
static const struct IP_BASE PWR_BASE = { { { { 0x00016A00, 0, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } } } };
static const struct IP_BASE CLK_BASE = { { { { 0x00016C00, 0, 0, 0, 0 } },
		{ { 0x00016E00, 0, 0, 0, 0 } },
		{ { 0x00017000, 0, 0, 0, 0 } },
		{ { 0x00017200, 0, 0, 0, 0 } },
		{ { 0x00017E00, 0, 0, 0, 0 } } } };
static const struct IP_BASE FUSE_BASE = { { { { 0x00017400, 0, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } },
		{ { 0, 0, 0, 0, 0 } } } };

File diff suppressed because it is too large

@ -24,14 +24,14 @@
# It provides the hardware management services for the driver.

HARDWARE_MGR = hwmgr.o processpptables.o \
		hardwaremanager.o cz_hwmgr.o \
		cz_clockpowergating.o pppcielanes.o \
		hardwaremanager.o smu8_hwmgr.o \
		pppcielanes.o \
		process_pptables_v1_0.o ppatomctrl.o ppatomfwctrl.o \
		smu7_hwmgr.o smu7_powertune.o smu7_thermal.o \
		smu7_clockpowergating.o \
		vega10_processpptables.o vega10_hwmgr.o vega10_powertune.o \
		vega10_thermal.o rv_hwmgr.o pp_psm.o \
		pp_overdriver.o
		vega10_thermal.o smu10_hwmgr.o pp_psm.o \
		pp_overdriver.o smu_helper.o

AMD_PP_HWMGR = $(addprefix $(AMD_PP_PATH)/hwmgr/,$(HARDWARE_MGR))

@ -1,209 +0,0 @@
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "hwmgr.h"
#include "cz_clockpowergating.h"
#include "cz_ppsmc.h"

/* PhyID -> Status Mapping in DDI_PHY_GEN_STATUS
	0	GFX0L  (3:0),  (27:24),
	1	GFX0H  (7:4),  (31:28),
	2	GFX1L  (3:0),  (19:16),
	3	GFX1H  (7:4),  (23:20),
	4	DDIL   (3:0),  (11: 8),
	5	DDIH   (7:4),  (15:12),
	6	DDI2L  (3:0),  ( 3: 0),
	7	DDI2H  (7:4),  ( 7: 4),
*/
#define DDI_PHY_GEN_STATUS_VAL(phyID) (1 << ((3 - ((phyID & 0x07)/2))*8 + (phyID & 0x01)*4))
#define IS_PHY_ID_USED_BY_PLL(PhyID) (((0xF3 & (1 << PhyID)) & 0xFF) ? true : false)
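
/*
 * Worked example (illustrative, not part of the original source; derived
 * from the mapping table above): DDI_PHY_GEN_STATUS_VAL(4) expands to
 * 1 << ((3 - 4/2)*8 + 0*4) = 1 << 8, the low bit of DDIL's (11:8) status
 * nibble, and DDI_PHY_GEN_STATUS_VAL(5) to 1 << 12, the low bit of DDIH's
 * (15:12) nibble.
 */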


int cz_phm_set_asic_block_gating(struct pp_hwmgr *hwmgr, enum PHM_AsicBlock block, enum PHM_ClockGateSetting gating)
{
	int ret = 0;

	switch (block) {
	case PHM_AsicBlock_UVD_MVC:
	case PHM_AsicBlock_UVD:
	case PHM_AsicBlock_UVD_HD:
	case PHM_AsicBlock_UVD_SD:
		if (gating == PHM_ClockGateSetting_StaticOff)
			ret = cz_dpm_powerdown_uvd(hwmgr);
		else
			ret = cz_dpm_powerup_uvd(hwmgr);
		break;
	case PHM_AsicBlock_GFX:
	default:
		break;
	}

	return ret;
}


bool cz_phm_is_safe_for_asic_block(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state, enum PHM_AsicBlock block)
{
	return true;
}


int cz_phm_enable_disable_gfx_power_gating(struct pp_hwmgr *hwmgr, bool enable)
{
	return 0;
}

int cz_phm_smu_power_up_down_pcie(struct pp_hwmgr *hwmgr, uint32_t target, bool up, uint32_t args)
{
	/* TODO */
	return 0;
}

int cz_phm_initialize_display_phy_access(struct pp_hwmgr *hwmgr, bool initialize, bool accesshw)
{
	/* TODO */
	return 0;
}

int cz_phm_get_display_phy_access_info(struct pp_hwmgr *hwmgr)
{
	/* TODO */
	return 0;
}

int cz_phm_gate_unused_display_phys(struct pp_hwmgr *hwmgr)
{
	/* TODO */
	return 0;
}

int cz_phm_ungate_all_display_phys(struct pp_hwmgr *hwmgr)
{
	/* TODO */
	return 0;
}

int cz_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable)
{
	struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
	uint32_t dpm_features = 0;

	if (enable &&
		phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_UVDDPM)) {
		cz_hwmgr->dpm_flags |= DPMFlags_UVD_Enabled;
		dpm_features |= UVD_DPM_MASK;
		smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_EnableAllSmuFeatures, dpm_features);
	} else {
		dpm_features |= UVD_DPM_MASK;
		cz_hwmgr->dpm_flags &= ~DPMFlags_UVD_Enabled;
		smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_DisableAllSmuFeatures, dpm_features);
	}
	return 0;
}

int cz_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
{
	struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
	uint32_t dpm_features = 0;

	if (enable && phm_cap_enabled(
			hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_VCEDPM)) {
		cz_hwmgr->dpm_flags |= DPMFlags_VCE_Enabled;
		dpm_features |= VCE_DPM_MASK;
		smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_EnableAllSmuFeatures, dpm_features);
	} else {
		dpm_features |= VCE_DPM_MASK;
		cz_hwmgr->dpm_flags &= ~DPMFlags_VCE_Enabled;
		smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_DisableAllSmuFeatures, dpm_features);
	}

	return 0;
}


void cz_dpm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
{
	struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);

	cz_hwmgr->uvd_power_gated = bgate;

	if (bgate) {
		cgs_set_powergating_state(hwmgr->device,
				AMD_IP_BLOCK_TYPE_UVD,
				AMD_PG_STATE_GATE);
		cgs_set_clockgating_state(hwmgr->device,
				AMD_IP_BLOCK_TYPE_UVD,
				AMD_CG_STATE_GATE);
		cz_dpm_update_uvd_dpm(hwmgr, true);
		cz_dpm_powerdown_uvd(hwmgr);
	} else {
		cz_dpm_powerup_uvd(hwmgr);
		cgs_set_clockgating_state(hwmgr->device,
				AMD_IP_BLOCK_TYPE_UVD,
				AMD_CG_STATE_UNGATE);
		cgs_set_powergating_state(hwmgr->device,
				AMD_IP_BLOCK_TYPE_UVD,
				AMD_PG_STATE_UNGATE);
		cz_dpm_update_uvd_dpm(hwmgr, false);
	}

}

void cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
{
	struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);

	if (bgate) {
		cgs_set_powergating_state(
				hwmgr->device,
				AMD_IP_BLOCK_TYPE_VCE,
				AMD_PG_STATE_GATE);
		cgs_set_clockgating_state(
				hwmgr->device,
				AMD_IP_BLOCK_TYPE_VCE,
				AMD_CG_STATE_GATE);
		cz_enable_disable_vce_dpm(hwmgr, false);
		cz_dpm_powerdown_vce(hwmgr);
		cz_hwmgr->vce_power_gated = true;
	} else {
		cz_dpm_powerup_vce(hwmgr);
		cz_hwmgr->vce_power_gated = false;
		cgs_set_clockgating_state(
				hwmgr->device,
				AMD_IP_BLOCK_TYPE_VCE,
				AMD_CG_STATE_UNGATE);
		cgs_set_powergating_state(
				hwmgr->device,
				AMD_IP_BLOCK_TYPE_VCE,
				AMD_PG_STATE_UNGATE);
		cz_dpm_update_vce_dpm(hwmgr);
		cz_enable_disable_vce_dpm(hwmgr, true);
	}
}

@ -1,36 +0,0 @@
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef _CZ_CLOCK_POWER_GATING_H_
#define _CZ_CLOCK_POWER_GATING_H_

#include "cz_hwmgr.h"
#include "pp_asicblocks.h"

extern int cz_phm_set_asic_block_gating(struct pp_hwmgr *hwmgr, enum PHM_AsicBlock block, enum PHM_ClockGateSetting gating);
extern const struct phm_master_table_header cz_phm_enable_clock_power_gatings_master;
extern void cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate);
extern void cz_dpm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate);
extern int cz_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable);
extern int cz_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable);
#endif /* _CZ_CLOCK_POWER_GATING_H_ */

@ -30,22 +30,24 @@
#include <drm/amdgpu_drm.h>
#include "power_state.h"
#include "hwmgr.h"
#include "pppcielanes.h"
#include "ppatomctrl.h"
#include "ppsmc.h"
#include "amd_acpi.h"
#include "pp_psm.h"

extern const struct pp_smumgr_func ci_smu_funcs;
extern const struct pp_smumgr_func cz_smu_funcs;
extern const struct pp_smumgr_func smu8_smu_funcs;
extern const struct pp_smumgr_func iceland_smu_funcs;
extern const struct pp_smumgr_func tonga_smu_funcs;
extern const struct pp_smumgr_func fiji_smu_funcs;
extern const struct pp_smumgr_func polaris10_smu_funcs;
extern const struct pp_smumgr_func vega10_smu_funcs;
extern const struct pp_smumgr_func rv_smu_funcs;
extern const struct pp_smumgr_func smu10_smu_funcs;

extern int smu7_init_function_pointers(struct pp_hwmgr *hwmgr);
extern int smu8_init_function_pointers(struct pp_hwmgr *hwmgr);
extern int vega10_hwmgr_init(struct pp_hwmgr *hwmgr);
extern int smu10_init_function_pointers(struct pp_hwmgr *hwmgr);

extern int cz_init_function_pointers(struct pp_hwmgr *hwmgr);
static int polaris_set_asic_special_caps(struct pp_hwmgr *hwmgr);
static void hwmgr_init_default_caps(struct pp_hwmgr *hwmgr);
static int hwmgr_set_user_specify_caps(struct pp_hwmgr *hwmgr);

@ -54,32 +56,6 @@ static int tonga_set_asic_special_caps(struct pp_hwmgr *hwmgr);
static int topaz_set_asic_special_caps(struct pp_hwmgr *hwmgr);
static int ci_set_asic_special_caps(struct pp_hwmgr *hwmgr);

uint8_t convert_to_vid(uint16_t vddc)
{
	return (uint8_t) ((6200 - (vddc * VOLTAGE_SCALE)) / 25);
}

uint16_t convert_to_vddc(uint8_t vid)
{
	return (uint16_t) ((6200 - (vid * 25)) / VOLTAGE_SCALE);
}
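
/*
 * Round-trip sketch (illustrative, assuming VOLTAGE_SCALE == 4 as defined
 * elsewhere in powerplay): convert_to_vid(1100) = (6200 - 1100 * 4) / 25
 * = 72, and convert_to_vddc(72) = (6200 - 72 * 25) / 4 = 1100 mV again.
 */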

uint32_t phm_set_field_to_u32(u32 offset, u32 original_data, u32 field, u32 size)
{
	u32 mask = 0;
	u32 shift = 0;

	shift = (offset % 4) << 3;
	if (size == sizeof(uint8_t))
		mask = 0xFF << shift;
	else if (size == sizeof(uint16_t))
		mask = 0xFFFF << shift;

	original_data &= ~mask;
	original_data |= (field << shift);
	return original_data;
}

static int phm_thermal_l2h_irq(void *private_data,
		unsigned src_id, const uint32_t *iv_entry)
{

@ -140,23 +116,11 @@ static void hwmgr_init_workload_prority(struct pp_hwmgr *hwmgr)
	hwmgr->workload_setting[4] = PP_SMC_POWER_PROFILE_COMPUTE;
}

int hwmgr_early_init(struct pp_instance *handle)
int hwmgr_early_init(struct pp_hwmgr *hwmgr)
{
	struct pp_hwmgr *hwmgr;

	if (handle == NULL)
	if (hwmgr == NULL)
		return -EINVAL;

	hwmgr = kzalloc(sizeof(struct pp_hwmgr), GFP_KERNEL);
	if (hwmgr == NULL)
		return -ENOMEM;

	handle->hwmgr = hwmgr;
	hwmgr->adev = handle->parent;
	hwmgr->device = handle->device;
	hwmgr->chip_family = ((struct amdgpu_device *)handle->parent)->family;
	hwmgr->chip_id = ((struct amdgpu_device *)handle->parent)->asic_type;
	hwmgr->feature_mask = amdgpu_pp_feature_mask;
	hwmgr->usec_timeout = AMD_MAX_USEC_TIMEOUT;
	hwmgr->power_source = PP_PowerSource_AC;
	hwmgr->pp_table_version = PP_TABLE_V1;

@ -180,8 +144,8 @@ int hwmgr_early_init(struct pp_instance *handle)
		break;
	case AMDGPU_FAMILY_CZ:
		hwmgr->od_enabled = false;
		hwmgr->smumgr_funcs = &cz_smu_funcs;
		cz_init_function_pointers(hwmgr);
		hwmgr->smumgr_funcs = &smu8_smu_funcs;
		smu8_init_function_pointers(hwmgr);
		break;
	case AMDGPU_FAMILY_VI:
		switch (hwmgr->chip_id) {

@ -230,8 +194,8 @@ int hwmgr_early_init(struct pp_instance *handle)
		switch (hwmgr->chip_id) {
		case CHIP_RAVEN:
			hwmgr->od_enabled = false;
			hwmgr->smumgr_funcs = &rv_smu_funcs;
			rv_init_function_pointers(hwmgr);
			hwmgr->smumgr_funcs = &smu10_smu_funcs;
			smu10_init_function_pointers(hwmgr);
			break;
		default:
			return -EINVAL;

@ -244,16 +208,13 @@ int hwmgr_early_init(struct pp_instance *handle)
	return 0;
}

int hwmgr_hw_init(struct pp_instance *handle)
int hwmgr_hw_init(struct pp_hwmgr *hwmgr)
{
	struct pp_hwmgr *hwmgr;
	int ret = 0;

	if (handle == NULL)
	if (hwmgr == NULL)
		return -EINVAL;

	hwmgr = handle->hwmgr;

	if (hwmgr->pptable_func == NULL ||
	    hwmgr->pptable_func->pptable_init == NULL ||
	    hwmgr->hwmgr_func->backend_init == NULL)

@ -299,15 +260,11 @@ int hwmgr_hw_init(struct pp_instance *handle)
	return ret;
}

int hwmgr_hw_fini(struct pp_instance *handle)
int hwmgr_hw_fini(struct pp_hwmgr *hwmgr)
{
	struct pp_hwmgr *hwmgr;

	if (handle == NULL || handle->hwmgr == NULL)
	if (hwmgr == NULL)
		return -EINVAL;

	hwmgr = handle->hwmgr;

	phm_stop_thermal_controller(hwmgr);
	psm_set_boot_states(hwmgr);
	psm_adjust_power_state_dynamic(hwmgr, false, NULL);

@ -321,15 +278,13 @@ int hwmgr_hw_fini(struct pp_instance *handle)
	return psm_fini_power_state_table(hwmgr);
}

int hwmgr_hw_suspend(struct pp_instance *handle)
int hwmgr_hw_suspend(struct pp_hwmgr *hwmgr)
{
	struct pp_hwmgr *hwmgr;
	int ret = 0;

	if (handle == NULL || handle->hwmgr == NULL)
	if (hwmgr == NULL)
		return -EINVAL;

	hwmgr = handle->hwmgr;
	phm_disable_smc_firmware_ctf(hwmgr);
	ret = psm_set_boot_states(hwmgr);
	if (ret)

@ -342,15 +297,13 @@ int hwmgr_hw_suspend(struct pp_instance *handle)
	return ret;
}

int hwmgr_hw_resume(struct pp_instance *handle)
int hwmgr_hw_resume(struct pp_hwmgr *hwmgr)
{
	struct pp_hwmgr *hwmgr;
	int ret = 0;

	if (handle == NULL || handle->hwmgr == NULL)
	if (hwmgr == NULL)
		return -EINVAL;

	hwmgr = handle->hwmgr;
	ret = phm_setup_asic(hwmgr);
	if (ret)
		return ret;

@ -385,17 +338,14 @@ static enum PP_StateUILabel power_state_convert(enum amd_pm_state_type state)
	}
}

int hwmgr_handle_task(struct pp_instance *handle, enum amd_pp_task task_id,
int hwmgr_handle_task(struct pp_hwmgr *hwmgr, enum amd_pp_task task_id,
		enum amd_pm_state_type *user_state)
{
	int ret = 0;
	struct pp_hwmgr *hwmgr;

	if (handle == NULL || handle->hwmgr == NULL)
	if (hwmgr == NULL)
		return -EINVAL;

	hwmgr = handle->hwmgr;

	switch (task_id) {
	case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
		ret = phm_set_cpu_power_state(hwmgr);

@ -432,468 +382,6 @@ int hwmgr_handle_task(struct pp_instance *handle, enum amd_pp_task task_id,
	}
	return ret;
}
/**
 * Returns once the part of the register indicated by the mask has
 * reached the given value.
 */
int phm_wait_on_register(struct pp_hwmgr *hwmgr, uint32_t index,
			 uint32_t value, uint32_t mask)
{
	uint32_t i;
	uint32_t cur_value;

	if (hwmgr == NULL || hwmgr->device == NULL) {
		pr_err("Invalid Hardware Manager!");
		return -EINVAL;
	}

	for (i = 0; i < hwmgr->usec_timeout; i++) {
		cur_value = cgs_read_register(hwmgr->device, index);
		if ((cur_value & mask) == (value & mask))
			break;
		udelay(1);
	}

	/* timeout means wrong logic */
	if (i == hwmgr->usec_timeout)
		return -1;
	return 0;
}
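
/*
 * Usage sketch (illustrative; mmSMC_RESP below is a stand-in register
 * name, not from this file): phm_wait_on_register(hwmgr, mmSMC_RESP, 1, 0x1)
 * polls bit 0 of the register once per microsecond, for up to
 * hwmgr->usec_timeout iterations, until it reads 1.
 */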


/**
 * Returns once the part of the register indicated by the mask has
 * reached the given value. The indirect space is described by giving
 * the memory-mapped index of the indirect index register.
 */
int phm_wait_on_indirect_register(struct pp_hwmgr *hwmgr,
				uint32_t indirect_port,
				uint32_t index,
				uint32_t value,
				uint32_t mask)
{
	if (hwmgr == NULL || hwmgr->device == NULL) {
		pr_err("Invalid Hardware Manager!");
		return -EINVAL;
	}

	cgs_write_register(hwmgr->device, indirect_port, index);
	return phm_wait_on_register(hwmgr, indirect_port + 1, mask, value);
}

int phm_wait_for_register_unequal(struct pp_hwmgr *hwmgr,
				uint32_t index,
				uint32_t value, uint32_t mask)
{
	uint32_t i;
	uint32_t cur_value;

	if (hwmgr == NULL || hwmgr->device == NULL)
		return -EINVAL;

	for (i = 0; i < hwmgr->usec_timeout; i++) {
		cur_value = cgs_read_register(hwmgr->device,
					index);
		if ((cur_value & mask) != (value & mask))
			break;
		udelay(1);
	}

	/* timeout means wrong logic */
	if (i == hwmgr->usec_timeout)
		return -ETIME;
	return 0;
}

int phm_wait_for_indirect_register_unequal(struct pp_hwmgr *hwmgr,
				uint32_t indirect_port,
				uint32_t index,
				uint32_t value,
				uint32_t mask)
{
	if (hwmgr == NULL || hwmgr->device == NULL)
		return -EINVAL;

	cgs_write_register(hwmgr->device, indirect_port, index);
	return phm_wait_for_register_unequal(hwmgr, indirect_port + 1,
					value, mask);
}

bool phm_cf_want_uvd_power_gating(struct pp_hwmgr *hwmgr)
{
	return phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_UVDPowerGating);
}

bool phm_cf_want_vce_power_gating(struct pp_hwmgr *hwmgr)
{
	return phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_VCEPowerGating);
}


int phm_trim_voltage_table(struct pp_atomctrl_voltage_table *vol_table)
{
	uint32_t i, j;
	uint16_t vvalue;
	bool found = false;
	struct pp_atomctrl_voltage_table *table;

	PP_ASSERT_WITH_CODE((NULL != vol_table),
			"Voltage Table empty.", return -EINVAL);

	table = kzalloc(sizeof(struct pp_atomctrl_voltage_table),
			GFP_KERNEL);

	if (NULL == table)
		return -EINVAL;

	table->mask_low = vol_table->mask_low;
	table->phase_delay = vol_table->phase_delay;

	for (i = 0; i < vol_table->count; i++) {
		vvalue = vol_table->entries[i].value;
		found = false;

		for (j = 0; j < table->count; j++) {
			if (vvalue == table->entries[j].value) {
				found = true;
				break;
			}
		}

		/* copy unique voltage values only */
		if (!found) {
			table->entries[table->count].value = vvalue;
			table->entries[table->count].smio_low =
					vol_table->entries[i].smio_low;
			table->count++;
		}
	}

	memcpy(vol_table, table, sizeof(struct pp_atomctrl_voltage_table));
	kfree(table);
	table = NULL;
	return 0;
}

int phm_get_svi2_mvdd_voltage_table(struct pp_atomctrl_voltage_table *vol_table,
		phm_ppt_v1_clock_voltage_dependency_table *dep_table)
{
	uint32_t i;
	int result;

	PP_ASSERT_WITH_CODE((0 != dep_table->count),
			"Voltage Dependency Table empty.", return -EINVAL);

	PP_ASSERT_WITH_CODE((NULL != vol_table),
			"vol_table empty.", return -EINVAL);

	vol_table->mask_low = 0;
	vol_table->phase_delay = 0;
	vol_table->count = dep_table->count;

	for (i = 0; i < dep_table->count; i++) {
		vol_table->entries[i].value = dep_table->entries[i].mvdd;
		vol_table->entries[i].smio_low = 0;
	}

	result = phm_trim_voltage_table(vol_table);
	PP_ASSERT_WITH_CODE((0 == result),
			"Failed to trim MVDD table.", return result);

	return 0;
}

int phm_get_svi2_vddci_voltage_table(struct pp_atomctrl_voltage_table *vol_table,
		phm_ppt_v1_clock_voltage_dependency_table *dep_table)
{
	uint32_t i;
	int result;

	PP_ASSERT_WITH_CODE((0 != dep_table->count),
			"Voltage Dependency Table empty.", return -EINVAL);

	PP_ASSERT_WITH_CODE((NULL != vol_table),
			"vol_table empty.", return -EINVAL);

	vol_table->mask_low = 0;
	vol_table->phase_delay = 0;
	vol_table->count = dep_table->count;

	for (i = 0; i < dep_table->count; i++) {
		vol_table->entries[i].value = dep_table->entries[i].vddci;
		vol_table->entries[i].smio_low = 0;
	}

	result = phm_trim_voltage_table(vol_table);
	PP_ASSERT_WITH_CODE((0 == result),
			"Failed to trim VDDCI table.", return result);

	return 0;
}

int phm_get_svi2_vdd_voltage_table(struct pp_atomctrl_voltage_table *vol_table,
		phm_ppt_v1_voltage_lookup_table *lookup_table)
{
	int i = 0;

	PP_ASSERT_WITH_CODE((0 != lookup_table->count),
			"Voltage Lookup Table empty.", return -EINVAL);

	PP_ASSERT_WITH_CODE((NULL != vol_table),
			"vol_table empty.", return -EINVAL);

	vol_table->mask_low = 0;
	vol_table->phase_delay = 0;

	vol_table->count = lookup_table->count;

	for (i = 0; i < vol_table->count; i++) {
		vol_table->entries[i].value = lookup_table->entries[i].us_vdd;
		vol_table->entries[i].smio_low = 0;
	}

	return 0;
}

void phm_trim_voltage_table_to_fit_state_table(uint32_t max_vol_steps,
				struct pp_atomctrl_voltage_table *vol_table)
{
	unsigned int i, diff;

	if (vol_table->count <= max_vol_steps)
		return;

	diff = vol_table->count - max_vol_steps;

	/* drop the first 'diff' entries and shift the rest down */
	for (i = 0; i < max_vol_steps; i++)
		vol_table->entries[i] = vol_table->entries[i + diff];

	vol_table->count = max_vol_steps;

	return;
}
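
/*
 * Example (illustrative): with 6 entries and max_vol_steps == 4, diff == 2,
 * so entries 2..5 shift down to slots 0..3 and the first two entries are
 * dropped -- the lowest voltages, assuming the table is sorted ascending.
 */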

int phm_reset_single_dpm_table(void *table,
		uint32_t count, int max)
{
	int i;

	struct vi_dpm_table *dpm_table = (struct vi_dpm_table *)table;

	dpm_table->count = count > max ? max : count;

	for (i = 0; i < dpm_table->count; i++)
		dpm_table->dpm_level[i].enabled = false;

	return 0;
}

void phm_setup_pcie_table_entry(
	void *table,
	uint32_t index, uint32_t pcie_gen,
	uint32_t pcie_lanes)
{
	struct vi_dpm_table *dpm_table = (struct vi_dpm_table *)table;
	dpm_table->dpm_level[index].value = pcie_gen;
	dpm_table->dpm_level[index].param1 = pcie_lanes;
	dpm_table->dpm_level[index].enabled = 1;
}

int32_t phm_get_dpm_level_enable_mask_value(void *table)
{
	int32_t i;
	int32_t mask = 0;
	struct vi_dpm_table *dpm_table = (struct vi_dpm_table *)table;

	for (i = dpm_table->count; i > 0; i--) {
		mask = mask << 1;
		if (dpm_table->dpm_level[i - 1].enabled)
			mask |= 0x1;
		else
			mask &= 0xFFFFFFFE;
	}

	return mask;
}
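
/*
 * Example: for a four-level table with levels 0 and 2 enabled, the loop
 * walks i = 4..1, shifting the mask left and setting bit 0 for each
 * enabled level, so bit k of the result is set iff level k is enabled:
 * here it returns 0x5.
 */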

uint8_t phm_get_voltage_index(
		struct phm_ppt_v1_voltage_lookup_table *lookup_table, uint16_t voltage)
{
	uint8_t count = (uint8_t) (lookup_table->count);
	uint8_t i;

	PP_ASSERT_WITH_CODE((NULL != lookup_table),
			"Lookup Table empty.", return 0);
	PP_ASSERT_WITH_CODE((0 != count),
			"Lookup Table empty.", return 0);

	for (i = 0; i < lookup_table->count; i++) {
		/* find the first voltage equal to or greater than the request */
		if (lookup_table->entries[i].us_vdd >= voltage)
			return i;
	}
	/* the requested voltage is greater than the max voltage in the table */
	return i - 1;
}
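
/*
 * Example: with us_vdd entries {700, 800, 900}, a request of 850 returns
 * index 2 (the first entry >= the request); a request above 900 falls out
 * of the loop and returns the last index, 2.
 */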

uint8_t phm_get_voltage_id(pp_atomctrl_voltage_table *voltage_table,
		uint32_t voltage)
{
	uint8_t count = (uint8_t) (voltage_table->count);
	uint8_t i = 0;

	PP_ASSERT_WITH_CODE((NULL != voltage_table),
		"Voltage Table empty.", return 0;);
	PP_ASSERT_WITH_CODE((0 != count),
		"Voltage Table empty.", return 0;);

	for (i = 0; i < count; i++) {
		/* find the first voltage equal to or greater than the request */
		if (voltage_table->entries[i].value >= voltage)
			return i;
	}

	/* the requested voltage is greater than the max voltage in the table */
	return i - 1;
}

uint16_t phm_find_closest_vddci(struct pp_atomctrl_voltage_table *vddci_table, uint16_t vddci)
{
	uint32_t i;

	for (i = 0; i < vddci_table->count; i++) {
		if (vddci_table->entries[i].value >= vddci)
			return vddci_table->entries[i].value;
	}

	pr_debug("vddci is larger than max value in vddci_table\n");
	return vddci_table->entries[i-1].value;
}
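
/*
 * Example: with vddci entries {800, 850, 900}, a request of 820 returns
 * 850 (the first entry >= the request); a request of 950 overruns the
 * table, logs the debug message, and returns the largest entry, 900.
 */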

int phm_find_boot_level(void *table,
		uint32_t value, uint32_t *boot_level)
{
	int result = -EINVAL;
	uint32_t i;
	struct vi_dpm_table *dpm_table = (struct vi_dpm_table *)table;

	for (i = 0; i < dpm_table->count; i++) {
		if (value == dpm_table->dpm_level[i].value) {
			*boot_level = i;
			result = 0;
		}
	}

	return result;
}

int phm_get_sclk_for_voltage_evv(struct pp_hwmgr *hwmgr,
	phm_ppt_v1_voltage_lookup_table *lookup_table,
	uint16_t virtual_voltage_id, int32_t *sclk)
{
	uint8_t entry_id;
	uint8_t voltage_id;
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);

	PP_ASSERT_WITH_CODE(lookup_table->count != 0, "Lookup table is empty", return -EINVAL);

	/* search for leakage voltage ID 0xff01 ~ 0xff08 and the matching sclk */
	for (entry_id = 0; entry_id < table_info->vdd_dep_on_sclk->count; entry_id++) {
		voltage_id = table_info->vdd_dep_on_sclk->entries[entry_id].vddInd;
		if (lookup_table->entries[voltage_id].us_vdd == virtual_voltage_id)
			break;
	}

	if (entry_id >= table_info->vdd_dep_on_sclk->count) {
		pr_debug("Can't find requested voltage id in vdd_dep_on_sclk table\n");
		return -EINVAL;
	}

	*sclk = table_info->vdd_dep_on_sclk->entries[entry_id].clk;

	return 0;
}

/**
 * Initialize Dynamic State Adjustment Rule Settings
 *
 * @param hwmgr  the address of the powerplay hardware manager.
 */
int phm_initializa_dynamic_state_adjustment_rule_settings(struct pp_hwmgr *hwmgr)
{
	uint32_t table_size;
	struct phm_clock_voltage_dependency_table *table_clk_vlt;
	struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);

	/* initialize vddc_dep_on_dal_pwrl table */
	table_size = sizeof(uint32_t) + 4 * sizeof(struct phm_clock_voltage_dependency_record);
	table_clk_vlt = kzalloc(table_size, GFP_KERNEL);

	if (NULL == table_clk_vlt) {
		pr_err("Cannot allocate space for vddc_dep_on_dal_pwrl!\n");
		return -ENOMEM;
	} else {
		table_clk_vlt->count = 4;
		table_clk_vlt->entries[0].clk = PP_DAL_POWERLEVEL_ULTRALOW;
		table_clk_vlt->entries[0].v = 0;
		table_clk_vlt->entries[1].clk = PP_DAL_POWERLEVEL_LOW;
		table_clk_vlt->entries[1].v = 720;
		table_clk_vlt->entries[2].clk = PP_DAL_POWERLEVEL_NOMINAL;
		table_clk_vlt->entries[2].v = 810;
		table_clk_vlt->entries[3].clk = PP_DAL_POWERLEVEL_PERFORMANCE;
		table_clk_vlt->entries[3].v = 900;
		if (pptable_info != NULL)
			pptable_info->vddc_dep_on_dal_pwrl = table_clk_vlt;
		hwmgr->dyn_state.vddc_dep_on_dal_pwrl = table_clk_vlt;
	}

	return 0;
}

uint32_t phm_get_lowest_enabled_level(struct pp_hwmgr *hwmgr, uint32_t mask)
{
	uint32_t level = 0;

	while (0 == (mask & (1 << level)))
		level++;

	return level;
}
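
/*
 * Example: mask 0x6 (levels 1 and 2 enabled) returns level 1. The loop
 * assumes at least one bit of the mask is set; a zero mask would spin
 * past the top bit.
 */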

void phm_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr)
{
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)hwmgr->pptable;
	struct phm_clock_voltage_dependency_table *table =
				table_info->vddc_dep_on_dal_pwrl;
	struct phm_ppt_v1_clock_voltage_dependency_table *vddc_table;
	enum PP_DAL_POWERLEVEL dal_power_level = hwmgr->dal_power_level;
	uint32_t req_vddc = 0, req_volt, i;

	if (!table || table->count <= 0
		|| dal_power_level < PP_DAL_POWERLEVEL_ULTRALOW
		|| dal_power_level > PP_DAL_POWERLEVEL_PERFORMANCE)
		return;

	for (i = 0; i < table->count; i++) {
		if (dal_power_level == table->entries[i].clk) {
			req_vddc = table->entries[i].v;
			break;
		}
	}

	vddc_table = table_info->vdd_dep_on_sclk;
	for (i = 0; i < vddc_table->count; i++) {
		if (req_vddc <= vddc_table->entries[i].vddc) {
			req_volt = (((uint32_t)vddc_table->entries[i].vddc) * VOLTAGE_SCALE);
			smum_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_VddC_Request, req_volt);
			return;
		}
	}
	pr_err("DAL requested level cannot be matched to an available voltage in the VDDC DPM table\n");
}

void hwmgr_init_default_caps(struct pp_hwmgr *hwmgr)
{

@ -954,25 +442,6 @@ int hwmgr_set_user_specify_caps(struct pp_hwmgr *hwmgr)
	return 0;
}

int phm_get_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
				uint32_t sclk, uint16_t id, uint16_t *voltage)
{
	uint32_t vol;
	int ret = 0;

	if (hwmgr->chip_id < CHIP_TONGA) {
		ret = atomctrl_get_voltage_evv(hwmgr, id, voltage);
	} else if (hwmgr->chip_id < CHIP_POLARIS10) {
		ret = atomctrl_get_voltage_evv_on_sclk(hwmgr, voltage_type, sclk, id, voltage);
		if (*voltage >= 2000 || *voltage == 0)
			*voltage = 1150;
	} else {
		ret = atomctrl_get_voltage_evv_on_sclk_ai(hwmgr, voltage_type, sclk, id, &vol);
		*voltage = (uint16_t)(vol/100);
	}
	return ret;
}

int polaris_set_asic_special_caps(struct pp_hwmgr *hwmgr)
{
	phm_cap_set(hwmgr->platform_descriptor.platformCaps,

File diff suppressed because it is too large
Some files were not shown because too many files have changed in this diff