Merge tag 'amd-drm-next-5.14-2021-06-16' of https://gitlab.freedesktop.org/agd5f/linux into drm-next

amd-drm-next-5.14-2021-06-16:

amdgpu:
- Aldebaran fixes
- Expose ASIC-independent throttler status
- BACO fixes for navi1x
- Smartshift fixes
- Misc code cleanups
- RAS fixes for Sienna Cichlid
- Gamma verification fixes
- DC LTTPR fixes
- DP AUX timeout handling fixes
- GFX9, 10 powergating fixes

amdkfd:
- TLB flush fixes when using SDMA
- Locking fixes
- SVM fixes

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexander.deucher@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210617031719.4013-1-alexander.deucher@amd.com
commit d472b36efb
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -47,12 +47,6 @@ static struct {
 	spinlock_t mem_limit_lock;
 } kfd_mem_limit;
 
-/* Struct used for amdgpu_amdkfd_bo_validate */
-struct amdgpu_vm_parser {
-	uint32_t domain;
-	bool wait;
-};
-
 static const char * const domain_bit_to_string[] = {
 	"CPU",
 	"GTT",
@@ -348,11 +342,9 @@ static int amdgpu_amdkfd_bo_validate(struct amdgpu_bo *bo, uint32_t domain,
 	return ret;
 }
 
-static int amdgpu_amdkfd_validate(void *param, struct amdgpu_bo *bo)
+static int amdgpu_amdkfd_validate_vm_bo(void *_unused, struct amdgpu_bo *bo)
 {
-	struct amdgpu_vm_parser *p = param;
-
-	return amdgpu_amdkfd_bo_validate(bo, p->domain, p->wait);
+	return amdgpu_amdkfd_bo_validate(bo, bo->allowed_domains, false);
 }
 
 /* vm_validate_pt_pd_bos - Validate page table and directory BOs
@@ -364,28 +356,23 @@ static int amdgpu_amdkfd_validate(void *param, struct amdgpu_bo *bo)
  */
 static int vm_validate_pt_pd_bos(struct amdgpu_vm *vm)
 {
-	struct amdgpu_bo *pd = vm->root.base.bo;
+	struct amdgpu_bo *pd = vm->root.bo;
 	struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
-	struct amdgpu_vm_parser param;
 	int ret;
 
-	param.domain = AMDGPU_GEM_DOMAIN_VRAM;
-	param.wait = false;
-
-	ret = amdgpu_vm_validate_pt_bos(adev, vm, amdgpu_amdkfd_validate,
-					&param);
+	ret = amdgpu_vm_validate_pt_bos(adev, vm, amdgpu_amdkfd_validate_vm_bo, NULL);
 	if (ret) {
 		pr_err("failed to validate PT BOs\n");
 		return ret;
 	}
 
-	ret = amdgpu_amdkfd_validate(&param, pd);
+	ret = amdgpu_amdkfd_validate_vm_bo(NULL, pd);
 	if (ret) {
 		pr_err("failed to validate PD\n");
 		return ret;
 	}
 
-	vm->pd_phys_addr = amdgpu_gmc_pd_addr(vm->root.base.bo);
+	vm->pd_phys_addr = amdgpu_gmc_pd_addr(vm->root.bo);
 
 	if (vm->use_cpu_for_update) {
 		ret = amdgpu_bo_kmap(pd, NULL);
@@ -400,7 +387,7 @@ static int vm_validate_pt_pd_bos(struct amdgpu_vm *vm)
 
 static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync)
 {
-	struct amdgpu_bo *pd = vm->root.base.bo;
+	struct amdgpu_bo *pd = vm->root.bo;
 	struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
 	int ret;
 
@@ -652,7 +639,7 @@ kfd_mem_attach_dmabuf(struct amdgpu_device *adev, struct kgd_mem *mem,
 		}
 	}
 
-	gobj = amdgpu_gem_prime_import(&adev->ddev, mem->dmabuf);
+	gobj = amdgpu_gem_prime_import(adev_to_drm(adev), mem->dmabuf);
 	if (IS_ERR(gobj))
 		return PTR_ERR(gobj);
 
@@ -1166,7 +1153,7 @@ static int process_sync_pds_resv(struct amdkfd_process_info *process_info,
 
 	list_for_each_entry(peer_vm, &process_info->vm_list_head,
 			    vm_list_node) {
-		struct amdgpu_bo *pd = peer_vm->root.base.bo;
+		struct amdgpu_bo *pd = peer_vm->root.bo;
 
 		ret = amdgpu_sync_resv(NULL, sync, pd->tbo.base.resv,
 				       AMDGPU_SYNC_NE_OWNER,
@@ -1233,7 +1220,7 @@ static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
 	vm->process_info = *process_info;
 
 	/* Validate page directory and attach eviction fence */
-	ret = amdgpu_bo_reserve(vm->root.base.bo, true);
+	ret = amdgpu_bo_reserve(vm->root.bo, true);
 	if (ret)
 		goto reserve_pd_fail;
 	ret = vm_validate_pt_pd_bos(vm);
@@ -1241,16 +1228,16 @@ static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
 		pr_err("validate_pt_pd_bos() failed\n");
 		goto validate_pd_fail;
 	}
-	ret = amdgpu_bo_sync_wait(vm->root.base.bo,
+	ret = amdgpu_bo_sync_wait(vm->root.bo,
 				  AMDGPU_FENCE_OWNER_KFD, false);
 	if (ret)
 		goto wait_pd_fail;
-	ret = dma_resv_reserve_shared(vm->root.base.bo->tbo.base.resv, 1);
+	ret = dma_resv_reserve_shared(vm->root.bo->tbo.base.resv, 1);
 	if (ret)
 		goto reserve_shared_fail;
-	amdgpu_bo_fence(vm->root.base.bo,
+	amdgpu_bo_fence(vm->root.bo,
 			&vm->process_info->eviction_fence->base, true);
-	amdgpu_bo_unreserve(vm->root.base.bo);
+	amdgpu_bo_unreserve(vm->root.bo);
 
 	/* Update process info */
 	mutex_lock(&vm->process_info->lock);
@@ -1264,7 +1251,7 @@ static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
 reserve_shared_fail:
 wait_pd_fail:
 validate_pd_fail:
-	amdgpu_bo_unreserve(vm->root.base.bo);
+	amdgpu_bo_unreserve(vm->root.bo);
 reserve_pd_fail:
 	vm->process_info = NULL;
 	if (info) {
@@ -1319,7 +1306,7 @@ void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
 				    struct amdgpu_vm *vm)
 {
 	struct amdkfd_process_info *process_info = vm->process_info;
-	struct amdgpu_bo *pd = vm->root.base.bo;
+	struct amdgpu_bo *pd = vm->root.bo;
 
 	if (!process_info)
 		return;
@@ -1375,7 +1362,7 @@ void amdgpu_amdkfd_gpuvm_release_process_vm(struct kgd_dev *kgd, void *drm_priv)
 uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *drm_priv)
 {
 	struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
-	struct amdgpu_bo *pd = avm->root.base.bo;
+	struct amdgpu_bo *pd = avm->root.bo;
 	struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
 
 	if (adev->asic_type < CHIP_VEGA10)
@@ -2402,7 +2389,7 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
 	/* Attach eviction fence to PD / PT BOs */
 	list_for_each_entry(peer_vm, &process_info->vm_list_head,
 			    vm_list_node) {
-		struct amdgpu_bo *bo = peer_vm->root.base.bo;
+		struct amdgpu_bo *bo = peer_vm->root.bo;
 
 		amdgpu_bo_fence(bo, &process_info->eviction_fence->base, true);
 	}
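The hunks above drop the amdgpu_vm_parser helper struct: the validate callback no longer receives a caller-chosen domain through an opaque pointer, it validates each BO against its own allowed_domains. A minimal stand-alone C sketch of that refactor (names here are illustrative, not the driver's real API):

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for the driver types. */
struct bo { unsigned allowed_domains; };

static int validate(struct bo *bo, unsigned domain, bool wait)
{
	printf("validate in domain 0x%x (wait=%d)\n", domain, wait);
	return 0;
}

/* New style: the callback ignores its opaque argument and derives
 * everything from the BO itself, so callers can simply pass NULL. */
static int validate_vm_bo(void *unused, struct bo *bo)
{
	(void)unused;
	return validate(bo, bo->allowed_domains, false);
}

int main(void)
{
	struct bo pd = { .allowed_domains = 0x4 /* e.g. VRAM */ };
	return validate_vm_bo(NULL, &pd);
}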
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -832,7 +832,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
 	if (r)
 		return r;
 
-	p->job->vm_pd_addr = amdgpu_gmc_pd_addr(vm->root.base.bo);
+	p->job->vm_pd_addr = amdgpu_gmc_pd_addr(vm->root.bo);
 
 	if (amdgpu_vm_debug) {
 		/* Invalidate all BOs to test for userspace bugs */
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -1304,11 +1304,11 @@ static int amdgpu_debugfs_vm_info_show(struct seq_file *m, void *unused)
 
 		seq_printf(m, "pid:%d\tProcess:%s ----------\n",
 			   vm->task_info.pid, vm->task_info.process_name);
-		r = amdgpu_bo_reserve(vm->root.base.bo, true);
+		r = amdgpu_bo_reserve(vm->root.bo, true);
 		if (r)
 			break;
 		amdgpu_debugfs_vm_bo_info(vm, m);
-		amdgpu_bo_unreserve(vm->root.base.bo);
+		amdgpu_bo_unreserve(vm->root.bo);
 	}
 
 	mutex_unlock(&dev->filelist_mutex);
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -4124,6 +4124,7 @@ static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
 {
 	struct dma_fence *fence = NULL, *next = NULL;
 	struct amdgpu_bo *shadow;
+	struct amdgpu_bo_vm *vmbo;
 	long r = 1, tmo;
 
 	if (amdgpu_sriov_runtime(adev))
@@ -4133,8 +4134,8 @@ static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
 
 	dev_info(adev->dev, "recover vram bo from shadow start\n");
 	mutex_lock(&adev->shadow_list_lock);
-	list_for_each_entry(shadow, &adev->shadow_list, shadow_list) {
-
+	list_for_each_entry(vmbo, &adev->shadow_list, shadow_list) {
+		shadow = &vmbo->bo;
 		/* No need to recover an evicted BO */
 		if (shadow->tbo.resource->mem_type != TTM_PL_TT ||
 		    shadow->tbo.resource->start == AMDGPU_BO_INVALID_OFFSET ||
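This hunk retargets adev->shadow_list from struct amdgpu_bo to struct amdgpu_bo_vm (whose definition changes in the header hunks further down), so the recovery loop now iterates containers and derives the BO with shadow = &vmbo->bo. A self-contained sketch of that embedded-member pattern, using a toy linked list in place of the kernel's list_for_each_entry:

#include <stdio.h>

struct bo { int id; };

/* The container embeds the BO plus the list linkage, mirroring how
 * amdgpu_bo_vm now owns the shadow_list node. */
struct bo_vm {
	struct bo bo;
	struct bo_vm *next;   /* toy stand-in for struct list_head */
};

int main(void)
{
	struct bo_vm a = { .bo = { .id = 1 } };
	struct bo_vm b = { .bo = { .id = 2 } };
	a.next = &b;
	b.next = NULL;

	/* Iterate containers, then step into the embedded BO,
	 * just like "shadow = &vmbo->bo" in the recovery loop. */
	for (struct bo_vm *vmbo = &a; vmbo; vmbo = vmbo->next) {
		struct bo *shadow = &vmbo->bo;
		printf("recover shadow bo %d\n", shadow->id);
	}
	return 0;
}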
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
@@ -325,7 +325,7 @@ int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
 	return 0;
 }
 
-int amdgpu_discovery_get_ip_version(struct amdgpu_device *adev, int hw_id,
+int amdgpu_discovery_get_ip_version(struct amdgpu_device *adev, int hw_id, int number_instance,
 				    int *major, int *minor, int *revision)
 {
 	struct binary_header *bhdr;
@@ -357,7 +357,7 @@ int amdgpu_discovery_get_ip_version(struct amdgpu_device *adev, int hw_id,
 		for (j = 0; j < num_ips; j++) {
 			ip = (struct ip *)(adev->mman.discovery_bin + ip_offset);
 
-			if (le16_to_cpu(ip->hw_id) == hw_id) {
+			if ((le16_to_cpu(ip->hw_id) == hw_id) && (ip->number_instance == number_instance)) {
 				if (major)
 					*major = ip->major;
 				if (minor)
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h
@@ -30,7 +30,7 @@
 void amdgpu_discovery_fini(struct amdgpu_device *adev);
 int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev);
 void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev);
-int amdgpu_discovery_get_ip_version(struct amdgpu_device *adev, int hw_id,
+int amdgpu_discovery_get_ip_version(struct amdgpu_device *adev, int hw_id, int number_instance,
 				    int *major, int *minor, int *revision);
 int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev);
 
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
@@ -448,7 +448,7 @@ amdgpu_dma_buf_move_notify(struct dma_buf_attachment *attach)
 
 	for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) {
 		struct amdgpu_vm *vm = bo_base->vm;
-		struct dma_resv *resv = vm->root.base.bo->tbo.base.resv;
+		struct dma_resv *resv = vm->root.bo->tbo.base.resv;
 
 		if (ticket) {
 			/* When we get an error here it means that somebody
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c
@@ -69,13 +69,13 @@ void amdgpu_show_fdinfo(struct seq_file *m, struct file *f)
 	dev = PCI_SLOT(adev->pdev->devfn);
 	fn = PCI_FUNC(adev->pdev->devfn);
 
-	ret = amdgpu_bo_reserve(fpriv->vm.root.base.bo, false);
+	ret = amdgpu_bo_reserve(fpriv->vm.root.bo, false);
 	if (ret) {
 		DRM_ERROR("Fail to reserve bo\n");
 		return;
 	}
 	amdgpu_vm_get_memory(&fpriv->vm, &vram_mem, &gt_mem, &cpu_mem);
-	amdgpu_bo_unreserve(fpriv->vm.root.base.bo);
+	amdgpu_bo_unreserve(fpriv->vm.root.bo);
 	seq_printf(m, "pdev:\t%04x:%02x:%02x.%d\npasid:\t%u\n", domain, bus,
 		   dev, fn, fpriv->vm.pasid);
 	seq_printf(m, "vram mem:\t%llu kB\n", vram_mem/1024UL);
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -170,7 +170,7 @@ static int amdgpu_gem_object_open(struct drm_gem_object *obj,
 		return -EPERM;
 
 	if (abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID &&
-	    abo->tbo.base.resv != vm->root.base.bo->tbo.base.resv)
+	    abo->tbo.base.resv != vm->root.bo->tbo.base.resv)
 		return -EPERM;
 
 	r = amdgpu_bo_reserve(abo, false);
@@ -320,11 +320,11 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
 	}
 
 	if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
-		r = amdgpu_bo_reserve(vm->root.base.bo, false);
+		r = amdgpu_bo_reserve(vm->root.bo, false);
 		if (r)
 			return r;
 
-		resv = vm->root.base.bo->tbo.base.resv;
+		resv = vm->root.bo->tbo.base.resv;
 	}
 
 	initial_domain = (u32)(0xffffffff & args->in.domains);
@@ -353,9 +353,9 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
 		if (!r) {
 			struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);
 
-			abo->parent = amdgpu_bo_ref(vm->root.base.bo);
+			abo->parent = amdgpu_bo_ref(vm->root.bo);
 		}
-		amdgpu_bo_unreserve(vm->root.base.bo);
+		amdgpu_bo_unreserve(vm->root.bo);
 	}
 	if (r)
 		return r;
@@ -841,7 +841,7 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
 		}
 		for (base = robj->vm_bo; base; base = base->next)
 			if (amdgpu_xgmi_same_hive(amdgpu_ttm_adev(robj->tbo.bdev),
-				amdgpu_ttm_adev(base->vm->root.base.bo->tbo.bdev))) {
+				amdgpu_ttm_adev(base->vm->root.bo->tbo.bdev))) {
 				r = -EINVAL;
 				amdgpu_bo_unreserve(robj);
 				goto out;
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -124,6 +124,22 @@ void amdgpu_register_gpu_instance(struct amdgpu_device *adev)
 	mutex_unlock(&mgpu_info.mutex);
 }
 
+static void amdgpu_get_audio_func(struct amdgpu_device *adev)
+{
+	struct pci_dev *p = NULL;
+
+	p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
+			adev->pdev->bus->number, 1);
+	if (p) {
+		pm_runtime_get_sync(&p->dev);
+
+		pm_runtime_mark_last_busy(&p->dev);
+		pm_runtime_put_autosuspend(&p->dev);
+
+		pci_dev_put(p);
+	}
+}
+
 /**
  * amdgpu_driver_load_kms - Main load function for KMS.
  *
@@ -213,9 +229,35 @@ int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags)
 				DPM_FLAG_MAY_SKIP_RESUME);
 		pm_runtime_use_autosuspend(dev->dev);
 		pm_runtime_set_autosuspend_delay(dev->dev, 5000);
+
 		pm_runtime_allow(dev->dev);
+
 		pm_runtime_mark_last_busy(dev->dev);
 		pm_runtime_put_autosuspend(dev->dev);
+
+		/*
+		 * For runpm implemented via BACO, PMFW will handle the
+		 * timing for BACO in and out:
+		 *   - put ASIC into BACO state only when both video and
+		 *     audio functions are in D3 state.
+		 *   - pull ASIC out of BACO state when either video or
+		 *     audio function is in D0 state.
+		 * Also, at startup, PMFW assumes both functions are in
+		 * D0 state.
+		 *
+		 * So if snd driver was loaded prior to amdgpu driver
+		 * and audio function was put into D3 state, there will
+		 * be no PMFW-aware D-state transition(D0->D3) on runpm
+		 * suspend. Thus the BACO will be not correctly kicked in.
+		 *
+		 * Via amdgpu_get_audio_func(), the audio dev is put
+		 * into D0 state. Then there will be a PMFW-aware D-state
+		 * transition(D0->D3) on runpm suspend.
+		 */
+		if (amdgpu_device_supports_baco(dev) &&
+		    !(adev->flags & AMD_IS_APU) &&
+		    (adev->asic_type >= CHIP_NAVI10))
+			amdgpu_get_audio_func(adev);
 	}
 
 	if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DRV_LOAD))
@@ -1220,7 +1262,7 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
 	}
 
 	pasid = fpriv->vm.pasid;
-	pd = amdgpu_bo_ref(fpriv->vm.root.base.bo);
+	pd = amdgpu_bo_ref(fpriv->vm.root.bo);
 
 	amdgpu_ctx_mgr_fini(&fpriv->ctx_mgr);
 	amdgpu_vm_fini(adev, &fpriv->vm);
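The comment block above spells out the protocol: PMFW only enters BACO once both the GPU's video and audio PCI functions are in D3, and it assumes both start in D0, so amdgpu_get_audio_func() wakes the audio function, which sits at function 1 of the same domain/bus/slot as the video function. A hedged sketch of just the devfn arithmetic behind that lookup (illustrative, not the kernel PCI API):

#include <stdio.h>

/* A PCI devfn packs slot (device) and function: devfn = slot << 3 | fn.
 * The HDMI audio controller of a discrete GPU is conventionally
 * function 1 at the same slot as the VGA controller (function 0). */
static unsigned audio_devfn(unsigned video_devfn)
{
	unsigned slot = video_devfn >> 3;
	return (slot << 3) | 1;
}

int main(void)
{
	unsigned video = 0x00; /* e.g. 03:00.0 -> devfn 0x00 */
	printf("video devfn %02x -> audio devfn %02x\n",
	       video, audio_devfn(video));
	return 0;
}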
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -54,29 +54,42 @@
 
 static void amdgpu_bo_destroy(struct ttm_buffer_object *tbo)
 {
-	struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
 	struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);
-	struct amdgpu_bo_user *ubo;
 
 	amdgpu_bo_kunmap(bo);
 
 	if (bo->tbo.base.import_attach)
 		drm_prime_gem_destroy(&bo->tbo.base, bo->tbo.sg);
 	drm_gem_object_release(&bo->tbo.base);
+	amdgpu_bo_unref(&bo->parent);
+	kvfree(bo);
+}
+
+static void amdgpu_bo_user_destroy(struct ttm_buffer_object *tbo)
+{
+	struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);
+	struct amdgpu_bo_user *ubo;
+
+	ubo = to_amdgpu_bo_user(bo);
+	kfree(ubo->metadata);
+	amdgpu_bo_destroy(tbo);
+}
+
+static void amdgpu_bo_vm_destroy(struct ttm_buffer_object *tbo)
+{
+	struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
+	struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);
+	struct amdgpu_bo_vm *vmbo;
+
+	vmbo = to_amdgpu_bo_vm(bo);
 	/* in case amdgpu_device_recover_vram got NULL of bo->parent */
-	if (!list_empty(&bo->shadow_list)) {
+	if (!list_empty(&vmbo->shadow_list)) {
 		mutex_lock(&adev->shadow_list_lock);
-		list_del_init(&bo->shadow_list);
+		list_del_init(&vmbo->shadow_list);
 		mutex_unlock(&adev->shadow_list_lock);
 	}
 	amdgpu_bo_unref(&bo->parent);
-
-	if (bo->tbo.type != ttm_bo_type_kernel) {
-		ubo = to_amdgpu_bo_user(bo);
-		kfree(ubo->metadata);
-	}
-
-	kvfree(bo);
+	amdgpu_bo_destroy(tbo);
 }
 
 /**
@@ -91,8 +104,11 @@ static void amdgpu_bo_destroy(struct ttm_buffer_object *tbo)
  */
 bool amdgpu_bo_is_amdgpu_bo(struct ttm_buffer_object *bo)
 {
-	if (bo->destroy == &amdgpu_bo_destroy)
+	if (bo->destroy == &amdgpu_bo_destroy ||
+	    bo->destroy == &amdgpu_bo_user_destroy ||
+	    bo->destroy == &amdgpu_bo_vm_destroy)
 		return true;
 
 	return false;
 }
 
@@ -545,7 +561,6 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
 	if (bo == NULL)
 		return -ENOMEM;
 	drm_gem_private_object_init(adev_to_drm(adev), &bo->tbo.base, size);
-	INIT_LIST_HEAD(&bo->shadow_list);
 	bo->vm_bo = NULL;
 	bo->preferred_domains = bp->preferred_domain ? bp->preferred_domain :
 		bp->domain;
@@ -568,9 +583,12 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
 	if (bp->type == ttm_bo_type_kernel)
 		bo->tbo.priority = 1;
 
+	if (!bp->destroy)
+		bp->destroy = &amdgpu_bo_destroy;
+
 	r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, bp->type,
 				 &bo->placement, page_align, &ctx, NULL,
-				 bp->resv, &amdgpu_bo_destroy);
+				 bp->resv, bp->destroy);
 	if (unlikely(r != 0))
 		return r;
 
@@ -634,6 +652,7 @@ int amdgpu_bo_create_user(struct amdgpu_device *adev,
 	int r;
 
 	bp->bo_ptr_size = sizeof(struct amdgpu_bo_user);
+	bp->destroy = &amdgpu_bo_user_destroy;
 	r = amdgpu_bo_create(adev, bp, &bo_ptr);
 	if (r)
 		return r;
@@ -665,11 +684,13 @@ int amdgpu_bo_create_vm(struct amdgpu_device *adev,
 	 * num of amdgpu_vm_pt entries.
 	 */
 	BUG_ON(bp->bo_ptr_size < sizeof(struct amdgpu_bo_vm));
+	bp->destroy = &amdgpu_bo_vm_destroy;
 	r = amdgpu_bo_create(adev, bp, &bo_ptr);
 	if (r)
 		return r;
 
 	*vmbo_ptr = to_amdgpu_bo_vm(bo_ptr);
+	INIT_LIST_HEAD(&(*vmbo_ptr)->shadow_list);
 	return r;
 }
 
@@ -714,12 +735,12 @@ int amdgpu_bo_validate(struct amdgpu_bo *bo)
  *
 * Insert a BO to the shadow list.
 */
-void amdgpu_bo_add_to_shadow_list(struct amdgpu_bo *bo)
+void amdgpu_bo_add_to_shadow_list(struct amdgpu_bo_vm *vmbo)
 {
-	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+	struct amdgpu_device *adev = amdgpu_ttm_adev(vmbo->bo.tbo.bdev);
 
 	mutex_lock(&adev->shadow_list_lock);
-	list_add_tail(&bo->shadow_list, &adev->shadow_list);
+	list_add_tail(&vmbo->shadow_list, &adev->shadow_list);
 	mutex_unlock(&adev->shadow_list_lock);
 }
 
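Note how amdgpu_bo_is_amdgpu_bo() now recognizes any of the three destroy callbacks: the destructor pointer doubles as a type/ownership tag, which is what lets amdgpu_bo_create() dispatch teardown through bp->destroy. A minimal sketch of that idiom under assumed names:

#include <stdbool.h>
#include <stdio.h>

struct obj { void (*destroy)(struct obj *); };

static void plain_destroy(struct obj *o) { (void)o; }
static void user_destroy(struct obj *o)  { (void)o; }
static void vm_destroy(struct obj *o)    { (void)o; }

/* Ownership test by destructor identity: an object "belongs to us"
 * iff it will be torn down by one of our destroy callbacks. */
static bool is_ours(const struct obj *o)
{
	return o->destroy == plain_destroy ||
	       o->destroy == user_destroy ||
	       o->destroy == vm_destroy;
}

int main(void)
{
	struct obj o = { .destroy = vm_destroy };
	printf("is_ours: %d\n", is_ours(&o));
	return 0;
}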
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -55,7 +55,8 @@ struct amdgpu_bo_param {
 	u64				flags;
 	enum ttm_bo_type		type;
 	bool				no_wait_gpu;
-	struct dma_resv	*resv;
+	struct dma_resv			*resv;
+	void				(*destroy)(struct ttm_buffer_object *bo);
 };
 
 /* bo virtual addresses in a vm */
@@ -108,9 +109,6 @@ struct amdgpu_bo {
 #ifdef CONFIG_MMU_NOTIFIER
 	struct mmu_interval_notifier	notifier;
 #endif
-
-	struct list_head		shadow_list;
-
 	struct kgd_mem			*kfd_bo;
 };
 
@@ -126,7 +124,8 @@ struct amdgpu_bo_user {
 struct amdgpu_bo_vm {
 	struct amdgpu_bo		bo;
 	struct amdgpu_bo		*shadow;
-	struct amdgpu_vm_pt		entries[];
+	struct list_head		shadow_list;
+	struct amdgpu_vm_bo_base	entries[];
 };
 
 static inline struct amdgpu_bo *ttm_to_amdgpu_bo(struct ttm_buffer_object *tbo)
@@ -332,7 +331,7 @@ u64 amdgpu_bo_gpu_offset_no_check(struct amdgpu_bo *bo);
 int amdgpu_bo_validate(struct amdgpu_bo *bo);
 void amdgpu_bo_get_memory(struct amdgpu_bo *bo, uint64_t *vram_mem,
 				uint64_t *gtt_mem, uint64_t *cpu_mem);
-void amdgpu_bo_add_to_shadow_list(struct amdgpu_bo *bo);
+void amdgpu_bo_add_to_shadow_list(struct amdgpu_bo_vm *vmbo);
 int amdgpu_bo_restore_shadow(struct amdgpu_bo *shadow,
 			     struct dma_fence **fence);
 uint32_t amdgpu_bo_get_preferred_pin_domain(struct amdgpu_device *adev,
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -171,11 +171,81 @@ static int psp_memory_training_init(struct psp_context *psp)
 	return ret;
 }
 
+/*
+ * Helper funciton to query psp runtime database entry
+ *
+ * @adev: amdgpu_device pointer
+ * @entry_type: the type of psp runtime database entry
+ * @db_entry: runtime database entry pointer
+ *
+ * Return false if runtime database doesn't exit or entry is invalid
+ * or true if the specific database entry is found, and copy to @db_entry
+ */
+static bool psp_get_runtime_db_entry(struct amdgpu_device *adev,
+				     enum psp_runtime_entry_type entry_type,
+				     void *db_entry)
+{
+	uint64_t db_header_pos, db_dir_pos;
+	struct psp_runtime_data_header db_header = {0};
+	struct psp_runtime_data_directory db_dir = {0};
+	bool ret = false;
+	int i;
+
+	db_header_pos = adev->gmc.mc_vram_size - PSP_RUNTIME_DB_OFFSET;
+	db_dir_pos = db_header_pos + sizeof(struct psp_runtime_data_header);
+
+	/* read runtime db header from vram */
+	amdgpu_device_vram_access(adev, db_header_pos, (uint32_t *)&db_header,
+			sizeof(struct psp_runtime_data_header), false);
+
+	if (db_header.cookie != PSP_RUNTIME_DB_COOKIE_ID) {
+		/* runtime db doesn't exist, exit */
+		dev_warn(adev->dev, "PSP runtime database doesn't exist\n");
+		return false;
+	}
+
+	/* read runtime database entry from vram */
+	amdgpu_device_vram_access(adev, db_dir_pos, (uint32_t *)&db_dir,
+			sizeof(struct psp_runtime_data_directory), false);
+
+	if (db_dir.entry_count >= PSP_RUNTIME_DB_DIAG_ENTRY_MAX_COUNT) {
+		/* invalid db entry count, exit */
+		dev_warn(adev->dev, "Invalid PSP runtime database entry count\n");
+		return false;
+	}
+
+	/* look up for requested entry type */
+	for (i = 0; i < db_dir.entry_count && !ret; i++) {
+		if (db_dir.entry_list[i].entry_type == entry_type) {
+			switch (entry_type) {
+			case PSP_RUNTIME_ENTRY_TYPE_BOOT_CONFIG:
+				if (db_dir.entry_list[i].size < sizeof(struct psp_runtime_boot_cfg_entry)) {
+					/* invalid db entry size */
+					dev_warn(adev->dev, "Invalid PSP runtime database entry size\n");
+					return false;
+				}
+				/* read runtime database entry */
+				amdgpu_device_vram_access(adev, db_header_pos + db_dir.entry_list[i].offset,
+						(uint32_t *)db_entry, sizeof(struct psp_runtime_boot_cfg_entry), false);
+				ret = true;
+				break;
+			default:
+				ret = false;
+				break;
+			}
+		}
+	}
+
+	return ret;
+}
+
 static int psp_sw_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	struct psp_context *psp = &adev->psp;
 	int ret;
+	struct psp_runtime_boot_cfg_entry boot_cfg_entry;
+	struct psp_memory_training_context *mem_training_ctx = &psp->mem_train_ctx;
 
 	if (!amdgpu_sriov_vf(adev)) {
 		ret = psp_init_microcode(psp);
@@ -191,15 +261,39 @@ static int psp_sw_init(void *handle)
 		}
 	}
 
-	ret = psp_memory_training_init(psp);
-	if (ret) {
-		DRM_ERROR("Failed to initialize memory training!\n");
-		return ret;
+	memset(&boot_cfg_entry, 0, sizeof(boot_cfg_entry));
+	if (psp_get_runtime_db_entry(adev,
+				PSP_RUNTIME_ENTRY_TYPE_BOOT_CONFIG,
+				&boot_cfg_entry)) {
+		psp->boot_cfg_bitmask = boot_cfg_entry.boot_cfg_bitmask;
+		if ((psp->boot_cfg_bitmask) &
+		    BOOT_CFG_FEATURE_TWO_STAGE_DRAM_TRAINING) {
+			/* If psp runtime database exists, then
+			 * only enable two stage memory training
+			 * when TWO_STAGE_DRAM_TRAINING bit is set
+			 * in runtime database */
+			mem_training_ctx->enable_mem_training = true;
+		}
+
+	} else {
+		/* If psp runtime database doesn't exist or
+		 * is invalid, force enable two stage memory
+		 * training */
+		mem_training_ctx->enable_mem_training = true;
 	}
 
-	ret = psp_mem_training(psp, PSP_MEM_TRAIN_COLD_BOOT);
-	if (ret) {
-		DRM_ERROR("Failed to process memory training!\n");
-		return ret;
+	if (mem_training_ctx->enable_mem_training) {
+		ret = psp_memory_training_init(psp);
+		if (ret) {
+			DRM_ERROR("Failed to initialize memory training!\n");
+			return ret;
+		}
+
+		ret = psp_mem_training(psp, PSP_MEM_TRAIN_COLD_BOOT);
+		if (ret) {
+			DRM_ERROR("Failed to process memory training!\n");
+			return ret;
+		}
 	}
 
 	if (adev->asic_type == CHIP_NAVI10 || adev->asic_type == CHIP_SIENNA_CICHLID) {
@@ -551,7 +645,30 @@ int psp_get_fw_attestation_records_addr(struct psp_context *psp,
 	return ret;
 }
 
-static int psp_boot_config_set(struct amdgpu_device *adev)
+static int psp_boot_config_get(struct amdgpu_device *adev, uint32_t *boot_cfg)
+{
+	struct psp_context *psp = &adev->psp;
+	struct psp_gfx_cmd_resp *cmd = psp->cmd;
+	int ret;
+
+	if (amdgpu_sriov_vf(adev))
+		return 0;
+
+	memset(cmd, 0, sizeof(struct psp_gfx_cmd_resp));
+
+	cmd->cmd_id = GFX_CMD_ID_BOOT_CFG;
+	cmd->cmd.boot_cfg.sub_cmd = BOOTCFG_CMD_GET;
+
+	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
+	if (!ret) {
+		*boot_cfg =
+			(cmd->resp.uresp.boot_cfg.boot_cfg & BOOT_CONFIG_GECC) ? 1 : 0;
+	}
+
+	return ret;
+}
+
+static int psp_boot_config_set(struct amdgpu_device *adev, uint32_t boot_cfg)
 {
 	struct psp_context *psp = &adev->psp;
 	struct psp_gfx_cmd_resp *cmd = psp->cmd;
@@ -563,8 +680,8 @@ static int psp_boot_config_set(struct amdgpu_device *adev)
 
 	cmd->cmd_id = GFX_CMD_ID_BOOT_CFG;
 	cmd->cmd.boot_cfg.sub_cmd = BOOTCFG_CMD_SET;
-	cmd->cmd.boot_cfg.boot_config = BOOT_CONFIG_GECC;
-	cmd->cmd.boot_cfg.boot_config_valid = BOOT_CONFIG_GECC;
+	cmd->cmd.boot_cfg.boot_config = boot_cfg;
+	cmd->cmd.boot_cfg.boot_config_valid = boot_cfg;
 
 	return psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
 }
@@ -1212,19 +1329,62 @@ static int psp_ras_terminate(struct psp_context *psp)
 static int psp_ras_initialize(struct psp_context *psp)
 {
 	int ret;
+	uint32_t boot_cfg = 0xFF;
+	struct amdgpu_device *adev = psp->adev;
 
 	/*
 	 * TODO: bypass the initialize in sriov for now
 	 */
-	if (amdgpu_sriov_vf(psp->adev))
+	if (amdgpu_sriov_vf(adev))
 		return 0;
 
-	if (!psp->adev->psp.ta_ras_ucode_size ||
-	    !psp->adev->psp.ta_ras_start_addr) {
-		dev_info(psp->adev->dev, "RAS: optional ras ta ucode is not available\n");
+	if (!adev->psp.ta_ras_ucode_size ||
+	    !adev->psp.ta_ras_start_addr) {
+		dev_info(adev->dev, "RAS: optional ras ta ucode is not available\n");
 		return 0;
 	}
 
+	if (amdgpu_atomfirmware_dynamic_boot_config_supported(adev)) {
+		/* query GECC enablement status from boot config
+		 * boot_cfg: 1: GECC is enabled or 0: GECC is disabled
+		 */
+		ret = psp_boot_config_get(adev, &boot_cfg);
+		if (ret)
+			dev_warn(adev->dev, "PSP get boot config failed\n");
+
+		if (!amdgpu_ras_is_supported(psp->adev, AMDGPU_RAS_BLOCK__UMC)) {
+			if (!boot_cfg) {
+				dev_info(adev->dev, "GECC is disabled\n");
+			} else {
+				/* disable GECC in next boot cycle if ras is
+				 * disabled by module parameter amdgpu_ras_enable
+				 * and/or amdgpu_ras_mask, or boot_config_get call
+				 * is failed
+				 */
+				ret = psp_boot_config_set(adev, 0);
+				if (ret)
+					dev_warn(adev->dev, "PSP set boot config failed\n");
+				else
+					dev_warn(adev->dev, "GECC will be disabled in next boot cycle "
+						 "if set amdgpu_ras_enable and/or amdgpu_ras_mask to 0x0\n");
+			}
+		} else {
+			if (1 == boot_cfg) {
+				dev_info(adev->dev, "GECC is enabled\n");
+			} else {
+				/* enable GECC in next boot cycle if it is disabled
+				 * in boot config, or force enable GECC if failed to
+				 * get boot configuration
+				 */
+				ret = psp_boot_config_set(adev, BOOT_CONFIG_GECC);
+				if (ret)
+					dev_warn(adev->dev, "PSP set boot config failed\n");
+				else
+					dev_warn(adev->dev, "GECC will be enabled in next boot cycle\n");
+			}
+		}
+	}
+
 	if (!psp->ras.ras_initialized) {
 		ret = psp_ras_init_shared_buf(psp);
 		if (ret)
@@ -1945,12 +2105,6 @@ static int psp_hw_start(struct psp_context *psp)
 		return ret;
 	}
 
-	if (amdgpu_atomfirmware_dynamic_boot_config_supported(adev)) {
-		ret = psp_boot_config_set(adev);
-		if (ret)
-			dev_warn(adev->dev, "PSP set boot config failed\n");
-	}
-
 	ret = psp_tmr_init(psp);
 	if (ret) {
 		DRM_ERROR("PSP tmr init failed!\n");
@@ -2188,10 +2342,7 @@ static int psp_load_smu_fw(struct psp_context *psp)
 	if ((amdgpu_in_reset(adev) &&
 	     ras && adev->ras_enabled &&
 	     (adev->asic_type == CHIP_ARCTURUS ||
-	      adev->asic_type == CHIP_VEGA20)) ||
-	     (adev->in_runpm &&
-	      adev->asic_type >= CHIP_NAVI10 &&
-	      adev->asic_type <= CHIP_NAVI12)) {
+	      adev->asic_type == CHIP_VEGA20))) {
 		ret = amdgpu_dpm_set_mp1_state(adev, PP_MP1_STATE_UNLOAD);
 		if (ret) {
 			DRM_WARN("Failed to set MP1 state prepare for reload\n");
@@ -2562,10 +2713,12 @@ static int psp_resume(void *handle)
 
 	DRM_INFO("PSP is resuming...\n");
 
-	ret = psp_mem_training(psp, PSP_MEM_TRAIN_RESUME);
-	if (ret) {
-		DRM_ERROR("Failed to process memory training!\n");
-		return ret;
+	if (psp->mem_train_ctx.enable_mem_training) {
+		ret = psp_mem_training(psp, PSP_MEM_TRAIN_RESUME);
+		if (ret) {
+			DRM_ERROR("Failed to process memory training!\n");
+			return ret;
+		}
 	}
 
 	mutex_lock(&adev->firmware.mutex);
@@ -2749,7 +2902,7 @@ int psp_init_asd_microcode(struct psp_context *psp,
 
 	asd_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.asd_fw->data;
 	adev->psp.asd_fw_version = le32_to_cpu(asd_hdr->header.ucode_version);
-	adev->psp.asd_feature_version = le32_to_cpu(asd_hdr->ucode_feature_version);
+	adev->psp.asd_feature_version = le32_to_cpu(asd_hdr->sos.fw_version);
 	adev->psp.asd_ucode_size = le32_to_cpu(asd_hdr->header.ucode_size_bytes);
 	adev->psp.asd_start_addr = (uint8_t *)asd_hdr +
 				le32_to_cpu(asd_hdr->header.ucode_array_offset_bytes);
@@ -2785,7 +2938,7 @@ int psp_init_toc_microcode(struct psp_context *psp,
 
 	toc_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.toc_fw->data;
 	adev->psp.toc_fw_version = le32_to_cpu(toc_hdr->header.ucode_version);
-	adev->psp.toc_feature_version = le32_to_cpu(toc_hdr->ucode_feature_version);
+	adev->psp.toc_feature_version = le32_to_cpu(toc_hdr->sos.fw_version);
 	adev->psp.toc_bin_size = le32_to_cpu(toc_hdr->header.ucode_size_bytes);
 	adev->psp.toc_start_addr = (uint8_t *)toc_hdr +
 				le32_to_cpu(toc_hdr->header.ucode_array_offset_bytes);
@@ -2797,6 +2950,50 @@ int psp_init_toc_microcode(struct psp_context *psp,
 	return err;
 }
 
+static int psp_init_sos_base_fw(struct amdgpu_device *adev)
+{
+	const struct psp_firmware_header_v1_0 *sos_hdr;
+	const struct psp_firmware_header_v1_3 *sos_hdr_v1_3;
+	uint8_t *ucode_array_start_addr;
+
+	sos_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data;
+	ucode_array_start_addr = (uint8_t *)sos_hdr +
+		le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes);
+
+	if (adev->gmc.xgmi.connected_to_cpu || (adev->asic_type != CHIP_ALDEBARAN)) {
+		adev->psp.sos_fw_version = le32_to_cpu(sos_hdr->header.ucode_version);
+		adev->psp.sos_feature_version = le32_to_cpu(sos_hdr->sos.fw_version);
+
+		adev->psp.sys_bin_size = le32_to_cpu(sos_hdr->sos.offset_bytes);
+		adev->psp.sys_start_addr = ucode_array_start_addr;
+
+		adev->psp.sos_bin_size = le32_to_cpu(sos_hdr->sos.size_bytes);
+		adev->psp.sos_start_addr = ucode_array_start_addr +
+				le32_to_cpu(sos_hdr->sos.offset_bytes);
+	} else {
+		/* Load alternate PSP SOS FW */
+		sos_hdr_v1_3 = (const struct psp_firmware_header_v1_3 *)adev->psp.sos_fw->data;
+
+		adev->psp.sos_fw_version = le32_to_cpu(sos_hdr_v1_3->sos_aux.fw_version);
+		adev->psp.sos_feature_version = le32_to_cpu(sos_hdr_v1_3->sos_aux.fw_version);
+
+		adev->psp.sys_bin_size = le32_to_cpu(sos_hdr_v1_3->sys_drv_aux.size_bytes);
+		adev->psp.sys_start_addr = ucode_array_start_addr +
+			le32_to_cpu(sos_hdr_v1_3->sys_drv_aux.offset_bytes);
+
+		adev->psp.sos_bin_size = le32_to_cpu(sos_hdr_v1_3->sos_aux.size_bytes);
+		adev->psp.sos_start_addr = ucode_array_start_addr +
+			le32_to_cpu(sos_hdr_v1_3->sos_aux.offset_bytes);
+	}
+
+	if ((adev->psp.sys_bin_size == 0) || (adev->psp.sos_bin_size == 0)) {
+		dev_warn(adev->dev, "PSP SOS FW not available");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 int psp_init_sos_microcode(struct psp_context *psp,
 			   const char *chip_name)
 {
@@ -2807,6 +3004,7 @@ int psp_init_sos_microcode(struct psp_context *psp,
 	const struct psp_firmware_header_v1_2 *sos_hdr_v1_2;
 	const struct psp_firmware_header_v1_3 *sos_hdr_v1_3;
 	int err = 0;
+	uint8_t *ucode_array_start_addr;
 
 	if (!chip_name) {
 		dev_err(adev->dev, "invalid chip name for sos microcode\n");
@@ -2823,47 +3021,45 @@ int psp_init_sos_microcode(struct psp_context *psp,
 		goto out;
 
 	sos_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data;
+	ucode_array_start_addr = (uint8_t *)sos_hdr +
+		le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes);
 	amdgpu_ucode_print_psp_hdr(&sos_hdr->header);
 
 	switch (sos_hdr->header.header_version_major) {
 	case 1:
-		adev->psp.sos_fw_version = le32_to_cpu(sos_hdr->header.ucode_version);
-		adev->psp.sos_feature_version = le32_to_cpu(sos_hdr->ucode_feature_version);
-		adev->psp.sos_bin_size = le32_to_cpu(sos_hdr->sos_size_bytes);
-		adev->psp.sys_bin_size = le32_to_cpu(sos_hdr->sos_offset_bytes);
-		adev->psp.sys_start_addr = (uint8_t *)sos_hdr +
-				le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes);
-		adev->psp.sos_start_addr = (uint8_t *)adev->psp.sys_start_addr +
-				le32_to_cpu(sos_hdr->sos_offset_bytes);
+		err = psp_init_sos_base_fw(adev);
+		if (err)
+			goto out;
+
 		if (sos_hdr->header.header_version_minor == 1) {
 			sos_hdr_v1_1 = (const struct psp_firmware_header_v1_1 *)adev->psp.sos_fw->data;
-			adev->psp.toc_bin_size = le32_to_cpu(sos_hdr_v1_1->toc_size_bytes);
+			adev->psp.toc_bin_size = le32_to_cpu(sos_hdr_v1_1->toc.size_bytes);
 			adev->psp.toc_start_addr = (uint8_t *)adev->psp.sys_start_addr +
-					le32_to_cpu(sos_hdr_v1_1->toc_offset_bytes);
-			adev->psp.kdb_bin_size = le32_to_cpu(sos_hdr_v1_1->kdb_size_bytes);
+					le32_to_cpu(sos_hdr_v1_1->toc.offset_bytes);
+			adev->psp.kdb_bin_size = le32_to_cpu(sos_hdr_v1_1->kdb.size_bytes);
 			adev->psp.kdb_start_addr = (uint8_t *)adev->psp.sys_start_addr +
-					le32_to_cpu(sos_hdr_v1_1->kdb_offset_bytes);
+					le32_to_cpu(sos_hdr_v1_1->kdb.offset_bytes);
 		}
 		if (sos_hdr->header.header_version_minor == 2) {
 			sos_hdr_v1_2 = (const struct psp_firmware_header_v1_2 *)adev->psp.sos_fw->data;
-			adev->psp.kdb_bin_size = le32_to_cpu(sos_hdr_v1_2->kdb_size_bytes);
+			adev->psp.kdb_bin_size = le32_to_cpu(sos_hdr_v1_2->kdb.size_bytes);
 			adev->psp.kdb_start_addr = (uint8_t *)adev->psp.sys_start_addr +
-					    le32_to_cpu(sos_hdr_v1_2->kdb_offset_bytes);
+					    le32_to_cpu(sos_hdr_v1_2->kdb.offset_bytes);
 		}
 		if (sos_hdr->header.header_version_minor == 3) {
 			sos_hdr_v1_3 = (const struct psp_firmware_header_v1_3 *)adev->psp.sos_fw->data;
-			adev->psp.toc_bin_size = le32_to_cpu(sos_hdr_v1_3->v1_1.toc_size_bytes);
-			adev->psp.toc_start_addr = (uint8_t *)adev->psp.sys_start_addr +
-				le32_to_cpu(sos_hdr_v1_3->v1_1.toc_offset_bytes);
-			adev->psp.kdb_bin_size = le32_to_cpu(sos_hdr_v1_3->v1_1.kdb_size_bytes);
-			adev->psp.kdb_start_addr = (uint8_t *)adev->psp.sys_start_addr +
-				le32_to_cpu(sos_hdr_v1_3->v1_1.kdb_offset_bytes);
-			adev->psp.spl_bin_size = le32_to_cpu(sos_hdr_v1_3->spl_size_bytes);
-			adev->psp.spl_start_addr = (uint8_t *)adev->psp.sys_start_addr +
-				le32_to_cpu(sos_hdr_v1_3->spl_offset_bytes);
-			adev->psp.rl_bin_size = le32_to_cpu(sos_hdr_v1_3->rl_size_bytes);
-			adev->psp.rl_start_addr = (uint8_t *)adev->psp.sys_start_addr +
-				le32_to_cpu(sos_hdr_v1_3->rl_offset_bytes);
+			adev->psp.toc_bin_size = le32_to_cpu(sos_hdr_v1_3->v1_1.toc.size_bytes);
+			adev->psp.toc_start_addr = ucode_array_start_addr +
+				le32_to_cpu(sos_hdr_v1_3->v1_1.toc.offset_bytes);
+			adev->psp.kdb_bin_size = le32_to_cpu(sos_hdr_v1_3->v1_1.kdb.size_bytes);
+			adev->psp.kdb_start_addr = ucode_array_start_addr +
+				le32_to_cpu(sos_hdr_v1_3->v1_1.kdb.offset_bytes);
+			adev->psp.spl_bin_size = le32_to_cpu(sos_hdr_v1_3->spl.size_bytes);
+			adev->psp.spl_start_addr = ucode_array_start_addr +
+				le32_to_cpu(sos_hdr_v1_3->spl.offset_bytes);
+			adev->psp.rl_bin_size = le32_to_cpu(sos_hdr_v1_3->rl.size_bytes);
+			adev->psp.rl_start_addr = ucode_array_start_addr +
+				le32_to_cpu(sos_hdr_v1_3->rl.offset_bytes);
 		}
 		break;
 	default:
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
@@ -225,6 +225,61 @@ struct psp_memory_training_context {
 
 	enum psp_memory_training_init_flag init;
 	u32 training_cnt;
+	bool enable_mem_training;
 };
 
+/** PSP runtime DB **/
+#define PSP_RUNTIME_DB_SIZE_IN_BYTES	0x10000
+#define PSP_RUNTIME_DB_OFFSET		0x100000
+#define PSP_RUNTIME_DB_COOKIE_ID	0x0ed5
+#define PSP_RUNTIME_DB_VER_1		0x0100
+#define PSP_RUNTIME_DB_DIAG_ENTRY_MAX_COUNT	0x40
+
+enum psp_runtime_entry_type {
+	PSP_RUNTIME_ENTRY_TYPE_INVALID		= 0x0,
+	PSP_RUNTIME_ENTRY_TYPE_TEST		= 0x1,
+	PSP_RUNTIME_ENTRY_TYPE_MGPU_COMMON	= 0x2,  /* Common mGPU runtime data */
+	PSP_RUNTIME_ENTRY_TYPE_MGPU_WAFL	= 0x3,  /* WAFL runtime data */
+	PSP_RUNTIME_ENTRY_TYPE_MGPU_XGMI	= 0x4,  /* XGMI runtime data */
+	PSP_RUNTIME_ENTRY_TYPE_BOOT_CONFIG	= 0x5,  /* Boot Config runtime data */
+};
+
+/* PSP runtime DB header */
+struct psp_runtime_data_header {
+	/* determine the existence of runtime db */
+	uint16_t cookie;
+	/* version of runtime db */
+	uint16_t version;
+};
+
+/* PSP runtime DB entry */
+struct psp_runtime_entry {
+	/* type of runtime db entry */
+	uint32_t entry_type;
+	/* offset of entry in bytes */
+	uint16_t offset;
+	/* size of entry in bytes */
+	uint16_t size;
+};
+
+/* PSP runtime DB directory */
+struct psp_runtime_data_directory {
+	/* number of valid entries */
+	uint16_t			entry_count;
+	/* db entries*/
+	struct psp_runtime_entry	entry_list[PSP_RUNTIME_DB_DIAG_ENTRY_MAX_COUNT];
+};
+
+/* PSP runtime DB boot config feature bitmask */
+enum psp_runtime_boot_cfg_feature {
+	BOOT_CFG_FEATURE_GECC				= 0x1,
+	BOOT_CFG_FEATURE_TWO_STAGE_DRAM_TRAINING	= 0x2,
+};
+
+/* PSP runtime DB boot config entry */
+struct psp_runtime_boot_cfg_entry {
+	uint32_t boot_cfg_bitmask;
+	uint32_t reserved;
+};
+
 struct psp_context
@@ -325,6 +380,8 @@ struct psp_context
 	struct psp_securedisplay_context	securedisplay_context;
 	struct mutex			mutex;
 	struct psp_memory_training_context mem_train_ctx;
+
+	uint32_t			boot_cfg_bitmask;
 };
 
 struct amdgpu_psp_funcs {
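Putting the new header definitions together with psp_get_runtime_db_entry() above: the database sits at a fixed distance below the top of VRAM (PSP_RUNTIME_DB_OFFSET), a small header carries a cookie and version, and a directory of (type, offset, size) entries follows immediately after it. A user-space sketch of the same lookup over an in-memory buffer; the layout constants come from the diff, while the buffer contents are fabricated purely for illustration:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define DB_COOKIE      0x0ed5
#define DB_MAX_ENTRIES 0x40

struct db_header { uint16_t cookie, version; };
struct db_entry  { uint32_t type; uint16_t offset, size; };
struct db_dir    { uint16_t count; struct db_entry list[DB_MAX_ENTRIES]; };

/* Find an entry of the given type; payload offsets are relative to the
 * database header, exactly as in psp_get_runtime_db_entry(). */
static const void *db_find(const uint8_t *db, uint32_t type, uint16_t min_size)
{
	struct db_header hdr;
	struct db_dir dir;

	memcpy(&hdr, db, sizeof(hdr));
	if (hdr.cookie != DB_COOKIE)
		return NULL;                        /* no database */
	memcpy(&dir, db + sizeof(hdr), sizeof(dir));
	if (dir.count >= DB_MAX_ENTRIES)
		return NULL;                        /* corrupt directory */
	for (uint16_t i = 0; i < dir.count; i++)
		if (dir.list[i].type == type && dir.list[i].size >= min_size)
			return db + dir.list[i].offset;
	return NULL;
}

int main(void)
{
	static uint8_t db[4096];
	struct db_header hdr = { .cookie = DB_COOKIE, .version = 0x0100 };
	struct db_dir dir = { .count = 1 };
	uint32_t boot_cfg = 0x2;                    /* TWO_STAGE_DRAM_TRAINING */

	dir.list[0] = (struct db_entry){ .type = 5, .offset = 2048,
					 .size = sizeof(boot_cfg) };
	memcpy(db, &hdr, sizeof(hdr));
	memcpy(db + sizeof(hdr), &dir, sizeof(dir));
	memcpy(db + 2048, &boot_cfg, sizeof(boot_cfg));

	const uint32_t *cfg = db_find(db, 5, sizeof(*cfg));
	printf("boot_cfg_bitmask = %#x\n", cfg ? (unsigned)*cfg : 0);
	return 0;
}

In the driver the same reads go through amdgpu_device_vram_access() against VRAM rather than memcpy() against a local buffer.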
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -2122,7 +2122,7 @@ static void amdgpu_ras_counte_dw(struct work_struct *work)
 	struct amdgpu_ras *con = container_of(work, struct amdgpu_ras,
 					      ras_counte_delay_work.work);
 	struct amdgpu_device *adev = con->adev;
-	struct drm_device *dev = &adev->ddev;
+	struct drm_device *dev = adev_to_drm(adev);
 	unsigned long ce_count, ue_count;
 	int res;
 
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
@@ -257,36 +257,36 @@ void amdgpu_ucode_print_psp_hdr(const struct common_firmware_header *hdr)
 			container_of(hdr, struct psp_firmware_header_v1_0, header);
 
 		DRM_DEBUG("ucode_feature_version: %u\n",
-			  le32_to_cpu(psp_hdr->ucode_feature_version));
+			  le32_to_cpu(psp_hdr->sos.fw_version));
 		DRM_DEBUG("sos_offset_bytes: %u\n",
-			  le32_to_cpu(psp_hdr->sos_offset_bytes));
+			  le32_to_cpu(psp_hdr->sos.offset_bytes));
 		DRM_DEBUG("sos_size_bytes: %u\n",
-			  le32_to_cpu(psp_hdr->sos_size_bytes));
+			  le32_to_cpu(psp_hdr->sos.size_bytes));
 		if (version_minor == 1) {
 			const struct psp_firmware_header_v1_1 *psp_hdr_v1_1 =
 				container_of(psp_hdr, struct psp_firmware_header_v1_1, v1_0);
 			DRM_DEBUG("toc_header_version: %u\n",
-				  le32_to_cpu(psp_hdr_v1_1->toc_header_version));
+				  le32_to_cpu(psp_hdr_v1_1->toc.fw_version));
 			DRM_DEBUG("toc_offset_bytes: %u\n",
-				  le32_to_cpu(psp_hdr_v1_1->toc_offset_bytes));
+				  le32_to_cpu(psp_hdr_v1_1->toc.offset_bytes));
 			DRM_DEBUG("toc_size_bytes: %u\n",
-				  le32_to_cpu(psp_hdr_v1_1->toc_size_bytes));
+				  le32_to_cpu(psp_hdr_v1_1->toc.size_bytes));
 			DRM_DEBUG("kdb_header_version: %u\n",
-				  le32_to_cpu(psp_hdr_v1_1->kdb_header_version));
+				  le32_to_cpu(psp_hdr_v1_1->kdb.fw_version));
 			DRM_DEBUG("kdb_offset_bytes: %u\n",
-				  le32_to_cpu(psp_hdr_v1_1->kdb_offset_bytes));
+				  le32_to_cpu(psp_hdr_v1_1->kdb.offset_bytes));
 			DRM_DEBUG("kdb_size_bytes: %u\n",
-				  le32_to_cpu(psp_hdr_v1_1->kdb_size_bytes));
+				  le32_to_cpu(psp_hdr_v1_1->kdb.size_bytes));
 		}
 		if (version_minor == 2) {
 			const struct psp_firmware_header_v1_2 *psp_hdr_v1_2 =
 				container_of(psp_hdr, struct psp_firmware_header_v1_2, v1_0);
 			DRM_DEBUG("kdb_header_version: %u\n",
-				  le32_to_cpu(psp_hdr_v1_2->kdb_header_version));
+				  le32_to_cpu(psp_hdr_v1_2->kdb.fw_version));
 			DRM_DEBUG("kdb_offset_bytes: %u\n",
-				  le32_to_cpu(psp_hdr_v1_2->kdb_offset_bytes));
+				  le32_to_cpu(psp_hdr_v1_2->kdb.offset_bytes));
 			DRM_DEBUG("kdb_size_bytes: %u\n",
-				  le32_to_cpu(psp_hdr_v1_2->kdb_size_bytes));
+				  le32_to_cpu(psp_hdr_v1_2->kdb.size_bytes));
 		}
 		if (version_minor == 3) {
 			const struct psp_firmware_header_v1_1 *psp_hdr_v1_1 =
@@ -294,23 +294,23 @@ void amdgpu_ucode_print_psp_hdr(const struct common_firmware_header *hdr)
 			const struct psp_firmware_header_v1_3 *psp_hdr_v1_3 =
 				container_of(psp_hdr_v1_1, struct psp_firmware_header_v1_3, v1_1);
 			DRM_DEBUG("toc_header_version: %u\n",
-				  le32_to_cpu(psp_hdr_v1_3->v1_1.toc_header_version));
+				  le32_to_cpu(psp_hdr_v1_3->v1_1.toc.fw_version));
 			DRM_DEBUG("toc_offset_bytes: %u\n",
-				  le32_to_cpu(psp_hdr_v1_3->v1_1.toc_offset_bytes));
+				  le32_to_cpu(psp_hdr_v1_3->v1_1.toc.offset_bytes));
 			DRM_DEBUG("toc_size_bytes: %u\n",
-				  le32_to_cpu(psp_hdr_v1_3->v1_1.toc_size_bytes));
+				  le32_to_cpu(psp_hdr_v1_3->v1_1.toc.size_bytes));
 			DRM_DEBUG("kdb_header_version: %u\n",
-				  le32_to_cpu(psp_hdr_v1_3->v1_1.kdb_header_version));
+				  le32_to_cpu(psp_hdr_v1_3->v1_1.kdb.fw_version));
 			DRM_DEBUG("kdb_offset_bytes: %u\n",
-				  le32_to_cpu(psp_hdr_v1_3->v1_1.kdb_offset_bytes));
+				  le32_to_cpu(psp_hdr_v1_3->v1_1.kdb.offset_bytes));
 			DRM_DEBUG("kdb_size_bytes: %u\n",
-				  le32_to_cpu(psp_hdr_v1_3->v1_1.kdb_size_bytes));
+				  le32_to_cpu(psp_hdr_v1_3->v1_1.kdb.size_bytes));
 			DRM_DEBUG("spl_header_version: %u\n",
-				  le32_to_cpu(psp_hdr_v1_3->spl_header_version));
+				  le32_to_cpu(psp_hdr_v1_3->spl.fw_version));
 			DRM_DEBUG("spl_offset_bytes: %u\n",
-				  le32_to_cpu(psp_hdr_v1_3->spl_offset_bytes));
+				  le32_to_cpu(psp_hdr_v1_3->spl.offset_bytes));
 			DRM_DEBUG("spl_size_bytes: %u\n",
-				  le32_to_cpu(psp_hdr_v1_3->spl_size_bytes));
+				  le32_to_cpu(psp_hdr_v1_3->spl.size_bytes));
 		}
 	} else {
 		DRM_ERROR("Unknown PSP ucode version: %u.%u\n",
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
@@ -71,43 +71,39 @@ struct smc_firmware_header_v2_1 {
 	uint32_t pptable_entry_offset;
 };
 
+struct psp_fw_bin_desc {
+	uint32_t fw_version;
+	uint32_t offset_bytes;
+	uint32_t size_bytes;
+};
+
 /* version_major=1, version_minor=0 */
 struct psp_firmware_header_v1_0 {
 	struct common_firmware_header header;
-	uint32_t ucode_feature_version;
-	uint32_t sos_offset_bytes;
-	uint32_t sos_size_bytes;
+	struct psp_fw_bin_desc sos;
 };
 
 /* version_major=1, version_minor=1 */
 struct psp_firmware_header_v1_1 {
 	struct psp_firmware_header_v1_0 v1_0;
-	uint32_t toc_header_version;
-	uint32_t toc_offset_bytes;
-	uint32_t toc_size_bytes;
-	uint32_t kdb_header_version;
-	uint32_t kdb_offset_bytes;
-	uint32_t kdb_size_bytes;
+	struct psp_fw_bin_desc toc;
+	struct psp_fw_bin_desc kdb;
 };
 
 /* version_major=1, version_minor=2 */
 struct psp_firmware_header_v1_2 {
 	struct psp_firmware_header_v1_0 v1_0;
-	uint32_t reserve[3];
-	uint32_t kdb_header_version;
-	uint32_t kdb_offset_bytes;
-	uint32_t kdb_size_bytes;
+	struct psp_fw_bin_desc res;
+	struct psp_fw_bin_desc kdb;
 };
 
 /* version_major=1, version_minor=3 */
 struct psp_firmware_header_v1_3 {
 	struct psp_firmware_header_v1_1 v1_1;
-	uint32_t spl_header_version;
-	uint32_t spl_offset_bytes;
-	uint32_t spl_size_bytes;
-	uint32_t rl_header_version;
-	uint32_t rl_offset_bytes;
-	uint32_t rl_size_bytes;
+	struct psp_fw_bin_desc spl;
+	struct psp_fw_bin_desc rl;
+	struct psp_fw_bin_desc sys_drv_aux;
+	struct psp_fw_bin_desc sos_aux;
};
 
 /* version_major=1, version_minor=0 */
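This header refactor replaces each (header_version, offset_bytes, size_bytes) triple with a shared struct psp_fw_bin_desc, which is why every *_offset_bytes accessor in the .c hunks becomes *.offset_bytes. Since all three fields stay 32-bit and keep their order, the on-disk layout that the firmware parser sees should be unchanged; a quick offsetof check illustrates that (stand-alone sketch under C11, not the kernel headers themselves):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Old v1_1 tail: six bare 32-bit fields. */
struct old_tail {
	uint32_t toc_header_version, toc_offset_bytes, toc_size_bytes;
	uint32_t kdb_header_version, kdb_offset_bytes, kdb_size_bytes;
};

/* New v1_1 tail: two descriptors with the same field order. */
struct psp_fw_bin_desc { uint32_t fw_version, offset_bytes, size_bytes; };
struct new_tail { struct psp_fw_bin_desc toc, kdb; };

int main(void)
{
	static_assert(sizeof(struct old_tail) == sizeof(struct new_tail),
		      "layout must not change for firmware parsing");
	assert(offsetof(struct old_tail, kdb_offset_bytes) ==
	       offsetof(struct new_tail, kdb) +
	       offsetof(struct psp_fw_bin_desc, offset_bytes));
	puts("tail layouts match");
	return 0;
}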
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -332,7 +332,7 @@ static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
 	base->next = bo->vm_bo;
 	bo->vm_bo = base;
 
-	if (bo->tbo.base.resv != vm->root.base.bo->tbo.base.resv)
+	if (bo->tbo.base.resv != vm->root.bo->tbo.base.resv)
 		return;
 
 	vm->bulk_moveable = false;
@@ -361,14 +361,14 @@ static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
 * Helper to get the parent entry for the child page table. NULL if we are at
 * the root page directory.
 */
-static struct amdgpu_vm_pt *amdgpu_vm_pt_parent(struct amdgpu_vm_pt *pt)
+static struct amdgpu_vm_bo_base *amdgpu_vm_pt_parent(struct amdgpu_vm_bo_base *pt)
 {
-	struct amdgpu_bo *parent = pt->base.bo->parent;
+	struct amdgpu_bo *parent = pt->bo->parent;
 
 	if (!parent)
 		return NULL;
 
-	return container_of(parent->vm_bo, struct amdgpu_vm_pt, base);
+	return parent->vm_bo;
 }
 
 /*
@@ -376,8 +376,8 @@ static struct amdgpu_vm_pt *amdgpu_vm_pt_parent(struct amdgpu_vm_pt *pt)
 */
 struct amdgpu_vm_pt_cursor {
 	uint64_t pfn;
-	struct amdgpu_vm_pt *parent;
-	struct amdgpu_vm_pt *entry;
+	struct amdgpu_vm_bo_base *parent;
+	struct amdgpu_vm_bo_base *entry;
 	unsigned level;
 };
 
@@ -416,17 +416,17 @@ static bool amdgpu_vm_pt_descendant(struct amdgpu_device *adev,
 {
 	unsigned mask, shift, idx;
 
-	if (!cursor->entry->entries)
+	if ((cursor->level == AMDGPU_VM_PTB) || !cursor->entry ||
+	    !cursor->entry->bo)
 		return false;
 
-	BUG_ON(!cursor->entry->base.bo);
 	mask = amdgpu_vm_entries_mask(adev, cursor->level);
 	shift = amdgpu_vm_level_shift(adev, cursor->level);
 
 	++cursor->level;
 	idx = (cursor->pfn >> shift) & mask;
 	cursor->parent = cursor->entry;
-	cursor->entry = &cursor->entry->entries[idx];
+	cursor->entry = &to_amdgpu_bo_vm(cursor->entry->bo)->entries[idx];
 	return true;
 }
 
@@ -453,7 +453,7 @@ static bool amdgpu_vm_pt_sibling(struct amdgpu_device *adev,
 	shift = amdgpu_vm_level_shift(adev, cursor->level - 1);
 	num_entries = amdgpu_vm_num_entries(adev, cursor->level - 1);
 
-	if (cursor->entry == &cursor->parent->entries[num_entries - 1])
+	if (cursor->entry == &to_amdgpu_bo_vm(cursor->parent->bo)->entries[num_entries - 1])
 		return false;
 
 	cursor->pfn += 1ULL << shift;
@@ -539,7 +539,7 @@ static void amdgpu_vm_pt_first_dfs(struct amdgpu_device *adev,
 * True when the search should continue, false otherwise.
 */
 static bool amdgpu_vm_pt_continue_dfs(struct amdgpu_vm_pt_cursor *start,
-				      struct amdgpu_vm_pt *entry)
+				      struct amdgpu_vm_bo_base *entry)
 {
 	return entry && (!start || entry != start->entry);
 }
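From here on the page-table walker drops the struct amdgpu_vm_pt wrapper: cursors hold plain amdgpu_vm_bo_base pointers, and child entries are reached through the owning BO via to_amdgpu_bo_vm(entry->bo)->entries[idx] instead of a cached entries pointer. A compact sketch of that containment chain with toy types, container_of spelled out by hand:

#include <stddef.h>
#include <stdio.h>

struct bo_base;                        /* one per (vm, bo) pairing */
struct bo      { struct bo_base *vm_bo; };
struct bo_base { struct bo *bo; };

/* A page-directory BO owns the array of child bases, mirroring
 * amdgpu_bo_vm's trailing entries[] member. */
struct bo_vm {
	struct bo bo;
	struct bo_base entries[4];
};

#define to_bo_vm(p) ((struct bo_vm *)((char *)(p) - offsetof(struct bo_vm, bo)))

int main(void)
{
	struct bo_vm root = {0};
	root.bo.vm_bo = &root.entries[0];   /* toy wiring only */

	/* Descend one level, as amdgpu_vm_pt_descendant() now does. */
	struct bo_base *entry = &to_bo_vm(&root.bo)->entries[2];
	printf("child index = %td\n", entry - to_bo_vm(&root.bo)->entries);
	return 0;
}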
@ -590,7 +590,7 @@ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
|
|||
struct amdgpu_bo_list_entry *entry)
|
||||
{
|
||||
entry->priority = 0;
|
||||
entry->tv.bo = &vm->root.base.bo->tbo;
|
||||
entry->tv.bo = &vm->root.bo->tbo;
|
||||
/* Two for VM updates, one for TTM and one for the CS job */
|
||||
entry->tv.num_shared = 4;
|
||||
entry->user_pages = NULL;
|
||||
|
@ -622,7 +622,7 @@ void amdgpu_vm_del_from_lru_notify(struct ttm_buffer_object *bo)
|
|||
for (bo_base = abo->vm_bo; bo_base; bo_base = bo_base->next) {
|
||||
struct amdgpu_vm *vm = bo_base->vm;
|
||||
|
||||
if (abo->tbo.base.resv == vm->root.base.bo->tbo.base.resv)
|
||||
if (abo->tbo.base.resv == vm->root.bo->tbo.base.resv)
|
||||
vm->bulk_moveable = false;
|
||||
}
|
||||
|
||||
|
@ -781,11 +781,11 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
|
|||
entries -= ats_entries;
|
||||
|
||||
} else {
|
||||
struct amdgpu_vm_pt *pt;
|
||||
struct amdgpu_vm_bo_base *pt;
|
||||
|
||||
pt = container_of(ancestor->vm_bo, struct amdgpu_vm_pt, base);
|
||||
pt = ancestor->vm_bo;
|
||||
ats_entries = amdgpu_vm_num_ats_entries(adev);
|
||||
if ((pt - vm->root.entries) >= ats_entries) {
|
||||
if ((pt - to_amdgpu_bo_vm(vm->root.bo)->entries) >= ats_entries) {
|
||||
ats_entries = 0;
|
||||
} else {
|
||||
ats_entries = entries;
|
||||
|
@ -902,8 +902,8 @@ static int amdgpu_vm_pt_create(struct amdgpu_device *adev,
|
|||
|
||||
bp.type = ttm_bo_type_kernel;
|
||||
bp.no_wait_gpu = immediate;
|
||||
if (vm->root.base.bo)
|
||||
bp.resv = vm->root.base.bo->tbo.base.resv;
|
||||
if (vm->root.bo)
|
||||
bp.resv = vm->root.bo->tbo.base.resv;
|
||||
|
||||
r = amdgpu_bo_create_vm(adev, &bp, vmbo);
|
||||
if (r)
|
||||
|
@ -938,7 +938,7 @@ static int amdgpu_vm_pt_create(struct amdgpu_device *adev,
|
|||
}
|
||||
|
||||
(*vmbo)->shadow->parent = amdgpu_bo_ref(bo);
|
||||
amdgpu_bo_add_to_shadow_list((*vmbo)->shadow);
|
||||
amdgpu_bo_add_to_shadow_list(*vmbo);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -962,19 +962,13 @@ static int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
|
|||
struct amdgpu_vm_pt_cursor *cursor,
|
||||
bool immediate)
|
||||
{
|
||||
struct amdgpu_vm_pt *entry = cursor->entry;
|
||||
struct amdgpu_vm_bo_base *entry = cursor->entry;
|
||||
struct amdgpu_bo *pt_bo;
|
||||
struct amdgpu_bo_vm *pt;
|
||||
int r;
|
||||
|
||||
if (entry->base.bo) {
|
||||
if (cursor->level < AMDGPU_VM_PTB)
|
||||
entry->entries =
|
||||
to_amdgpu_bo_vm(entry->base.bo)->entries;
|
||||
else
|
||||
entry->entries = NULL;
|
||||
if (entry->bo)
|
||||
return 0;
|
||||
}
|
||||
|
||||
r = amdgpu_vm_pt_create(adev, vm, cursor->level, immediate, &pt);
|
||||
if (r)
|
||||
|
@ -984,13 +978,8 @@ static int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
|
|||
* freeing them up in the wrong order.
|
||||
*/
|
||||
pt_bo = &pt->bo;
|
||||
pt_bo->parent = amdgpu_bo_ref(cursor->parent->base.bo);
|
||||
amdgpu_vm_bo_base_init(&entry->base, vm, pt_bo);
|
||||
if (cursor->level < AMDGPU_VM_PTB)
|
||||
entry->entries = pt->entries;
|
||||
else
|
||||
entry->entries = NULL;
|
||||
|
||||
pt_bo->parent = amdgpu_bo_ref(cursor->parent->bo);
|
||||
amdgpu_vm_bo_base_init(entry, vm, pt_bo);
|
||||
r = amdgpu_vm_clear_bo(adev, vm, pt, immediate);
|
||||
if (r)
|
||||
goto error_free_pt;
|
||||
|
@ -1008,18 +997,17 @@ static int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
|
|||
*
|
||||
* @entry: PDE to free
|
||||
*/
|
||||
static void amdgpu_vm_free_table(struct amdgpu_vm_pt *entry)
|
||||
static void amdgpu_vm_free_table(struct amdgpu_vm_bo_base *entry)
|
||||
{
|
||||
struct amdgpu_bo *shadow;
|
||||
|
||||
if (entry->base.bo) {
|
||||
shadow = amdgpu_bo_shadowed(entry->base.bo);
|
||||
entry->base.bo->vm_bo = NULL;
|
||||
list_del(&entry->base.vm_status);
|
||||
amdgpu_bo_unref(&shadow);
|
||||
amdgpu_bo_unref(&entry->base.bo);
|
||||
}
|
||||
entry->entries = NULL;
|
||||
if (!entry->bo)
|
||||
return;
|
||||
shadow = amdgpu_bo_shadowed(entry->bo);
|
||||
entry->bo->vm_bo = NULL;
|
||||
list_del(&entry->vm_status);
|
||||
amdgpu_bo_unref(&shadow);
|
||||
amdgpu_bo_unref(&entry->bo);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -1036,7 +1024,7 @@ static void amdgpu_vm_free_pts(struct amdgpu_device *adev,
|
|||
struct amdgpu_vm_pt_cursor *start)
|
||||
{
|
||||
struct amdgpu_vm_pt_cursor cursor;
|
||||
struct amdgpu_vm_pt *entry;
|
||||
struct amdgpu_vm_bo_base *entry;
|
||||
|
||||
vm->bulk_moveable = false;
|
||||
|
||||
|
@ -1304,10 +1292,10 @@ uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
|
|||
*/
|
||||
static int amdgpu_vm_update_pde(struct amdgpu_vm_update_params *params,
|
||||
struct amdgpu_vm *vm,
|
||||
struct amdgpu_vm_pt *entry)
|
||||
struct amdgpu_vm_bo_base *entry)
|
||||
{
|
||||
struct amdgpu_vm_pt *parent = amdgpu_vm_pt_parent(entry);
|
||||
struct amdgpu_bo *bo = parent->base.bo, *pbo;
|
||||
struct amdgpu_vm_bo_base *parent = amdgpu_vm_pt_parent(entry);
|
||||
struct amdgpu_bo *bo = parent->bo, *pbo;
|
||||
uint64_t pde, pt, flags;
|
||||
unsigned level;
|
||||
|
||||
|
@ -1315,8 +1303,8 @@ static int amdgpu_vm_update_pde(struct amdgpu_vm_update_params *params,
|
|||
pbo = pbo->parent;
|
||||
|
||||
level += params->adev->vm_manager.root_level;
|
||||
amdgpu_gmc_get_pde_for_bo(entry->base.bo, level, &pt, &flags);
|
||||
pde = (entry - parent->entries) * 8;
|
||||
amdgpu_gmc_get_pde_for_bo(entry->bo, level, &pt, &flags);
|
||||
pde = (entry - to_amdgpu_bo_vm(parent->bo)->entries) * 8;
|
||||
return vm->update_funcs->update(params, to_amdgpu_bo_vm(bo), pde, pt,
|
||||
1, 0, flags);
|
||||
}
|
||||
|
@ -1333,11 +1321,11 @@ static void amdgpu_vm_invalidate_pds(struct amdgpu_device *adev,
				     struct amdgpu_vm *vm)
{
	struct amdgpu_vm_pt_cursor cursor;
	struct amdgpu_vm_pt *entry;
	struct amdgpu_vm_bo_base *entry;

	for_each_amdgpu_vm_pt_dfs_safe(adev, vm, NULL, cursor, entry)
		if (entry->base.bo && !entry->base.moved)
			amdgpu_vm_bo_relocated(&entry->base);
		if (entry->bo && !entry->moved)
			amdgpu_vm_bo_relocated(entry);
}

/**

@ -1371,11 +1359,12 @@ int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
		return r;

	while (!list_empty(&vm->relocated)) {
		struct amdgpu_vm_pt *entry;
		struct amdgpu_vm_bo_base *entry;

		entry = list_first_entry(&vm->relocated, struct amdgpu_vm_pt,
					 base.vm_status);
		amdgpu_vm_bo_idle(&entry->base);
		entry = list_first_entry(&vm->relocated,
					 struct amdgpu_vm_bo_base,
					 vm_status);
		amdgpu_vm_bo_idle(entry);

		r = amdgpu_vm_update_pde(&params, vm, entry);
		if (r)

@ -1555,7 +1544,7 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
			continue;
		}

		pt = cursor.entry->base.bo;
		pt = cursor.entry->bo;
		if (!pt) {
			/* We need all PDs and PTs for mapping something, */
			if (flags & AMDGPU_PTE_VALID)

@ -1567,7 +1556,7 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
			if (!amdgpu_vm_pt_ancestor(&cursor))
				return -EINVAL;

			pt = cursor.entry->base.bo;
			pt = cursor.entry->bo;
			shift = parent_shift;
			frag_end = max(frag_end, ALIGN(frag_start + 1,
				       1ULL << shift));

@ -1622,7 +1611,7 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
			 */
			while (cursor.pfn < frag_start) {
				/* Make sure previous mapping is freed */
				if (cursor.entry->base.bo) {
				if (cursor.entry->bo) {
					params->table_freed = true;
					amdgpu_vm_free_pts(adev, params->vm, &cursor);
				}

@ -1704,7 +1693,7 @@ int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
	if (!unlocked && !dma_fence_is_signaled(vm->last_unlocked)) {
		struct dma_fence *tmp = dma_fence_get_stub();

		amdgpu_bo_fence(vm->root.base.bo, vm->last_unlocked, true);
		amdgpu_bo_fence(vm->root.bo, vm->last_unlocked, true);
		swap(vm->last_unlocked, tmp);
		dma_fence_put(tmp);
	}

@ -1850,7 +1839,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,

	if (clear || !bo) {
		mem = NULL;
		resv = vm->root.base.bo->tbo.base.resv;
		resv = vm->root.bo->tbo.base.resv;
	} else {
		struct drm_gem_object *obj = &bo->tbo.base;

@ -1881,7 +1870,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
	}

	if (clear || (bo && bo->tbo.base.resv ==
		      vm->root.base.bo->tbo.base.resv))
		      vm->root.bo->tbo.base.resv))
		last_update = &vm->last_update;
	else
		last_update = &bo_va->last_pt_update;

@ -1923,7 +1912,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
	 * the evicted list so that it gets validated again on the
	 * next command submission.
	 */
	if (bo && bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv) {
	if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv) {
		uint32_t mem_type = bo->tbo.resource->mem_type;

		if (!(bo->preferred_domains &

@ -2060,7 +2049,7 @@ static void amdgpu_vm_free_mapping(struct amdgpu_device *adev,
 */
static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
	struct dma_resv *resv = vm->root.base.bo->tbo.base.resv;
	struct dma_resv *resv = vm->root.bo->tbo.base.resv;
	struct dma_fence *excl, **shared;
	unsigned i, shared_count;
	int r;

@ -2106,7 +2095,7 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
			  struct amdgpu_vm *vm,
			  struct dma_fence **fence)
{
	struct dma_resv *resv = vm->root.base.bo->tbo.base.resv;
	struct dma_resv *resv = vm->root.bo->tbo.base.resv;
	struct amdgpu_bo_va_mapping *mapping;
	uint64_t init_pte_value = 0;
	struct dma_fence *f = NULL;

@ -2265,7 +2254,7 @@ static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev,
	if (mapping->flags & AMDGPU_PTE_PRT)
		amdgpu_vm_prt_get(adev);

	if (bo && bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv &&
	if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv &&
	    !bo_va->base.moved) {
		list_move(&bo_va->base.vm_status, &vm->moved);
	}

@ -2627,7 +2616,7 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
	struct amdgpu_vm_bo_base **base;

	if (bo) {
		if (bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv)
		if (bo->tbo.base.resv == vm->root.bo->tbo.base.resv)
			vm->bulk_moveable = false;

		for (base = &bo_va->base.bo->vm_bo; *base;

@ -2721,7 +2710,7 @@ void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
	for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) {
		struct amdgpu_vm *vm = bo_base->vm;

		if (evicted && bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv) {
		if (evicted && bo->tbo.base.resv == vm->root.bo->tbo.base.resv) {
			amdgpu_vm_bo_evicted(bo_base);
			continue;
		}

@ -2732,7 +2721,7 @@ void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,

		if (bo->tbo.type == ttm_bo_type_kernel)
			amdgpu_vm_bo_relocated(bo_base);
		else if (bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv)
		else if (bo->tbo.base.resv == vm->root.bo->tbo.base.resv)
			amdgpu_vm_bo_moved(bo_base);
		else
			amdgpu_vm_bo_invalidated(bo_base);

@ -2862,7 +2851,7 @@ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
 */
long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
{
	timeout = dma_resv_wait_timeout(vm->root.base.bo->tbo.base.resv, true,
	timeout = dma_resv_wait_timeout(vm->root.bo->tbo.base.resv, true,
					true, timeout);
	if (timeout <= 0)
		return timeout;

@ -2948,13 +2937,13 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, u32 pasid)
	if (r)
		goto error_unreserve;

	amdgpu_vm_bo_base_init(&vm->root.base, vm, root_bo);
	amdgpu_vm_bo_base_init(&vm->root, vm, root_bo);

	r = amdgpu_vm_clear_bo(adev, vm, root, false);
	if (r)
		goto error_unreserve;

	amdgpu_bo_unreserve(vm->root.base.bo);
	amdgpu_bo_unreserve(vm->root.bo);

	if (pasid) {
		unsigned long flags;

@ -2974,12 +2963,12 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, u32 pasid)
	return 0;

error_unreserve:
	amdgpu_bo_unreserve(vm->root.base.bo);
	amdgpu_bo_unreserve(vm->root.bo);

error_free_root:
	amdgpu_bo_unref(&root->shadow);
	amdgpu_bo_unref(&root_bo);
	vm->root.base.bo = NULL;
	vm->root.bo = NULL;

error_free_delayed:
	dma_fence_put(vm->last_unlocked);

@ -3005,17 +2994,14 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, u32 pasid)
 * 0 if this VM is clean
 */
static int amdgpu_vm_check_clean_reserved(struct amdgpu_device *adev,
					  struct amdgpu_vm *vm)
					  struct amdgpu_vm *vm)
{
	enum amdgpu_vm_level root = adev->vm_manager.root_level;
	unsigned int entries = amdgpu_vm_num_entries(adev, root);
	unsigned int i = 0;

	if (!(vm->root.entries))
		return 0;

	for (i = 0; i < entries; i++) {
		if (vm->root.entries[i].base.bo)
		if (to_amdgpu_bo_vm(vm->root.bo)->entries[i].bo)
			return -EINVAL;
	}

@ -3049,7 +3035,7 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm,
	bool pte_support_ats = (adev->asic_type == CHIP_RAVEN);
	int r;

	r = amdgpu_bo_reserve(vm->root.base.bo, true);
	r = amdgpu_bo_reserve(vm->root.bo, true);
	if (r)
		return r;

@ -3077,7 +3063,7 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm,
	if (pte_support_ats != vm->pte_support_ats) {
		vm->pte_support_ats = pte_support_ats;
		r = amdgpu_vm_clear_bo(adev, vm,
				       to_amdgpu_bo_vm(vm->root.base.bo),
				       to_amdgpu_bo_vm(vm->root.bo),
				       false);
		if (r)
			goto free_idr;

@ -3094,7 +3080,7 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm,

	if (vm->use_cpu_for_update) {
		/* Sync with last SDMA update/clear before switching to CPU */
		r = amdgpu_bo_sync_wait(vm->root.base.bo,
		r = amdgpu_bo_sync_wait(vm->root.bo,
					AMDGPU_FENCE_OWNER_UNDEFINED, true);
		if (r)
			goto free_idr;

@ -3122,7 +3108,7 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm,
	}

	/* Free the shadow bo for compute VM */
	amdgpu_bo_unref(&to_amdgpu_bo_vm(vm->root.base.bo)->shadow);
	amdgpu_bo_unref(&to_amdgpu_bo_vm(vm->root.bo)->shadow);

	if (pasid)
		vm->pasid = pasid;

@ -3138,7 +3124,7 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm,
		spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
	}
unreserve_bo:
	amdgpu_bo_unreserve(vm->root.base.bo);
	amdgpu_bo_unreserve(vm->root.bo);
	return r;
}

@ -3181,7 +3167,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)

	amdgpu_amdkfd_gpuvm_destroy_cb(adev, vm);

	root = amdgpu_bo_ref(vm->root.base.bo);
	root = amdgpu_bo_ref(vm->root.bo);
	amdgpu_bo_reserve(root, true);
	if (vm->pasid) {
		unsigned long flags;

@ -3208,7 +3194,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
	amdgpu_vm_free_pts(adev, vm, NULL);
	amdgpu_bo_unreserve(root);
	amdgpu_bo_unref(&root);
	WARN_ON(vm->root.base.bo);
	WARN_ON(vm->root.bo);

	drm_sched_entity_destroy(&vm->immediate);
	drm_sched_entity_destroy(&vm->delayed);

@ -3325,7 +3311,7 @@ int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
		/* Wait vm idle to make sure the vmid set in SPM_VMID is
		 * not referenced anymore.
		 */
		r = amdgpu_bo_reserve(fpriv->vm.root.base.bo, true);
		r = amdgpu_bo_reserve(fpriv->vm.root.bo, true);
		if (r)
			return r;

@ -3333,7 +3319,7 @@ int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
		if (r < 0)
			return r;

		amdgpu_bo_unreserve(fpriv->vm.root.base.bo);
		amdgpu_bo_unreserve(fpriv->vm.root.bo);
		amdgpu_vmid_free_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB_0);
		break;
	default:

@ -3399,19 +3385,20 @@ bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
{
	bool is_compute_context = false;
	struct amdgpu_bo *root;
	unsigned long irqflags;
	uint64_t value, flags;
	struct amdgpu_vm *vm;
	int r;

	spin_lock(&adev->vm_manager.pasid_lock);
	spin_lock_irqsave(&adev->vm_manager.pasid_lock, irqflags);
	vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
	if (vm) {
		root = amdgpu_bo_ref(vm->root.base.bo);
		root = amdgpu_bo_ref(vm->root.bo);
		is_compute_context = vm->is_compute_context;
	} else {
		root = NULL;
	}
	spin_unlock(&adev->vm_manager.pasid_lock);
	spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, irqflags);

	if (!root)
		return false;

@ -3429,11 +3416,11 @@ bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
		goto error_unref;

	/* Double check that the VM still exists */
	spin_lock(&adev->vm_manager.pasid_lock);
	spin_lock_irqsave(&adev->vm_manager.pasid_lock, irqflags);
	vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
	if (vm && vm->root.base.bo != root)
	if (vm && vm->root.bo != root)
		vm = NULL;
	spin_unlock(&adev->vm_manager.pasid_lock);
	spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, irqflags);
	if (!vm)
		goto error_unlock;
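/* Illustrative sketch (not from this commit; the lock and lookup below are
 * hypothetical): the spin_lock_irqsave() form adopted above. Saving and
 * restoring the interrupt flags makes the critical section safe to enter
 * from contexts where interrupts may already be disabled, instead of
 * unconditionally re-enabling them on unlock.
 */
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);

static void *demo_lookup(unsigned int key);	/* assumed helper */

static void *demo_find(unsigned int key)
{
	unsigned long irqflags;
	void *obj;

	spin_lock_irqsave(&demo_lock, irqflags);	/* saves IRQ state */
	obj = demo_lookup(key);				/* protected lookup */
	spin_unlock_irqrestore(&demo_lock, irqflags);	/* restores IRQ state */

	return obj;
}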
@ -152,13 +152,6 @@ struct amdgpu_vm_bo_base {
	bool moved;
};

struct amdgpu_vm_pt {
	struct amdgpu_vm_bo_base base;

	/* array of page tables, one for each directory entry */
	struct amdgpu_vm_pt *entries;
};

/* provided by hw blocks that can write ptes, e.g., sdma */
struct amdgpu_vm_pte_funcs {
	/* number of dw to reserve per operation */

@ -284,7 +277,7 @@ struct amdgpu_vm {
	struct list_head done;

	/* contains the page directory */
	struct amdgpu_vm_pt root;
	struct amdgpu_vm_bo_base root;
	struct dma_fence *last_update;

	/* Scheduler entities for page table updates */

@ -112,7 +112,7 @@ static int amdgpu_vm_sdma_commit(struct amdgpu_vm_update_params *p,
		swap(p->vm->last_unlocked, f);
		dma_fence_put(tmp);
	} else {
		amdgpu_bo_fence(p->vm->root.base.bo, f, true);
		amdgpu_bo_fence(p->vm->root.bo, f, true);
	}

	if (fence && !p->immediate)
@ -6970,8 +6970,12 @@ static int gfx_v10_0_kiq_init_register(struct amdgpu_ring *ring)
	if (ring->use_doorbell) {
		WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_LOWER,
			(adev->doorbell_index.kiq * 2) << 2);
		/* If GC has entered CGPG, ringing doorbell > first page doesn't
		 * wakeup GC. Enlarge CP_MEC_DOORBELL_RANGE_UPPER to workaround
		 * this issue.
		 */
		WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER,
			(adev->doorbell_index.userqueue_end * 2) << 2);
			(adev->doorbell.size - 4));
	}

	WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,

@ -3675,8 +3675,12 @@ static int gfx_v9_0_kiq_init_register(struct amdgpu_ring *ring)
	if (ring->use_doorbell) {
		WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_LOWER,
			(adev->doorbell_index.kiq * 2) << 2);
		/* If GC has entered CGPG, ringing doorbell > first page doesn't
		 * wakeup GC. Enlarge CP_MEC_DOORBELL_RANGE_UPPER to workaround
		 * this issue.
		 */
		WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER,
			(adev->doorbell_index.userqueue_end * 2) << 2);
			(adev->doorbell.size - 4));
	}

	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
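/* Worked example (assumed numbers, not from this commit): with a
 * hypothetical 8 KiB doorbell BAR, adev->doorbell.size is 0x2000, so the
 * workaround above programs CP_MEC_DOORBELL_RANGE_UPPER with
 * 0x2000 - 4 = 0x1ffc, the offset of the last dword in the BAR. Any
 * doorbell write inside the BAR can then wake the GC out of CGPG, not
 * just writes below the end of the user-queue range.
 */
static inline unsigned int demo_doorbell_range_upper(unsigned int bar_size)
{
	return bar_size - 4;	/* last valid dword offset, e.g. 0x1ffc */
}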
@ -332,11 +332,16 @@ struct psp_gfx_uresp_fwar_db_info
	uint32_t fwar_db_addr_hi;
};

/* Command-specific response for boot config. */
struct psp_gfx_uresp_bootcfg {
	uint32_t boot_cfg;	/* boot config data */
};

/* Union of command-specific responses for GPCOM ring. */
union psp_gfx_uresp
{
	struct psp_gfx_uresp_reserved reserved;
	struct psp_gfx_uresp_fwar_db_info fwar_db_info;
union psp_gfx_uresp {
	struct psp_gfx_uresp_reserved reserved;
	struct psp_gfx_uresp_bootcfg boot_cfg;
	struct psp_gfx_uresp_fwar_db_info fwar_db_info;
};

/* Structure of GFX Response buffer.

@ -461,6 +461,7 @@ static int psp_v11_0_ring_create(struct psp_context *psp,
	struct amdgpu_device *adev = psp->adev;

	if (amdgpu_sriov_vf(adev)) {
		ring->ring_wptr = 0;
		ret = psp_v11_0_ring_stop(psp, ring_type);
		if (ret) {
			DRM_ERROR("psp_v11_0_ring_stop_sriov failed!\n");

@ -227,6 +227,7 @@ static int psp_v3_1_ring_create(struct psp_context *psp,
	psp_v3_1_reroute_ih(psp);

	if (amdgpu_sriov_vf(adev)) {
		ring->ring_wptr = 0;
		ret = psp_v3_1_ring_stop(psp, ring_type);
		if (ret) {
			DRM_ERROR("psp_v3_1_ring_stop_sriov failed!\n");
@ -1566,13 +1566,29 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
				i, args->n_devices);
			goto unmap_memory_from_gpu_failed;
		}
		kfd_flush_tlb(peer_pdd, TLB_FLUSH_HEAVYWEIGHT);
		args->n_success = i+1;
	}
	kfree(devices_arr);

	mutex_unlock(&p->mutex);

	err = amdgpu_amdkfd_gpuvm_sync_memory(dev->kgd, (struct kgd_mem *) mem, true);
	if (err) {
		pr_debug("Sync memory failed, wait interrupted by user signal\n");
		goto sync_memory_failed;
	}

	/* Flush TLBs after waiting for the page table updates to complete */
	for (i = 0; i < args->n_devices; i++) {
		peer = kfd_device_by_id(devices_arr[i]);
		if (WARN_ON_ONCE(!peer))
			continue;
		peer_pdd = kfd_get_process_device_data(peer, p);
		if (WARN_ON_ONCE(!peer_pdd))
			continue;
		kfd_flush_tlb(peer_pdd, TLB_FLUSH_HEAVYWEIGHT);
	}

	kfree(devices_arr);

	return 0;

bind_process_to_device_failed:

@ -1580,6 +1596,7 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
unmap_memory_from_gpu_failed:
	mutex_unlock(&p->mutex);
copy_from_user_failed:
sync_memory_failed:
	kfree(devices_arr);
	return err;
}
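/* Sketch of the ordering established by the hunk above (illustrative only;
 * all demo_* helpers are hypothetical): TLBs are flushed only after the
 * page-table updates have completed on the GPU, so a stale translation
 * cannot be re-fetched between the unmap and the flush.
 */
static void demo_unmap_from_gpu(int dev);	/* queues PT updates */
static int demo_wait_for_pt_updates(void);	/* like ..._gpuvm_sync_memory() */
static void demo_flush_tlb(int dev);

static int demo_unmap_then_flush(int n_devices)
{
	int i, err;

	for (i = 0; i < n_devices; i++)
		demo_unmap_from_gpu(i);

	err = demo_wait_for_pt_updates();
	if (err)
		return err;		/* do not flush against pending updates */

	for (i = 0; i < n_devices; i++)
		demo_flush_tlb(i);	/* safe: updates are now visible */

	return 0;
}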
@ -1780,9 +1797,6 @@ static int kfd_ioctl_svm(struct file *filep, struct kfd_process *p, void *data)
	struct kfd_ioctl_svm_args *args = data;
	int r = 0;

	if (p->svm_disabled)
		return -EPERM;

	pr_debug("start 0x%llx size 0x%llx op 0x%x nattr 0x%x\n",
		 args->start_addr, args->size, args->op, args->nattr);
@ -486,9 +486,6 @@ static int destroy_queue_nocpsch_locked(struct device_queue_manager *dqm,
	if (retval == -ETIME)
		qpd->reset_wavefronts = true;

	mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);

	list_del(&q->list);
	if (list_empty(&qpd->queues_list)) {
		if (qpd->reset_wavefronts) {

@ -523,6 +520,8 @@ static int destroy_queue_nocpsch(struct device_queue_manager *dqm,
	int retval;
	uint64_t sdma_val = 0;
	struct kfd_process_device *pdd = qpd_to_pdd(qpd);
	struct mqd_manager *mqd_mgr =
		dqm->mqd_mgrs[get_mqd_type_from_queue_type(q->properties.type)];

	/* Get the SDMA queue stats */
	if ((q->properties.type == KFD_QUEUE_TYPE_SDMA) ||

@ -540,6 +539,8 @@ static int destroy_queue_nocpsch(struct device_queue_manager *dqm,
		pdd->sdma_past_activity_counter += sdma_val;
	dqm_unlock(dqm);

	mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);

	return retval;
}

@ -1629,7 +1630,7 @@ static bool set_cache_memory_policy(struct device_queue_manager *dqm,
static int process_termination_nocpsch(struct device_queue_manager *dqm,
		struct qcm_process_device *qpd)
{
	struct queue *q, *next;
	struct queue *q;
	struct device_process_node *cur, *next_dpn;
	int retval = 0;
	bool found = false;

@ -1637,12 +1638,19 @@ static int process_termination_nocpsch(struct device_queue_manager *dqm,
	dqm_lock(dqm);

	/* Clear all user mode queues */
	list_for_each_entry_safe(q, next, &qpd->queues_list, list) {
	while (!list_empty(&qpd->queues_list)) {
		struct mqd_manager *mqd_mgr;
		int ret;

		q = list_first_entry(&qpd->queues_list, struct queue, list);
		mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
				q->properties.type)];
		ret = destroy_queue_nocpsch_locked(dqm, qpd, q);
		if (ret)
			retval = ret;
		dqm_unlock(dqm);
		mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
		dqm_lock(dqm);
	}

	/* Unregister process */

@ -1674,29 +1682,27 @@ static int get_wave_state(struct device_queue_manager *dqm,
			  u32 *save_area_used_size)
{
	struct mqd_manager *mqd_mgr;
	int r;

	dqm_lock(dqm);

	if (q->properties.type != KFD_QUEUE_TYPE_COMPUTE ||
	    q->properties.is_active || !q->device->cwsr_enabled) {
		r = -EINVAL;
		goto dqm_unlock;
	}

	mqd_mgr = dqm->mqd_mgrs[KFD_MQD_TYPE_CP];

	if (!mqd_mgr->get_wave_state) {
		r = -EINVAL;
		goto dqm_unlock;
	if (q->properties.type != KFD_QUEUE_TYPE_COMPUTE ||
	    q->properties.is_active || !q->device->cwsr_enabled ||
	    !mqd_mgr->get_wave_state) {
		dqm_unlock(dqm);
		return -EINVAL;
	}

	r = mqd_mgr->get_wave_state(mqd_mgr, q->mqd, ctl_stack,
			ctl_stack_used_size, save_area_used_size);

dqm_unlock:
	dqm_unlock(dqm);
	return r;

	/*
	 * get_wave_state is outside the dqm lock to prevent circular locking
	 * and the queue should be protected against destruction by the process
	 * lock.
	 */
	return mqd_mgr->get_wave_state(mqd_mgr, q->mqd, ctl_stack,
			ctl_stack_used_size, save_area_used_size);
}

static int process_termination_cpsch(struct device_queue_manager *dqm,
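/* Illustrative pattern (not from this commit; all names hypothetical): the
 * get_wave_state() rework above validates the queue under the dqm lock,
 * drops the lock, and only then performs the MQD read, so the dqm lock can
 * no longer take part in a circular locking chain with locks taken inside
 * the state read itself.
 */
#include <linux/errno.h>

struct demo_mgr;
struct demo_queue;

static void demo_lock(struct demo_mgr *mgr);
static void demo_unlock(struct demo_mgr *mgr);
static int demo_queue_ok(const struct demo_queue *q);
static int demo_fetch_wave_state(struct demo_queue *q);	/* may sleep */

static int demo_read_wave_state(struct demo_mgr *mgr, struct demo_queue *q)
{
	demo_lock(mgr);
	if (!demo_queue_ok(q)) {
		demo_unlock(mgr);
		return -EINVAL;
	}
	demo_unlock(mgr);

	/* outside the lock; the caller's process lock keeps q alive */
	return demo_fetch_wave_state(q);
}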
@ -405,10 +405,6 @@ int kfd_init_apertures(struct kfd_process *process)
		case CHIP_POLARIS12:
		case CHIP_VEGAM:
			kfd_init_apertures_vi(pdd, id);
			/* VI GPUs cannot support SVM with only
			 * 40 bits of virtual address space.
			 */
			process->svm_disabled = true;
			break;
		case CHIP_VEGA10:
		case CHIP_VEGA12:

@ -743,6 +743,7 @@ struct svm_range_list {
	spinlock_t deferred_list_lock;
	atomic_t evicted_ranges;
	struct delayed_work restore_work;
	DECLARE_BITMAP(bitmap_supported, MAX_GPU_INSTANCE);
};

/* Process data */

@ -826,7 +827,6 @@ struct kfd_process {

	/* shared virtual memory registered by this process */
	struct svm_range_list svms;
	bool svm_disabled;

	bool xnack_enabled;
};

@ -1260,7 +1260,6 @@ static struct kfd_process *create_process(const struct task_struct *thread)
	process->mm = thread->mm;
	process->lead_thread = thread->group_leader;
	process->n_pdds = 0;
	process->svm_disabled = false;
	INIT_DELAYED_WORK(&process->eviction_work, evict_process_worker);
	INIT_DELAYED_WORK(&process->restore_work, restore_process_worker);
	process->last_restore_timestamp = get_jiffies_64();
@ -281,7 +281,8 @@ svm_range *svm_range_new(struct svm_range_list *svms, uint64_t start,

	p = container_of(svms, struct kfd_process, svms);
	if (p->xnack_enabled)
		bitmap_fill(prange->bitmap_access, MAX_GPU_INSTANCE);
		bitmap_copy(prange->bitmap_access, svms->bitmap_supported,
			    MAX_GPU_INSTANCE);

	svm_range_set_default_attributes(&prange->preferred_loc,
					 &prange->prefetch_loc,

@ -577,36 +578,25 @@ svm_range_check_attr(struct kfd_process *p,
		     uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs)
{
	uint32_t i;
	int gpuidx;

	for (i = 0; i < nattr; i++) {
		uint32_t val = attrs[i].value;
		int gpuidx = MAX_GPU_INSTANCE;

		switch (attrs[i].type) {
		case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC:
			if (attrs[i].value != KFD_IOCTL_SVM_LOCATION_SYSMEM &&
			    attrs[i].value != KFD_IOCTL_SVM_LOCATION_UNDEFINED &&
			    kfd_process_gpuidx_from_gpuid(p,
							  attrs[i].value) < 0) {
				pr_debug("no GPU 0x%x found\n", attrs[i].value);
				return -EINVAL;
			}
			if (val != KFD_IOCTL_SVM_LOCATION_SYSMEM &&
			    val != KFD_IOCTL_SVM_LOCATION_UNDEFINED)
				gpuidx = kfd_process_gpuidx_from_gpuid(p, val);
			break;
		case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
			if (attrs[i].value != KFD_IOCTL_SVM_LOCATION_SYSMEM &&
			    kfd_process_gpuidx_from_gpuid(p,
							  attrs[i].value) < 0) {
				pr_debug("no GPU 0x%x found\n", attrs[i].value);
				return -EINVAL;
			}
			if (val != KFD_IOCTL_SVM_LOCATION_SYSMEM)
				gpuidx = kfd_process_gpuidx_from_gpuid(p, val);
			break;
		case KFD_IOCTL_SVM_ATTR_ACCESS:
		case KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE:
		case KFD_IOCTL_SVM_ATTR_NO_ACCESS:
			gpuidx = kfd_process_gpuidx_from_gpuid(p,
							       attrs[i].value);
			if (gpuidx < 0) {
				pr_debug("no GPU 0x%x found\n", attrs[i].value);
				return -EINVAL;
			}
			gpuidx = kfd_process_gpuidx_from_gpuid(p, val);
			break;
		case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
			break;

@ -618,6 +608,15 @@ svm_range_check_attr(struct kfd_process *p,
			pr_debug("unknown attr type 0x%x\n", attrs[i].type);
			return -EINVAL;
		}

		if (gpuidx < 0) {
			pr_debug("no GPU 0x%x found\n", val);
			return -EINVAL;
		} else if (gpuidx < MAX_GPU_INSTANCE &&
			   !test_bit(gpuidx, p->svms.bitmap_supported)) {
			pr_debug("GPU 0x%x not supported\n", val);
			return -EINVAL;
		}
	}

	return 0;
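/* Sketch of the validation shape introduced above (illustrative; all DEMO_*
 * names are hypothetical): each case only computes a gpuidx, and a single
 * check after the switch rejects both unknown GPUs and GPUs without SVM
 * support, replacing the duplicated per-case error handling.
 */
#include <linux/errno.h>

#define DEMO_MAX_GPU 32

static int demo_gpuidx_from_gpuid(unsigned int gpuid);
static int demo_gpu_supports_svm(int gpuidx);

static int demo_check_one_attr(int type, unsigned int val)
{
	int gpuidx = DEMO_MAX_GPU;	/* sentinel: nothing to validate */

	switch (type) {
	case 0:		/* location-style: only device values name a GPU */
		if (val != 0)
			gpuidx = demo_gpuidx_from_gpuid(val);
		break;
	default:	/* access-style: the value is always a GPU id */
		gpuidx = demo_gpuidx_from_gpuid(val);
		break;
	}

	if (gpuidx < 0)
		return -EINVAL;		/* no such GPU */
	if (gpuidx < DEMO_MAX_GPU && !demo_gpu_supports_svm(gpuidx))
		return -EINVAL;		/* GPU exists but lacks SVM */
	return 0;
}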
@ -1274,7 +1273,7 @@ static int svm_range_reserve_bos(struct svm_validate_context *ctx)
		adev = (struct amdgpu_device *)pdd->dev->kgd;
		vm = drm_priv_to_vm(pdd->drm_priv);

		ctx->tv[gpuidx].bo = &vm->root.base.bo->tbo;
		ctx->tv[gpuidx].bo = &vm->root.bo->tbo;
		ctx->tv[gpuidx].num_shared = 4;
		list_add(&ctx->tv[gpuidx].head, &ctx->validate_list);
	}

@ -1855,7 +1854,7 @@ static void svm_range_drain_retry_fault(struct svm_range_list *svms)

	p = container_of(svms, struct kfd_process, svms);

	for (i = 0; i < p->n_pdds; i++) {
	for_each_set_bit(i, svms->bitmap_supported, p->n_pdds) {
		pdd = p->pdds[i];
		if (!pdd)
			continue;

@ -2325,6 +2324,11 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
	bool write_locked = false;
	int r = 0;

	if (!KFD_IS_SVM_API_SUPPORTED(adev->kfd.dev)) {
		pr_debug("device does not support SVM\n");
		return -EFAULT;
	}

	p = kfd_lookup_process_by_pasid(pasid);
	if (!p) {
		pr_debug("kfd process not founded pasid 0x%x\n", pasid);

@ -2472,6 +2476,7 @@ void svm_range_list_fini(struct kfd_process *p)
int svm_range_list_init(struct kfd_process *p)
{
	struct svm_range_list *svms = &p->svms;
	int i;

	svms->objects = RB_ROOT_CACHED;
	mutex_init(&svms->lock);

@ -2482,6 +2487,10 @@ int svm_range_list_init(struct kfd_process *p)
	INIT_LIST_HEAD(&svms->deferred_range_list);
	spin_lock_init(&svms->deferred_list_lock);

	for (i = 0; i < p->n_pdds; i++)
		if (KFD_IS_SVM_API_SUPPORTED(p->pdds[i]->dev))
			bitmap_set(svms->bitmap_supported, i, 1);

	return 0;
}

@ -2978,14 +2987,15 @@ svm_range_get_attr(struct kfd_process *p, uint64_t start, uint64_t size,
		svm_range_set_default_attributes(&location, &prefetch_loc,
						 &granularity, &flags);
		if (p->xnack_enabled)
			bitmap_fill(bitmap_access, MAX_GPU_INSTANCE);
			bitmap_copy(bitmap_access, svms->bitmap_supported,
				    MAX_GPU_INSTANCE);
		else
			bitmap_zero(bitmap_access, MAX_GPU_INSTANCE);
		bitmap_zero(bitmap_aip, MAX_GPU_INSTANCE);
		goto fill_values;
	}
	bitmap_fill(bitmap_access, MAX_GPU_INSTANCE);
	bitmap_fill(bitmap_aip, MAX_GPU_INSTANCE);
	bitmap_copy(bitmap_access, svms->bitmap_supported, MAX_GPU_INSTANCE);
	bitmap_copy(bitmap_aip, svms->bitmap_supported, MAX_GPU_INSTANCE);

	while (node) {
		struct interval_tree_node *next;
@ -175,6 +175,11 @@ void svm_range_dma_unmap(struct device *dev, dma_addr_t *dma_addr,
void svm_range_free_dma_mappings(struct svm_range *prange);
void svm_range_prefault(struct svm_range *prange, struct mm_struct *mm);

/* SVM API and HMM page migration work together, device memory type
 * is initialized to not 0 when page migration register device memory.
 */
#define KFD_IS_SVM_API_SUPPORTED(dev) ((dev)->pgmap.type != 0)

#else

struct kfd_process;

@ -201,6 +206,8 @@ static inline int svm_range_schedule_evict_svm_bo(
	return -EINVAL;
}

#define KFD_IS_SVM_API_SUPPORTED(dev) false

#endif /* IS_ENABLED(CONFIG_HSA_AMD_SVM) */

#endif /* KFD_SVM_H_ */
@ -36,6 +36,7 @@
#include "kfd_topology.h"
#include "kfd_device_queue_manager.h"
#include "kfd_iommu.h"
#include "kfd_svm.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_ras.h"

@ -1441,10 +1442,7 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
	dev->node_props.capability |= (adev->ras_enabled != 0) ?
		HSA_CAP_RASEVENTNOTIFY : 0;

	/* SVM API and HMM page migration work together, device memory type
	 * is initialized to not 0 when page migration register device memory.
	 */
	if (adev->kfd.dev->pgmap.type != 0)
	if (KFD_IS_SVM_API_SUPPORTED(adev->kfd.dev))
		dev->node_props.capability |= HSA_CAP_SVMAPI_SUPPORTED;

	kfd_debug_print_topology();

@ -54,8 +54,8 @@
#define HSA_CAP_ASIC_REVISION_SHIFT 22
#define HSA_CAP_SRAM_EDCSUPPORTED 0x04000000
#define HSA_CAP_SVMAPI_SUPPORTED 0x08000000

#define HSA_CAP_RESERVED 0xf00f8000
#define HSA_CAP_FLAGS_COHERENTHOSTACCESS 0x10000000
#define HSA_CAP_RESERVED 0xe00f8000

struct kfd_node_properties {
	uint64_t hive_id;

@ -101,8 +101,7 @@ struct kfd_node_properties {

#define HSA_MEM_FLAGS_HOT_PLUGGABLE 0x00000001
#define HSA_MEM_FLAGS_NON_VOLATILE 0x00000002
#define HSA_MEM_FLAGS_COHERENTHOSTACCESS 0x00000004
#define HSA_MEM_FLAGS_RESERVED 0xfffffff8
#define HSA_MEM_FLAGS_RESERVED 0xfffffffc

struct kfd_mem_properties {
	struct list_head list;
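/* Worked bit layout for the mask changes above (derived from the values
 * shown; not an authoritative ABI statement): the new
 * HSA_CAP_FLAGS_COHERENTHOSTACCESS claims bit 28 (0x10000000), so that bit
 * is dropped from HSA_CAP_RESERVED: 0xf00f8000 & ~0x10000000 == 0xe00f8000.
 * On the memory-flags side the 0x00000004 flag is removed, so bit 2 returns
 * to the reserved set: 0xfffffff8 | 0x00000004 == 0xfffffffc.
 */
#define DEMO_CAP_COHERENTHOSTACCESS 0x10000000	/* bit 28 */
#define DEMO_CAP_RESERVED_OLD       0xf00f8000
#define DEMO_CAP_RESERVED_NEW \
	(DEMO_CAP_RESERVED_OLD & ~DEMO_CAP_COHERENTHOSTACCESS)	/* 0xe00f8000 */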
@ -28,7 +28,7 @@
AMDGPUDM = amdgpu_dm.o amdgpu_dm_irq.o amdgpu_dm_mst_types.o amdgpu_dm_color.o

ifneq ($(CONFIG_DRM_AMD_DC),)
AMDGPUDM += amdgpu_dm_services.o amdgpu_dm_helpers.o amdgpu_dm_pp_smu.o
AMDGPUDM += amdgpu_dm_services.o amdgpu_dm_helpers.o amdgpu_dm_pp_smu.o amdgpu_dm_psr.o
endif

ifdef CONFIG_DRM_AMD_DC_HDCP

@ -58,6 +58,7 @@
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif
#include "amdgpu_dm_psr.h"

#include "ivsrcid/ivsrcid_vislands30.h"

@ -213,12 +214,6 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
static void handle_cursor_update(struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state);

static void amdgpu_dm_set_psr_caps(struct dc_link *link);
static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm);

static const struct drm_format_info *
amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);

@ -10245,6 +10240,10 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
		    dm_old_crtc_state->dsc_force_changed == false)
			continue;

		ret = amdgpu_dm_verify_lut_sizes(new_crtc_state);
		if (ret)
			goto fail;

		if (!new_crtc_state->enable)
			continue;

@ -10725,136 +10724,6 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
			freesync_capable);
}

static void amdgpu_dm_set_psr_caps(struct dc_link *link)
{
	uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];

	if (!(link->connector_signal & SIGNAL_TYPE_EDP))
		return;
	if (link->type == dc_connection_none)
		return;
	if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
					dpcd_data, sizeof(dpcd_data))) {
		link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];

		if (dpcd_data[0] == 0) {
			link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
			link->psr_settings.psr_feature_enabled = false;
		} else {
			link->psr_settings.psr_version = DC_PSR_VERSION_1;
			link->psr_settings.psr_feature_enabled = true;
		}

		DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
	}
}

/*
 * amdgpu_dm_link_setup_psr() - configure psr link
 * @stream: stream state
 *
 * Return: true if success
 */
static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
{
	struct dc_link *link = NULL;
	struct psr_config psr_config = {0};
	struct psr_context psr_context = {0};
	bool ret = false;

	if (stream == NULL)
		return false;

	link = stream->link;

	psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;

	if (psr_config.psr_version > 0) {
		psr_config.psr_exit_link_training_required = 0x1;
		psr_config.psr_frame_capture_indication_req = 0;
		psr_config.psr_rfb_setup_time = 0x37;
		psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
		psr_config.allow_smu_optimizations = 0x0;

		ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);

	}
	DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_settings.psr_feature_enabled);

	return ret;
}

/*
 * amdgpu_dm_psr_enable() - enable psr f/w
 * @stream: stream state
 *
 * Return: true if success
 */
bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
{
	struct dc_link *link = stream->link;
	unsigned int vsync_rate_hz = 0;
	struct dc_static_screen_params params = {0};
	/* Calculate number of static frames before generating interrupt to
	 * enter PSR.
	 */
	// Init fail safe of 2 frames static
	unsigned int num_frames_static = 2;

	DRM_DEBUG_DRIVER("Enabling psr...\n");

	vsync_rate_hz = div64_u64(div64_u64((
			stream->timing.pix_clk_100hz * 100),
			stream->timing.v_total),
			stream->timing.h_total);

	/* Round up
	 * Calculate number of frames such that at least 30 ms of time has
	 * passed.
	 */
	if (vsync_rate_hz != 0) {
		unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;
		num_frames_static = (30000 / frame_time_microsec) + 1;
	}

	params.triggers.cursor_update = true;
	params.triggers.overlay_update = true;
	params.triggers.surface_update = true;
	params.num_frames = num_frames_static;

	dc_stream_set_static_screen_params(link->ctx->dc,
					   &stream, 1,
					   &params);

	return dc_link_set_psr_allow_active(link, true, false, false);
}

/*
 * amdgpu_dm_psr_disable() - disable psr f/w
 * @stream: stream state
 *
 * Return: true if success
 */
static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
{

	DRM_DEBUG_DRIVER("Disabling psr...\n");

	return dc_link_set_psr_allow_active(stream->link, false, true, false);
}

/*
 * amdgpu_dm_psr_disable() - disable psr f/w
 * if psr is enabled on any stream
 *
 * Return: true if success
 */
static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm)
{
	DRM_DEBUG_DRIVER("Disabling psr if psr is enabled on any stream\n");
	return dc_set_psr_allow_active(dm->dc, false);
}

void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
{
	struct amdgpu_device *adev = drm_to_adev(dev);

@ -617,6 +617,7 @@ void amdgpu_dm_trigger_timing_sync(struct drm_device *dev);
#define MAX_COLOR_LEGACY_LUT_ENTRIES 256

void amdgpu_dm_init_color_mod(void);
int amdgpu_dm_verify_lut_sizes(const struct drm_crtc_state *crtc_state);
int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc);
int amdgpu_dm_update_plane_color_mgmt(struct dm_crtc_state *crtc,
				      struct dc_plane_state *dc_plane_state);
@ -284,6 +284,37 @@ static int __set_input_tf(struct dc_transfer_func *func,
	return res ? 0 : -ENOMEM;
}

/**
 * Verifies that the Degamma and Gamma LUTs attached to the |crtc_state| are of
 * the expected size.
 * Returns 0 on success.
 */
int amdgpu_dm_verify_lut_sizes(const struct drm_crtc_state *crtc_state)
{
	const struct drm_color_lut *lut = NULL;
	uint32_t size = 0;

	lut = __extract_blob_lut(crtc_state->degamma_lut, &size);
	if (lut && size != MAX_COLOR_LUT_ENTRIES) {
		DRM_DEBUG_DRIVER(
			"Invalid Degamma LUT size. Should be %u but got %u.\n",
			MAX_COLOR_LUT_ENTRIES, size);
		return -EINVAL;
	}

	lut = __extract_blob_lut(crtc_state->gamma_lut, &size);
	if (lut && size != MAX_COLOR_LUT_ENTRIES &&
	    size != MAX_COLOR_LEGACY_LUT_ENTRIES) {
		DRM_DEBUG_DRIVER(
			"Invalid Gamma LUT size. Should be %u (or %u for legacy) but got %u.\n",
			MAX_COLOR_LUT_ENTRIES, MAX_COLOR_LEGACY_LUT_ENTRIES,
			size);
		return -EINVAL;
	}

	return 0;
}

/**
 * amdgpu_dm_update_crtc_color_mgmt: Maps DRM color management to DC stream.
 * @crtc: amdgpu_dm crtc state

@ -317,14 +348,12 @@ int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc)
	bool is_legacy;
	int r;

	degamma_lut = __extract_blob_lut(crtc->base.degamma_lut, &degamma_size);
	if (degamma_lut && degamma_size != MAX_COLOR_LUT_ENTRIES)
		return -EINVAL;
	r = amdgpu_dm_verify_lut_sizes(&crtc->base);
	if (r)
		return r;

	degamma_lut = __extract_blob_lut(crtc->base.degamma_lut, &degamma_size);
	regamma_lut = __extract_blob_lut(crtc->base.gamma_lut, &regamma_size);
	if (regamma_lut && regamma_size != MAX_COLOR_LUT_ENTRIES &&
	    regamma_size != MAX_COLOR_LEGACY_LUT_ENTRIES)
		return -EINVAL;

	has_degamma =
		degamma_lut && !__is_lut_linear(degamma_lut, degamma_size);
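/* Usage sketch (illustrative; the 4096/256 entry counts are assumptions
 * matching MAX_COLOR_LUT_ENTRIES and MAX_COLOR_LEGACY_LUT_ENTRIES as used
 * by the checks above): a LUT blob passes verification only if it decodes
 * to exactly the expected number of entries. The struct below is a
 * hypothetical stand-in for drm_color_lut.
 */
#include <linux/errno.h>
#include <linux/types.h>

struct demo_color_lut {	/* mirrors drm_color_lut: four 16-bit channels */
	__u16 red, green, blue, reserved;
};

static int demo_lut_size_ok(size_t blob_len, int is_gamma)
{
	size_t entries = blob_len / sizeof(struct demo_color_lut);

	if (entries == 4096)
		return 0;		/* full-size degamma or gamma LUT */
	if (is_gamma && entries == 256)
		return 0;		/* legacy gamma LUT */
	return -EINVAL;
}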
@ -0,0 +1,166 @@
/*
 * Copyright 2021 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include "amdgpu_dm_psr.h"
#include "dc.h"
#include "dm_helpers.h"

/*
 * amdgpu_dm_set_psr_caps() - set link psr capabilities
 * @link: link
 *
 */
void amdgpu_dm_set_psr_caps(struct dc_link *link)
{
	uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];

	if (!(link->connector_signal & SIGNAL_TYPE_EDP))
		return;
	if (link->type == dc_connection_none)
		return;
	if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
					dpcd_data, sizeof(dpcd_data))) {
		link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];

		if (dpcd_data[0] == 0) {
			link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
			link->psr_settings.psr_feature_enabled = false;
		} else {
			link->psr_settings.psr_version = DC_PSR_VERSION_1;
			link->psr_settings.psr_feature_enabled = true;
		}

		DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
	}
}

/*
 * amdgpu_dm_link_setup_psr() - configure psr link
 * @stream: stream state
 *
 * Return: true if success
 */
bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
{
	struct dc_link *link = NULL;
	struct psr_config psr_config = {0};
	struct psr_context psr_context = {0};
	struct dc *dc = NULL;
	bool ret = false;

	if (stream == NULL)
		return false;

	link = stream->link;
	dc = link->ctx->dc;

	psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;

	if (psr_config.psr_version > 0) {
		psr_config.psr_exit_link_training_required = 0x1;
		psr_config.psr_frame_capture_indication_req = 0;
		psr_config.psr_rfb_setup_time = 0x37;
		psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
		psr_config.allow_smu_optimizations = 0x0;

		ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);

	}
	DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_settings.psr_feature_enabled);

	return ret;
}

/*
 * amdgpu_dm_psr_enable() - enable psr f/w
 * @stream: stream state
 *
 * Return: true if success
 */
bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
{
	struct dc_link *link = stream->link;
	unsigned int vsync_rate_hz = 0;
	struct dc_static_screen_params params = {0};
	/* Calculate number of static frames before generating interrupt to
	 * enter PSR.
	 */
	// Init fail safe of 2 frames static
	unsigned int num_frames_static = 2;

	DRM_DEBUG_DRIVER("Enabling psr...\n");

	vsync_rate_hz = div64_u64(div64_u64((
			stream->timing.pix_clk_100hz * 100),
			stream->timing.v_total),
			stream->timing.h_total);

	/* Round up
	 * Calculate number of frames such that at least 30 ms of time has
	 * passed.
	 */
	if (vsync_rate_hz != 0) {
		unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;
		num_frames_static = (30000 / frame_time_microsec) + 1;
	}

	params.triggers.cursor_update = true;
	params.triggers.overlay_update = true;
	params.triggers.surface_update = true;
	params.num_frames = num_frames_static;

	dc_stream_set_static_screen_params(link->ctx->dc,
					   &stream, 1,
					   &params);

	return dc_link_set_psr_allow_active(link, true, false, false);
}
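/* Worked example of the static-frame computation above (illustrative): for
 * a 60 Hz stream, frame_time_microsec = 1000000 / 60 = 16666, so
 * num_frames_static = 30000 / 16666 + 1 = 2; at 144 Hz it is
 * 30000 / 6944 + 1 = 5. The helper below simply restates that arithmetic.
 */
static unsigned int demo_frames_for_30ms(unsigned int vsync_rate_hz)
{
	unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;

	/* smallest frame count covering at least ~30 ms of static screen */
	return (30000 / frame_time_microsec) + 1;
}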
/*
 * amdgpu_dm_psr_disable() - disable psr f/w
 * @stream: stream state
 *
 * Return: true if success
 */
bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
{

	DRM_DEBUG_DRIVER("Disabling psr...\n");

	return dc_link_set_psr_allow_active(stream->link, false, true, false);
}

/*
 * amdgpu_dm_psr_disable() - disable psr f/w
 * if psr is enabled on any stream
 *
 * Return: true if success
 */
bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm)
{
	DRM_DEBUG_DRIVER("Disabling psr if psr is enabled on any stream\n");
	return dc_set_psr_allow_active(dm->dc, false);
}

@ -0,0 +1,37 @@
/*
 * Copyright 2021 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#ifndef AMDGPU_DM_AMDGPU_DM_PSR_H_
#define AMDGPU_DM_AMDGPU_DM_PSR_H_

#include "amdgpu.h"

void amdgpu_dm_set_psr_caps(struct dc_link *link);
bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm);

#endif /* AMDGPU_DM_AMDGPU_DM_PSR_H_ */
@ -60,7 +60,7 @@ include $(AMD_DC)

DISPLAY_CORE = dc.o dc_stat.o dc_link.o dc_resource.o dc_hw_sequencer.o dc_sink.o \
dc_surface.o dc_link_hwss.o dc_link_dp.o dc_link_ddc.o dc_debug.o dc_stream.o \
dc_link_enc_cfg.o
dc_link_enc_cfg.o dc_link_dpcd.o

ifdef CONFIG_DRM_AMD_DC_DCN
DISPLAY_CORE += dc_vm_helper.o

@ -123,7 +123,7 @@ void dcn20_update_clocks_update_dpp_dto(struct clk_mgr_internal *clk_mgr,
	}
}

void dcn20_update_clocks_update_dentist(struct clk_mgr_internal *clk_mgr)
void dcn20_update_clocks_update_dentist(struct clk_mgr_internal *clk_mgr, struct dc_state *context)
{
	int dpp_divider = DENTIST_DIVIDER_RANGE_SCALE_FACTOR
			* clk_mgr->base.dentist_vco_freq_khz / clk_mgr->base.clks.dppclk_khz;

@ -132,6 +132,68 @@ void dcn20_update_clocks_update_dentist(struct clk_mgr_internal *clk_mgr)

	uint32_t dppclk_wdivider = dentist_get_did_from_divider(dpp_divider);
	uint32_t dispclk_wdivider = dentist_get_did_from_divider(disp_divider);
	uint32_t current_dispclk_wdivider;
	uint32_t i;

	REG_GET(DENTIST_DISPCLK_CNTL,
			DENTIST_DISPCLK_WDIVIDER, &current_dispclk_wdivider);

	/* When changing divider to or from 127, some extra programming is required to prevent corruption */
	if (current_dispclk_wdivider == 127 && dispclk_wdivider != 127) {
		for (i = 0; i < clk_mgr->base.ctx->dc->res_pool->pipe_count; i++) {
			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
			uint32_t fifo_level;
			struct dccg *dccg = clk_mgr->base.ctx->dc->res_pool->dccg;
			struct stream_encoder *stream_enc = pipe_ctx->stream_res.stream_enc;
			int32_t N;
			int32_t j;

			if (!pipe_ctx->stream)
				continue;
			/* Virtual encoders don't have this function */
			if (!stream_enc->funcs->get_fifo_cal_average_level)
				continue;
			fifo_level = stream_enc->funcs->get_fifo_cal_average_level(
					stream_enc);
			N = fifo_level / 4;
			dccg->funcs->set_fifo_errdet_ovr_en(
					dccg,
					true);
			for (j = 0; j < N - 4; j++)
				dccg->funcs->otg_drop_pixel(
						dccg,
						pipe_ctx->stream_res.tg->inst);
			dccg->funcs->set_fifo_errdet_ovr_en(
					dccg,
					false);
		}
	} else if (dispclk_wdivider == 127 && current_dispclk_wdivider != 127) {
		REG_UPDATE(DENTIST_DISPCLK_CNTL,
				DENTIST_DISPCLK_WDIVIDER, 126);
		REG_WAIT(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_DONE, 1, 50, 100);
		for (i = 0; i < clk_mgr->base.ctx->dc->res_pool->pipe_count; i++) {
			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
			struct dccg *dccg = clk_mgr->base.ctx->dc->res_pool->dccg;
			struct stream_encoder *stream_enc = pipe_ctx->stream_res.stream_enc;
			uint32_t fifo_level;
			int32_t N;
			int32_t j;

			if (!pipe_ctx->stream)
				continue;
			/* Virtual encoders don't have this function */
			if (!stream_enc->funcs->get_fifo_cal_average_level)
				continue;
			fifo_level = stream_enc->funcs->get_fifo_cal_average_level(
					stream_enc);
			N = fifo_level / 4;
			dccg->funcs->set_fifo_errdet_ovr_en(dccg, true);
			for (j = 0; j < 12 - N; j++)
				dccg->funcs->otg_add_pixel(dccg,
						pipe_ctx->stream_res.tg->inst);
			dccg->funcs->set_fifo_errdet_ovr_en(dccg, false);
		}
	}

	REG_UPDATE(DENTIST_DISPCLK_CNTL,
			DENTIST_DISPCLK_WDIVIDER, dispclk_wdivider);
|
|||
if (dpp_clock_lowered) {
|
||||
// if clock is being lowered, increase DTO before lowering refclk
|
||||
dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
|
||||
dcn20_update_clocks_update_dentist(clk_mgr);
|
||||
dcn20_update_clocks_update_dentist(clk_mgr, context);
|
||||
} else {
|
||||
// if clock is being raised, increase refclk before lowering DTO
|
||||
if (update_dppclk || update_dispclk)
|
||||
dcn20_update_clocks_update_dentist(clk_mgr);
|
||||
dcn20_update_clocks_update_dentist(clk_mgr, context);
|
||||
// always update dtos unless clock is lowered and not safe to lower
|
||||
dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
|
||||
}
|
||||
|
|
|
@ -50,7 +50,8 @@ void dcn2_get_clock(struct clk_mgr *clk_mgr,
|
|||
enum dc_clock_type clock_type,
|
||||
struct dc_clock_config *clock_cfg);
|
||||
|
||||
void dcn20_update_clocks_update_dentist(struct clk_mgr_internal *clk_mgr);
|
||||
void dcn20_update_clocks_update_dentist(struct clk_mgr_internal *clk_mgr,
|
||||
struct dc_state *context);
|
||||
|
||||
void dcn2_read_clocks_from_hw_dentist(struct clk_mgr *clk_mgr_base);
|
||||
|
||||
|
|
|
@ -334,11 +334,11 @@ static void dcn3_update_clocks(struct clk_mgr *clk_mgr_base,
|
|||
if (dpp_clock_lowered) {
|
||||
/* if clock is being lowered, increase DTO before lowering refclk */
|
||||
dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
|
||||
dcn20_update_clocks_update_dentist(clk_mgr);
|
||||
dcn20_update_clocks_update_dentist(clk_mgr, context);
|
||||
} else {
|
||||
/* if clock is being raised, increase refclk before lowering DTO */
|
||||
if (update_dppclk || update_dispclk)
|
||||
dcn20_update_clocks_update_dentist(clk_mgr);
|
||||
dcn20_update_clocks_update_dentist(clk_mgr, context);
|
||||
/* There is a check inside dcn20_update_clocks_update_dpp_dto which ensures
|
||||
* that we do not lower dto when it is not safe to lower. We do not need to
|
||||
* compare the current and new dppclk before calling this function.*/
|
||||
|
|
|
@ -409,13 +409,13 @@ void get_surface_tile_visual_confirm_color(
		struct tg_color *color)
{
	uint32_t color_value = MAX_TG_COLOR_VALUE;
	/* Determine the overscan color based on the top-most (desktop) plane's context */
	struct pipe_ctx *top_pipe_ctx = pipe_ctx;
	/* Determine the overscan color based on the bottom-most plane's context */
	struct pipe_ctx *bottom_pipe_ctx = pipe_ctx;

	while (top_pipe_ctx->top_pipe != NULL)
		top_pipe_ctx = top_pipe_ctx->top_pipe;
	while (bottom_pipe_ctx->bottom_pipe != NULL)
		bottom_pipe_ctx = bottom_pipe_ctx->bottom_pipe;

	switch (top_pipe_ctx->plane_state->tiling_info.gfx9.swizzle) {
	switch (bottom_pipe_ctx->plane_state->tiling_info.gfx9.swizzle) {
	case DC_SW_LINEAR:
		/* LINEAR Surface - set border color to red */
		color->color_r_cr = color_value;
@ -49,6 +49,7 @@
|
|||
#include "dmub/dmub_srv.h"
|
||||
#include "inc/hw/panel_cntl.h"
|
||||
#include "inc/link_enc_cfg.h"
|
||||
#include "inc/link_dpcd.h"
|
||||
|
||||
#define DC_LOGGER_INIT(logger)
|
||||
|
||||
|
@ -59,20 +60,6 @@
|
|||
#define RETIMER_REDRIVER_INFO(...) \
|
||||
DC_LOG_RETIMER_REDRIVER( \
|
||||
__VA_ARGS__)
|
||||
/*******************************************************************************
|
||||
* Private structures
|
||||
******************************************************************************/
|
||||
|
||||
enum {
|
||||
PEAK_FACTOR_X1000 = 1006,
|
||||
/*
|
||||
* Some receivers fail to train on first try and are good
|
||||
* on subsequent tries. 2 retries should be plenty. If we
|
||||
* don't have a successful training then we don't expect to
|
||||
* ever get one.
|
||||
*/
|
||||
LINK_TRAINING_MAX_VERIFY_RETRY = 2
|
||||
};
|
||||
|
||||
/*******************************************************************************
|
||||
* Private functions
|
||||
|
@ -718,11 +705,9 @@ static void read_current_link_settings_on_detect(struct dc_link *link)
|
|||
|
||||
static bool detect_dp(struct dc_link *link,
|
||||
struct display_sink_capability *sink_caps,
|
||||
bool *converter_disable_audio,
|
||||
struct audio_support *audio_support,
|
||||
enum dc_detect_reason reason)
|
||||
{
|
||||
bool boot = false;
|
||||
struct audio_support *audio_support = &link->dc->res_pool->audio_support;
|
||||
|
||||
sink_caps->signal = link_detect_sink(link, reason);
|
||||
sink_caps->transaction_type =
|
||||
|
@ -745,60 +730,12 @@ static bool detect_dp(struct dc_link *link,
|
|||
* of this function). */
|
||||
query_hdcp_capability(SIGNAL_TYPE_DISPLAY_PORT_MST, link);
|
||||
#endif
|
||||
/*
|
||||
* This call will initiate MST topology discovery. Which
|
||||
* will detect MST ports and add new DRM connector DRM
|
||||
* framework. Then read EDID via remote i2c over aux. In
|
||||
* the end, will notify DRM detect result and save EDID
|
||||
* into DRM framework.
|
||||
*
|
||||
* .detect is called by .fill_modes.
|
||||
* .fill_modes is called by user mode ioctl
|
||||
* DRM_IOCTL_MODE_GETCONNECTOR.
|
||||
*
|
||||
* .get_modes is called by .fill_modes.
|
||||
*
|
||||
* call .get_modes, AMDGPU DM implementation will create
|
||||
* new dc_sink and add to dc_link. For long HPD plug
|
||||
* in/out, MST has its own handle.
|
||||
*
|
||||
* Therefore, just after dc_create, link->sink is not
|
||||
* created for MST until user mode app calls
|
||||
* DRM_IOCTL_MODE_GETCONNECTOR.
|
||||
*
|
||||
* Need check ->sink usages in case ->sink = NULL
|
||||
* TODO: s3 resume check
|
||||
*/
|
||||
if (reason == DETECT_REASON_BOOT)
|
||||
boot = true;
|
||||
|
||||
dm_helpers_dp_update_branch_info(link->ctx, link);
|
||||
|
||||
if (!dm_helpers_dp_mst_start_top_mgr(link->ctx,
|
||||
link, boot)) {
|
||||
/* MST not supported */
|
||||
link->type = dc_connection_single;
|
||||
sink_caps->signal = SIGNAL_TYPE_DISPLAY_PORT;
|
||||
}
|
||||
}
|
||||
|
||||
if (link->type != dc_connection_mst_branch &&
|
||||
is_dp_branch_device(link)) {
|
||||
is_dp_branch_device(link))
|
||||
/* DP SST branch */
|
||||
link->type = dc_connection_sst_branch;
|
||||
if (!link->dpcd_caps.sink_count.bits.SINK_COUNT) {
|
||||
/*
|
||||
* SST branch unplug processing for short irq
|
||||
*/
|
||||
link_disconnect_sink(link);
|
||||
return true;
|
||||
}
|
||||
|
||||
if (is_dp_active_dongle(link) &&
|
||||
(link->dpcd_caps.dongle_type !=
|
||||
DISPLAY_DONGLE_DP_HDMI_CONVERTER))
|
||||
*converter_disable_audio = true;
|
||||
}
|
||||
} else {
|
||||
/* DP passive dongles */
|
||||
sink_caps->signal = dp_passive_dongle_detection(link->ddc,
|
||||
|
@ -893,7 +830,6 @@ static bool dc_link_detect_helper(struct dc_link *link,
|
|||
struct dc_sink *sink = NULL;
|
||||
struct dc_sink *prev_sink = NULL;
|
||||
struct dpcd_caps prev_dpcd_caps;
|
||||
bool same_dpcd = true;
|
||||
enum dc_connection_type new_connection_type = dc_connection_none;
|
||||
enum dc_connection_type pre_connection_type = dc_connection_none;
|
||||
bool perform_dp_seamless_boot = false;
|
||||
|
@@ -904,9 +840,10 @@ static bool dc_link_detect_helper(struct dc_link *link,
    if (dc_is_virtual_signal(link->connector_signal))
        return false;

    if ((link->connector_signal == SIGNAL_TYPE_LVDS ||
         link->connector_signal == SIGNAL_TYPE_EDP) &&
        link->local_sink) {
    if (((link->connector_signal == SIGNAL_TYPE_LVDS ||
          link->connector_signal == SIGNAL_TYPE_EDP) &&
         (!link->dc->config.allow_edp_hotplug_detection)) &&
        link->local_sink) {
        // need to re-write OUI and brightness in resume case
        if (link->connector_signal == SIGNAL_TYPE_EDP) {
            dpcd_set_source_specific_data(link);

@@ -983,35 +920,12 @@ static bool dc_link_detect_helper(struct dc_link *link,
            return false;
        }

        if (!detect_dp(link, &sink_caps,
                       &converter_disable_audio,
                       aud_support, reason)) {
        if (!detect_dp(link, &sink_caps, reason)) {
            if (prev_sink)
                dc_sink_release(prev_sink);
            return false;
        }

        // Check if dpcd block is the same
        if (prev_sink) {
            if (memcmp(&link->dpcd_caps, &prev_dpcd_caps,
                       sizeof(struct dpcd_caps)))
                same_dpcd = false;
        }
        /* Active SST downstream branch device unplug*/
        if (link->type == dc_connection_sst_branch &&
            link->dpcd_caps.sink_count.bits.SINK_COUNT == 0) {
            if (prev_sink)
                /* Downstream unplug */
                dc_sink_release(prev_sink);
            return true;
        }

        // link switch from MST to non-MST stop topology manager
        if (pre_connection_type == dc_connection_mst_branch &&
            link->type != dc_connection_mst_branch) {
            dm_helpers_dp_mst_stop_top_mgr(link->ctx, link);
        }

        if (link->type == dc_connection_mst_branch) {
            LINK_INFO("link=%d, mst branch is now Connected\n",
                      link->link_index);

@@ -1022,15 +936,69 @@ static bool dc_link_detect_helper(struct dc_link *link,
             */
            dp_verify_mst_link_cap(link);

            if (prev_sink)
                dc_sink_release(prev_sink);
            return false;
            /*
             * This call will initiate MST topology discovery. Which
             * will detect MST ports and add new DRM connector DRM
             * framework. Then read EDID via remote i2c over aux. In
             * the end, will notify DRM detect result and save EDID
             * into DRM framework.
             *
             * .detect is called by .fill_modes.
             * .fill_modes is called by user mode ioctl
             * DRM_IOCTL_MODE_GETCONNECTOR.
             *
             * .get_modes is called by .fill_modes.
             *
             * call .get_modes, AMDGPU DM implementation will create
             * new dc_sink and add to dc_link. For long HPD plug
             * in/out, MST has its own handle.
             *
             * Therefore, just after dc_create, link->sink is not
             * created for MST until user mode app calls
             * DRM_IOCTL_MODE_GETCONNECTOR.
             *
             * Need check ->sink usages in case ->sink = NULL
             * TODO: s3 resume check
             */

            dm_helpers_dp_update_branch_info(link->ctx, link);
            if (dm_helpers_dp_mst_start_top_mgr(link->ctx,
                                                link, reason == DETECT_REASON_BOOT)) {
                if (prev_sink)
                    dc_sink_release(prev_sink);
                return false;
            } else {
                link->type = dc_connection_sst_branch;
                sink_caps.signal = SIGNAL_TYPE_DISPLAY_PORT;
            }
        }

        /* Active SST downstream branch device unplug*/
        if (link->type == dc_connection_sst_branch &&
            link->dpcd_caps.sink_count.bits.SINK_COUNT == 0) {
            if (prev_sink)
                /* Downstream unplug */
                dc_sink_release(prev_sink);
            return true;
        }

        /* disable audio for non DP to HDMI active sst converter */
        if (link->type == dc_connection_sst_branch &&
            is_dp_active_dongle(link) &&
            (link->dpcd_caps.dongle_type !=
             DISPLAY_DONGLE_DP_HDMI_CONVERTER))
            converter_disable_audio = true;

        // link switch from MST to non-MST stop topology manager
        if (pre_connection_type == dc_connection_mst_branch &&
            link->type != dc_connection_mst_branch)
            dm_helpers_dp_mst_stop_top_mgr(link->ctx, link);


        // For seamless boot, to skip verify link cap, we read UEFI settings and set them as verified.
        if (reason == DETECT_REASON_BOOT &&
            !dc_ctx->dc->config.power_down_display_on_boot &&
            link->link_status.link_active)
            !dc_ctx->dc->config.power_down_display_on_boot &&
            link->link_status.link_active)
            perform_dp_seamless_boot = true;

        if (perform_dp_seamless_boot) {

@@ -1213,11 +1181,11 @@ static bool dc_link_detect_helper(struct dc_link *link,
        link->dongle_max_pix_clk = 0;
    }

    LINK_INFO("link=%d, dc_sink_in=%p is now %s prev_sink=%p dpcd same=%d edid same=%d\n",
    LINK_INFO("link=%d, dc_sink_in=%p is now %s prev_sink=%p edid same=%d\n",
              link->link_index, sink,
              (sink_caps.signal ==
               SIGNAL_TYPE_NONE ? "Disconnected" : "Connected"),
              prev_sink, same_dpcd, same_edid);
              prev_sink, same_edid);

    if (prev_sink)
        dc_sink_release(prev_sink);

@@ -1501,7 +1469,8 @@ static bool dc_link_construct(struct dc_link *link,
        link->connector_signal = SIGNAL_TYPE_EDP;

        if (link->hpd_gpio) {
            link->irq_source_hpd = DC_IRQ_SOURCE_INVALID;
            if (!link->dc->config.allow_edp_hotplug_detection)
                link->irq_source_hpd = DC_IRQ_SOURCE_INVALID;
            link->irq_source_hpd_rx =
                dal_irq_get_rx_source(link->hpd_gpio);
        }

@@ -25,6 +25,8 @@ static const uint8_t DP_VGA_LVDS_CONVERTER_ID_3[] = "dnomlA";
    link->ctx->logger
#define DC_TRACE_LEVEL_MESSAGE(...) /* do nothing */

#include "link_dpcd.h"

/* maximum pre emphasis level allowed for each voltage swing level*/
static const enum dc_pre_emphasis
voltage_swing_to_pre_emphasis[] = { PRE_EMPHASIS_LEVEL3,

@@ -1618,11 +1620,10 @@ enum dc_status dpcd_configure_lttpr_mode(struct dc_link *link, struct link_train
{
    enum dc_status status = DC_OK;

    if (lt_settings->lttpr_mode == LTTPR_MODE_TRANSPARENT)
        status = configure_lttpr_mode_transparent(link);

    else if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT)
    if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT)
        status = configure_lttpr_mode_non_transparent(link, lt_settings);
    else
        status = configure_lttpr_mode_transparent(link);

    return status;
}

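Sketch of the effect of the reordered check above: non-transparent is now the special case, and every other lttpr_mode (including LTTPR_MODE_NON_LTTPR) falls back to transparent configuration. The wrapper below is hypothetical, for illustration only:

static enum dc_status configure_lttpr_for_training(struct dc_link *link,
        struct link_training_settings *lt)
{
    /* with the new logic the function above is equivalent to: */
    return (lt->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) ?
        configure_lttpr_mode_non_transparent(link, lt) :
        configure_lttpr_mode_transparent(link);
}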
@@ -1806,7 +1807,7 @@ bool perform_link_training_with_retries(
    enum dp_panel_mode panel_mode;
    struct link_encoder *link_enc;
    enum link_training_result status = LINK_TRAINING_CR_FAIL_LANE0;
    struct dc_link_settings currnet_setting = *link_setting;
    struct dc_link_settings current_setting = *link_setting;

    /* Dynamically assigned link encoders associated with stream rather than
     * link.

@@ -1832,7 +1833,7 @@ bool perform_link_training_with_retries(
            link,
            signal,
            pipe_ctx->clock_source->id,
            &currnet_setting);
            &current_setting);

    if (stream->sink_patches.dppowerup_delay > 0) {
        int delay_dp_power_up_in_ms = stream->sink_patches.dppowerup_delay;

@@ -1847,12 +1848,12 @@ bool perform_link_training_with_retries(
            panel_mode != DP_PANEL_MODE_DEFAULT);

        if (link->aux_access_disabled) {
            dc_link_dp_perform_link_training_skip_aux(link, &currnet_setting);
            dc_link_dp_perform_link_training_skip_aux(link, &current_setting);
            return true;
        } else {
            status = dc_link_dp_perform_link_training(
                link,
                &currnet_setting,
                &current_setting,
                skip_video_pattern);
            if (status == LINK_TRAINING_SUCCESS)
                return true;

@@ -1872,12 +1873,12 @@ bool perform_link_training_with_retries(
        if (status == LINK_TRAINING_ABORT)
            break;
        else if (do_fallback) {
            decide_fallback_link_setting(*link_setting, &currnet_setting, status);
            decide_fallback_link_setting(*link_setting, &current_setting, status);
            /* Fail link training if reduced link bandwidth no longer meets
             * stream requirements.
             */
            if (dc_bandwidth_in_kbps_from_timing(&stream->timing) <
                dc_link_bandwidth_kbps(link, &currnet_setting))
                dc_link_bandwidth_kbps(link, &current_setting))
                break;
        }

@@ -3619,79 +3620,16 @@ static bool dpcd_read_sink_ext_caps(struct dc_link *link)
    return true;
}

static bool retrieve_link_cap(struct dc_link *link)
bool dp_retrieve_lttpr_cap(struct dc_link *link)
{
    /* DP_ADAPTER_CAP - DP_DPCD_REV + 1 == 16 and also DP_DSC_BITS_PER_PIXEL_INC - DP_DSC_SUPPORT + 1 == 16,
     * which means size 16 will be good for both of those DPCD register block reads
     */
    uint8_t dpcd_data[16];
    uint8_t lttpr_dpcd_data[6];

    /*Only need to read 1 byte starting from DP_DPRX_FEATURE_ENUMERATION_LIST.
     */
    uint8_t dpcd_dprx_data = '\0';
    uint8_t dpcd_power_state = '\0';

    struct dp_device_vendor_id sink_id;
    union down_stream_port_count down_strm_port_count;
    union edp_configuration_cap edp_config_cap;
    union dp_downstream_port_present ds_port = { 0 };
    enum dc_status status = DC_ERROR_UNEXPECTED;
    uint32_t read_dpcd_retry_cnt = 3;
    int i;
    struct dp_sink_hw_fw_revision dp_hw_fw_revision;
    bool is_lttpr_present = false;
    const uint32_t post_oui_delay = 30; // 30ms
    bool vbios_lttpr_enable = false;
    bool vbios_lttpr_interop = false;
    struct dc_bios *bios = link->dc->ctx->dc_bios;
    enum dc_status status = DC_ERROR_UNEXPECTED;
    bool is_lttpr_present = false;

    memset(dpcd_data, '\0', sizeof(dpcd_data));
    memset(lttpr_dpcd_data, '\0', sizeof(lttpr_dpcd_data));
    memset(&down_strm_port_count,
           '\0', sizeof(union down_stream_port_count));
    memset(&edp_config_cap, '\0',
           sizeof(union edp_configuration_cap));

    /* if extended timeout is supported in hardware,
     * default to LTTPR timeout (3.2ms) first as a W/A for DP link layer
     * CTS 4.2.1.1 regression introduced by CTS specs requirement update.
     */
    dc_link_aux_try_to_configure_timeout(link->ddc,
                                         LINK_AUX_DEFAULT_LTTPR_TIMEOUT_PERIOD);

    status = core_link_read_dpcd(link, DP_SET_POWER,
                                 &dpcd_power_state, sizeof(dpcd_power_state));

    /* Delay 1 ms if AUX CH is in power down state. Based on spec
     * section 2.3.1.2, if AUX CH may be powered down due to
     * write to DPCD 600h = 2. Sink AUX CH is monitoring differential
     * signal and may need up to 1 ms before being able to reply.
     */
    if (status != DC_OK || dpcd_power_state == DP_SET_POWER_D3)
        udelay(1000);

    dpcd_set_source_specific_data(link);
    /* Sink may need to configure internals based on vendor, so allow some
     * time before proceeding with possibly vendor specific transactions
     */
    msleep(post_oui_delay);

    for (i = 0; i < read_dpcd_retry_cnt; i++) {
        status = core_link_read_dpcd(
            link,
            DP_DPCD_REV,
            dpcd_data,
            sizeof(dpcd_data));
        if (status == DC_OK)
            break;
    }

    if (status != DC_OK) {
        dm_error("%s: Read dpcd data failed.\n", __func__);
        return false;
    }

    /* Query BIOS to determine if LTTPR functionality is forced on by system */
    if (bios->funcs->get_lttpr_caps) {
        enum bp_result bp_query_result;

@@ -3763,21 +3701,91 @@ static bool retrieve_link_cap(struct dc_link *link)
            DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];

    /* Attempt to train in LTTPR transparent mode if repeater count exceeds 8. */
    is_lttpr_present = (link->dpcd_caps.lttpr_caps.phy_repeater_cnt > 0 &&
                        link->dpcd_caps.lttpr_caps.phy_repeater_cnt < 0xff &&
    is_lttpr_present = (dp_convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt) != 0 &&
                        link->dpcd_caps.lttpr_caps.max_lane_count > 0 &&
                        link->dpcd_caps.lttpr_caps.max_lane_count <= 4 &&
                        link->dpcd_caps.lttpr_caps.revision.raw >= 0x14);
    if (is_lttpr_present)
    if (is_lttpr_present) {
        CONN_DATA_DETECT(link, lttpr_dpcd_data, sizeof(lttpr_dpcd_data), "LTTPR Caps: ");
    else
        configure_lttpr_mode_transparent(link);
    } else
        link->lttpr_mode = LTTPR_MODE_NON_LTTPR;
    }
    return is_lttpr_present;
}

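For reference, PHY_REPEATER_CNT (DPCD 0xF0002) is one-hot encoded in the DP spec (0x80 = 1 repeater, 0x40 = 2, down to 0x01 = 8), which is why dp_convert_to_count() above can reject 0x00 and 0xFF. A conversion helper in that spirit could look like the sketch below; the real helper lives elsewhere in DC and may differ:

static uint8_t repeater_count_from_one_hot(uint8_t phy_repeater_cnt)
{
    uint8_t count;

    /* 0x80 >> 0 => 1 repeater, ..., 0x80 >> 7 => 8 repeaters */
    for (count = 1; count <= 8; count++) {
        if (phy_repeater_cnt == (0x80 >> (count - 1)))
            return count;
    }
    return 0; /* 0x00, 0xFF and all other values are invalid */
}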
static bool retrieve_link_cap(struct dc_link *link)
{
    /* DP_ADAPTER_CAP - DP_DPCD_REV + 1 == 16 and also DP_DSC_BITS_PER_PIXEL_INC - DP_DSC_SUPPORT + 1 == 16,
     * which means size 16 will be good for both of those DPCD register block reads
     */
    uint8_t dpcd_data[16];
    /*Only need to read 1 byte starting from DP_DPRX_FEATURE_ENUMERATION_LIST.
     */
    uint8_t dpcd_dprx_data = '\0';
    uint8_t dpcd_power_state = '\0';

    struct dp_device_vendor_id sink_id;
    union down_stream_port_count down_strm_port_count;
    union edp_configuration_cap edp_config_cap;
    union dp_downstream_port_present ds_port = { 0 };
    enum dc_status status = DC_ERROR_UNEXPECTED;
    uint32_t read_dpcd_retry_cnt = 3;
    int i;
    struct dp_sink_hw_fw_revision dp_hw_fw_revision;
    const uint32_t post_oui_delay = 30; // 30ms
    bool is_lttpr_present = false;

    memset(dpcd_data, '\0', sizeof(dpcd_data));
    memset(&down_strm_port_count,
           '\0', sizeof(union down_stream_port_count));
    memset(&edp_config_cap, '\0',
           sizeof(union edp_configuration_cap));

    /* if extended timeout is supported in hardware,
     * default to LTTPR timeout (3.2ms) first as a W/A for DP link layer
     * CTS 4.2.1.1 regression introduced by CTS specs requirement update.
     */
    dc_link_aux_try_to_configure_timeout(link->ddc,
                                         LINK_AUX_DEFAULT_LTTPR_TIMEOUT_PERIOD);

    is_lttpr_present = dp_retrieve_lttpr_cap(link);

    status = core_link_read_dpcd(link, DP_SET_POWER,
                                 &dpcd_power_state, sizeof(dpcd_power_state));

    /* Delay 1 ms if AUX CH is in power down state. Based on spec
     * section 2.3.1.2, if AUX CH may be powered down due to
     * write to DPCD 600h = 2. Sink AUX CH is monitoring differential
     * signal and may need up to 1 ms before being able to reply.
     */
    if (status != DC_OK || dpcd_power_state == DP_SET_POWER_D3)
        udelay(1000);

    dpcd_set_source_specific_data(link);
    /* Sink may need to configure internals based on vendor, so allow some
     * time before proceeding with possibly vendor specific transactions
     */
    msleep(post_oui_delay);

    for (i = 0; i < read_dpcd_retry_cnt; i++) {
        status = core_link_read_dpcd(
            link,
            DP_DPCD_REV,
            dpcd_data,
            sizeof(dpcd_data));
        if (status == DC_OK)
            break;
    }

    if (status != DC_OK) {
        dm_error("%s: Read receiver caps dpcd data failed.\n", __func__);
        return false;
    }

    if (!is_lttpr_present)
        dc_link_aux_try_to_configure_timeout(link->ddc, LINK_AUX_DEFAULT_TIMEOUT_PERIOD);


    {
        union training_aux_rd_interval aux_rd_interval;

@@ -0,0 +1,218 @@
#include <inc/core_status.h>
#include <dc_link.h>
#include <inc/link_hwss.h>
#include <inc/link_dpcd.h>
#include "drm/drm_dp_helper.h"
#include <dc_dp_types.h>
#include "dm_helpers.h"

#define END_ADDRESS(start, size) (start + size - 1)
#define ADDRESS_RANGE_SIZE(start, end) (end - start + 1)
struct dpcd_address_range {
    uint32_t start;
    uint32_t end;
};

static enum dc_status internal_link_read_dpcd(
    struct dc_link *link,
    uint32_t address,
    uint8_t *data,
    uint32_t size)
{
    if (!link->aux_access_disabled &&
        !dm_helpers_dp_read_dpcd(link->ctx,
                                 link, address, data, size)) {
        return DC_ERROR_UNEXPECTED;
    }

    return DC_OK;
}

static enum dc_status internal_link_write_dpcd(
    struct dc_link *link,
    uint32_t address,
    const uint8_t *data,
    uint32_t size)
{
    if (!link->aux_access_disabled &&
        !dm_helpers_dp_write_dpcd(link->ctx,
                                  link, address, data, size)) {
        return DC_ERROR_UNEXPECTED;
    }

    return DC_OK;
}

/*
 * Partition the entire DPCD address space
 * XXX: This partitioning must cover the entire DPCD address space,
 * and must contain no gaps or overlapping address ranges.
 */
static const struct dpcd_address_range mandatory_dpcd_partitions[] = {
    { 0, DP_TRAINING_PATTERN_SET_PHY_REPEATER(DP_PHY_LTTPR1) - 1},
    { DP_TRAINING_PATTERN_SET_PHY_REPEATER(DP_PHY_LTTPR1), DP_TRAINING_PATTERN_SET_PHY_REPEATER(DP_PHY_LTTPR2) - 1 },
    { DP_TRAINING_PATTERN_SET_PHY_REPEATER(DP_PHY_LTTPR2), DP_TRAINING_PATTERN_SET_PHY_REPEATER(DP_PHY_LTTPR3) - 1 },
    { DP_TRAINING_PATTERN_SET_PHY_REPEATER(DP_PHY_LTTPR3), DP_TRAINING_PATTERN_SET_PHY_REPEATER(DP_PHY_LTTPR4) - 1 },
    { DP_TRAINING_PATTERN_SET_PHY_REPEATER(DP_PHY_LTTPR4), DP_TRAINING_PATTERN_SET_PHY_REPEATER(DP_PHY_LTTPR5) - 1 },
    { DP_TRAINING_PATTERN_SET_PHY_REPEATER(DP_PHY_LTTPR5), DP_TRAINING_PATTERN_SET_PHY_REPEATER(DP_PHY_LTTPR6) - 1 },
    { DP_TRAINING_PATTERN_SET_PHY_REPEATER(DP_PHY_LTTPR6), DP_TRAINING_PATTERN_SET_PHY_REPEATER(DP_PHY_LTTPR7) - 1 },
    { DP_TRAINING_PATTERN_SET_PHY_REPEATER(DP_PHY_LTTPR7), DP_TRAINING_PATTERN_SET_PHY_REPEATER(DP_PHY_LTTPR8) - 1 },
    { DP_TRAINING_PATTERN_SET_PHY_REPEATER(DP_PHY_LTTPR8), DP_FEC_STATUS_PHY_REPEATER(DP_PHY_LTTPR1) - 1 },
    /*
     * The FEC registers are contiguous
     */
    { DP_FEC_STATUS_PHY_REPEATER(DP_PHY_LTTPR1), DP_FEC_STATUS_PHY_REPEATER(DP_PHY_LTTPR1) - 1 },
    { DP_FEC_STATUS_PHY_REPEATER(DP_PHY_LTTPR2), DP_FEC_STATUS_PHY_REPEATER(DP_PHY_LTTPR2) - 1 },
    { DP_FEC_STATUS_PHY_REPEATER(DP_PHY_LTTPR3), DP_FEC_STATUS_PHY_REPEATER(DP_PHY_LTTPR3) - 1 },
    { DP_FEC_STATUS_PHY_REPEATER(DP_PHY_LTTPR4), DP_FEC_STATUS_PHY_REPEATER(DP_PHY_LTTPR4) - 1 },
    { DP_FEC_STATUS_PHY_REPEATER(DP_PHY_LTTPR5), DP_FEC_STATUS_PHY_REPEATER(DP_PHY_LTTPR5) - 1 },
    { DP_FEC_STATUS_PHY_REPEATER(DP_PHY_LTTPR6), DP_FEC_STATUS_PHY_REPEATER(DP_PHY_LTTPR6) - 1 },
    { DP_FEC_STATUS_PHY_REPEATER(DP_PHY_LTTPR7), DP_FEC_STATUS_PHY_REPEATER(DP_PHY_LTTPR7) - 1 },
    { DP_FEC_STATUS_PHY_REPEATER(DP_PHY_LTTPR8), DP_LTTPR_MAX_ADD },
    /* all remaining DPCD addresses */
    { DP_LTTPR_MAX_ADD + 1, DP_DPCD_MAX_ADD } };

static inline bool do_addresses_intersect_with_range(
    const struct dpcd_address_range *range,
    const uint32_t start_address,
    const uint32_t end_address)
{
    return start_address <= range->end && end_address >= range->start;
}

static uint32_t dpcd_get_next_partition_size(const uint32_t address, const uint32_t size)
{
    const uint32_t end_address = END_ADDRESS(address, size);
    uint32_t partition_iterator = 0;

    /*
     * find current partition
     * this loop spins forever if partition map above is not surjective
     */
    while (!do_addresses_intersect_with_range(&mandatory_dpcd_partitions[partition_iterator],
                                              address, end_address))
        partition_iterator++;
    if (end_address < mandatory_dpcd_partitions[partition_iterator].end)
        return size;
    return ADDRESS_RANGE_SIZE(address, mandatory_dpcd_partitions[partition_iterator].end);
}
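To make the splitting behavior concrete, a worked example with a hypothetical partition boundary (the real boundaries come from the table above):

/* Suppose one partition ends at 0xF0010 and the next begins at 0xF0011. */
uint32_t first = dpcd_get_next_partition_size(0xF000C, 8); /* -> 5: covers 0xF000C..0xF0010 */
uint32_t rest  = dpcd_get_next_partition_size(0xF0011, 3); /* -> 3: fits entirely in one partition */

The read/write loops below then issue one AUX transaction of 5 bytes followed by one of 3 bytes, instead of a single 8-byte transaction that would cross the boundary.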
/*
 * Ranges of DPCD addresses that must be read in a single transaction
 * XXX: Do not allow any two address ranges in this array to overlap
 */
static const struct dpcd_address_range mandatory_dpcd_blocks[] = {
    { DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV, DP_PHY_REPEATER_EXTENDED_WAIT_TIMEOUT }};

/*
 * extend addresses to read all mandatory blocks together
 */
static void dpcd_extend_address_range(
    const uint32_t in_address,
    uint8_t * const in_data,
    const uint32_t in_size,
    uint32_t *out_address,
    uint8_t **out_data,
    uint32_t *out_size)
{
    const uint32_t end_address = END_ADDRESS(in_address, in_size);
    const struct dpcd_address_range *addr_range;
    struct dpcd_address_range new_addr_range;
    uint32_t i;

    new_addr_range.start = in_address;
    new_addr_range.end = end_address;
    for (i = 0; i < ARRAY_SIZE(mandatory_dpcd_blocks); i++) {
        addr_range = &mandatory_dpcd_blocks[i];
        if (addr_range->start <= in_address && addr_range->end >= in_address)
            new_addr_range.start = addr_range->start;

        if (addr_range->start <= end_address && addr_range->end >= end_address)
            new_addr_range.end = addr_range->end;
    }
    *out_address = in_address;
    *out_size = in_size;
    *out_data = in_data;
    if (new_addr_range.start != in_address || new_addr_range.end != end_address) {
        *out_address = new_addr_range.start;
        *out_size = ADDRESS_RANGE_SIZE(new_addr_range.start, new_addr_range.end);
        *out_data = kzalloc(*out_size * sizeof(**out_data), GFP_KERNEL);
    }
}

/*
 * Reduce the AUX reply down to the values the caller requested
 */
static void dpcd_reduce_address_range(
    const uint32_t extended_address,
    uint8_t * const extended_data,
    const uint32_t extended_size,
    const uint32_t reduced_address,
    uint8_t * const reduced_data,
    const uint32_t reduced_size)
{
    const uint32_t reduced_end_address = END_ADDRESS(reduced_address, reduced_size);
    const uint32_t extended_end_address = END_ADDRESS(reduced_address, extended_size);
    const uint32_t offset = reduced_address - extended_address;

    if (extended_end_address == reduced_end_address && extended_address == reduced_address)
        return; /* extended and reduced address ranges point to the same data */

    memcpy(&extended_data[offset], reduced_data, reduced_size);
    kfree(extended_data);
}

enum dc_status core_link_read_dpcd(
    struct dc_link *link,
    uint32_t address,
    uint8_t *data,
    uint32_t size)
{
    uint32_t extended_address;
    uint32_t partitioned_address;
    uint8_t *extended_data;
    uint32_t extended_size;
    /* size of the remaining partitioned address space */
    uint32_t size_left_to_read;
    enum dc_status status;
    /* size of the next partition to be read from */
    uint32_t partition_size;
    uint32_t data_index = 0;

    dpcd_extend_address_range(address, data, size, &extended_address, &extended_data, &extended_size);
    partitioned_address = extended_address;
    size_left_to_read = extended_size;
    while (size_left_to_read) {
        partition_size = dpcd_get_next_partition_size(partitioned_address, size_left_to_read);
        status = internal_link_read_dpcd(link, partitioned_address, &extended_data[data_index], partition_size);
        if (status != DC_OK)
            break;
        partitioned_address += partition_size;
        data_index += partition_size;
        size_left_to_read -= partition_size;
    }
    dpcd_reduce_address_range(extended_address, extended_data, extended_size, address, data, size);
    return status;
}

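Sketch of a caller hitting the extend/reduce path above: a 1-byte read inside the mandatory LTTPR block is widened to cover the whole block, fetched in partition-sized AUX transactions, then trimmed back to the byte the caller asked for. Illustrative only; error handling omitted:

uint8_t rev;
enum dc_status st = core_link_read_dpcd(link,
        DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV, &rev, 1);
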
enum dc_status core_link_write_dpcd(
    struct dc_link *link,
    uint32_t address,
    const uint8_t *data,
    uint32_t size)
{
    uint32_t partition_size;
    uint32_t data_index = 0;
    enum dc_status status;

    while (size) {
        partition_size = dpcd_get_next_partition_size(address, size);
        status = internal_link_write_dpcd(link, address, &data[data_index], partition_size);
        if (status != DC_OK)
            break;
        address += partition_size;
        data_index += partition_size;
        size -= partition_size;
    }
    return status;
}

@@ -16,6 +16,7 @@
#include "resource.h"
#include "link_enc_cfg.h"
#include "clk_mgr.h"
#include "inc/link_dpcd.h"

static uint8_t convert_to_count(uint8_t lttpr_repeater_count)
{

@@ -47,36 +48,6 @@ static inline bool is_immediate_downstream(struct dc_link *link, uint32_t offset
    return (convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt) == offset);
}

enum dc_status core_link_read_dpcd(
    struct dc_link *link,
    uint32_t address,
    uint8_t *data,
    uint32_t size)
{
    if (!link->aux_access_disabled &&
        !dm_helpers_dp_read_dpcd(link->ctx,
                                 link, address, data, size)) {
        return DC_ERROR_UNEXPECTED;
    }

    return DC_OK;
}

enum dc_status core_link_write_dpcd(
    struct dc_link *link,
    uint32_t address,
    const uint8_t *data,
    uint32_t size)
{
    if (!link->aux_access_disabled &&
        !dm_helpers_dp_write_dpcd(link->ctx,
                                  link, address, data, size)) {
        return DC_ERROR_UNEXPECTED;
    }

    return DC_OK;
}

void dp_receiver_power_ctrl(struct dc_link *link, bool on)
{
    uint8_t state;

@@ -445,7 +445,7 @@ bool resource_are_vblanks_synchronizable(
{
    uint32_t base60_refresh_rates[] = {10, 20, 5};
    uint8_t i;
    uint8_t rr_count = sizeof(base60_refresh_rates)/sizeof(base60_refresh_rates[0]);
    uint8_t rr_count = ARRAY_SIZE(base60_refresh_rates);
    uint64_t frame_time_diff;

    if (stream1->ctx->dc->config.vblank_alignment_dto_params &&

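ARRAY_SIZE() (from include/linux/kernel.h) computes the element count at compile time and, unlike the open-coded sizeof division, fails to build if handed a pointer instead of a true array. Minimal illustration:

uint32_t rates[] = {10, 20, 5};
uint8_t n = ARRAY_SIZE(rates); /* n == 3 */
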
@@ -45,7 +45,7 @@

/* forward declaration */
struct aux_payload;

#define DC_VER "3.2.139"
#define DC_VER "3.2.140"

#define MAX_SURFACES 3
#define MAX_PLANES 6

@@ -303,6 +303,7 @@ struct dc_config {
    bool multi_mon_pp_mclk_switch;
    bool disable_dmcu;
    bool enable_4to1MPC;
    bool allow_edp_hotplug_detection;
#if defined(CONFIG_DRM_AMD_DC_DCN)
    bool clamp_min_dcfclk;
#endif

@@ -86,6 +86,7 @@ void dc_dmub_srv_cmd_queue(struct dc_dmub_srv *dc_dmub_srv,

error:
    DC_ERROR("Error queuing DMUB command: status=%d\n", status);
    dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
}

void dc_dmub_srv_cmd_execute(struct dc_dmub_srv *dc_dmub_srv)

@@ -95,8 +96,10 @@ void dc_dmub_srv_cmd_execute(struct dc_dmub_srv *dc_dmub_srv)
    enum dmub_status status;

    status = dmub_srv_cmd_execute(dmub);
    if (status != DMUB_STATUS_OK)
    if (status != DMUB_STATUS_OK) {
        DC_ERROR("Error starting DMUB execution: status=%d\n", status);
        dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
    }
}

void dc_dmub_srv_wait_idle(struct dc_dmub_srv *dc_dmub_srv)

@@ -106,8 +109,10 @@ void dc_dmub_srv_wait_idle(struct dc_dmub_srv *dc_dmub_srv)
    enum dmub_status status;

    status = dmub_srv_wait_for_idle(dmub, 100000);
    if (status != DMUB_STATUS_OK)
    if (status != DMUB_STATUS_OK) {
        DC_ERROR("Error waiting for DMUB idle: status=%d\n", status);
        dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
    }
}

void dc_dmub_srv_send_inbox0_cmd(struct dc_dmub_srv *dmub_srv,

@@ -214,3 +219,94 @@ void dc_dmub_trace_event_control(struct dc *dc, bool enable)
{
    dm_helpers_dmub_outbox_interrupt_control(dc->ctx, enable);
}

bool dc_dmub_srv_get_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv, struct dmub_diagnostic_data *diag_data)
{
    if (!dc_dmub_srv || !dc_dmub_srv->dmub || !diag_data)
        return false;
    return dmub_srv_get_diagnostic_data(dc_dmub_srv->dmub, diag_data);
}

void dc_dmub_srv_log_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv)
{
    struct dmub_diagnostic_data diag_data = {0};

    if (!dc_dmub_srv || !dc_dmub_srv->dmub) {
        DC_LOG_ERROR("%s: invalid parameters.", __func__);
        return;
    }

    if (!dc_dmub_srv_get_diagnostic_data(dc_dmub_srv, &diag_data)) {
        DC_LOG_ERROR("%s: dc_dmub_srv_get_diagnostic_data failed.", __func__);
        return;
    }

    DC_LOG_DEBUG(
        "DMCUB STATE\n"
        "    dmcub_version      : %08x\n"
        "    scratch  [0]       : %08x\n"
        "    scratch  [1]       : %08x\n"
        "    scratch  [2]       : %08x\n"
        "    scratch  [3]       : %08x\n"
        "    scratch  [4]       : %08x\n"
        "    scratch  [5]       : %08x\n"
        "    scratch  [6]       : %08x\n"
        "    scratch  [7]       : %08x\n"
        "    scratch  [8]       : %08x\n"
        "    scratch  [9]       : %08x\n"
        "    scratch [10]       : %08x\n"
        "    scratch [11]       : %08x\n"
        "    scratch [12]       : %08x\n"
        "    scratch [13]       : %08x\n"
        "    scratch [14]       : %08x\n"
        "    scratch [15]       : %08x\n"
        "    pc                 : %08x\n"
        "    unk_fault_addr     : %08x\n"
        "    inst_fault_addr    : %08x\n"
        "    data_fault_addr    : %08x\n"
        "    inbox1_rptr        : %08x\n"
        "    inbox1_wptr        : %08x\n"
        "    inbox1_size        : %08x\n"
        "    inbox0_rptr        : %08x\n"
        "    inbox0_wptr        : %08x\n"
        "    inbox0_size        : %08x\n"
        "    is_enabled         : %d\n"
        "    is_soft_reset      : %d\n"
        "    is_secure_reset    : %d\n"
        "    is_traceport_en    : %d\n"
        "    is_cw0_en          : %d\n"
        "    is_cw6_en          : %d\n",
        diag_data.dmcub_version,
        diag_data.scratch[0],
        diag_data.scratch[1],
        diag_data.scratch[2],
        diag_data.scratch[3],
        diag_data.scratch[4],
        diag_data.scratch[5],
        diag_data.scratch[6],
        diag_data.scratch[7],
        diag_data.scratch[8],
        diag_data.scratch[9],
        diag_data.scratch[10],
        diag_data.scratch[11],
        diag_data.scratch[12],
        diag_data.scratch[13],
        diag_data.scratch[14],
        diag_data.scratch[15],
        diag_data.pc,
        diag_data.undefined_address_fault_addr,
        diag_data.inst_fetch_fault_addr,
        diag_data.data_write_fault_addr,
        diag_data.inbox1_rptr,
        diag_data.inbox1_wptr,
        diag_data.inbox1_size,
        diag_data.inbox0_rptr,
        diag_data.inbox0_wptr,
        diag_data.inbox0_size,
        diag_data.is_dmcub_enabled,
        diag_data.is_dmcub_soft_reset,
        diag_data.is_dmcub_secure_reset,
        diag_data.is_traceport_en,
        diag_data.is_cw0_enabled,
        diag_data.is_cw6_enabled);
}

@@ -71,4 +71,8 @@ void dc_dmub_trace_event_control(struct dc *dc, bool enable);

void dc_dmub_srv_send_inbox0_cmd(struct dc_dmub_srv *dmub_srv, union dmub_inbox0_data_register data);

bool dc_dmub_srv_get_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv, struct dmub_diagnostic_data *dmub_oca);

void dc_dmub_srv_log_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv);

#endif /* _DMUB_DC_SRV_H_ */

@@ -615,7 +615,8 @@ int dce_aux_transfer_dmub_raw(struct ddc_service *ddc,
}

#define AUX_MAX_RETRIES 7
#define AUX_MAX_DEFER_RETRIES 7
#define AUX_MIN_DEFER_RETRIES 7
#define AUX_MAX_DEFER_TIMEOUT_MS 50
#define AUX_MAX_I2C_DEFER_RETRIES 7
#define AUX_MAX_INVALID_REPLY_RETRIES 2
#define AUX_MAX_TIMEOUT_RETRIES 3

@@ -628,6 +629,10 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
    bool payload_reply = true;
    enum aux_return_code_type operation_result;
    bool retry_on_defer = false;
    struct ddc *ddc_pin = ddc->ddc_pin;
    struct dce_aux *aux_engine = ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en];
    struct aux_engine_dce110 *aux110 = FROM_AUX_ENGINE(aux_engine);
    uint32_t defer_time_in_ms = 0;

    int aux_ack_retries = 0,
        aux_defer_retries = 0,

@@ -660,19 +665,27 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
            break;

        case AUX_TRANSACTION_REPLY_AUX_DEFER:
            /* polling_timeout_period is in us */
            defer_time_in_ms += aux110->polling_timeout_period / 1000;
            ++aux_defer_retries;
            /* fall through */
        case AUX_TRANSACTION_REPLY_I2C_OVER_AUX_DEFER:
            retry_on_defer = true;
            fallthrough;
        case AUX_TRANSACTION_REPLY_I2C_OVER_AUX_NACK:
            if (++aux_defer_retries >= AUX_MAX_DEFER_RETRIES) {
            if (aux_defer_retries >= AUX_MIN_DEFER_RETRIES
                && defer_time_in_ms >= AUX_MAX_DEFER_TIMEOUT_MS) {
                goto fail;
            } else {
                if ((*payload->reply == AUX_TRANSACTION_REPLY_AUX_DEFER) ||
                    (*payload->reply == AUX_TRANSACTION_REPLY_I2C_OVER_AUX_DEFER)) {
                    if (payload->defer_delay > 1)
                    if (payload->defer_delay > 1) {
                        msleep(payload->defer_delay);
                    else if (payload->defer_delay <= 1)
                        defer_time_in_ms += payload->defer_delay;
                    } else if (payload->defer_delay <= 1) {
                        udelay(payload->defer_delay * 1000);
                        defer_time_in_ms += payload->defer_delay;
                    }
                }
            }
            break;

@@ -701,7 +714,7 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
        // Check whether a DEFER had occurred before the timeout.
        // If so, treat timeout as a DEFER.
        if (retry_on_defer) {
            if (++aux_defer_retries >= AUX_MAX_DEFER_RETRIES)
            if (++aux_defer_retries >= AUX_MIN_DEFER_RETRIES)
                goto fail;
            else if (payload->defer_delay > 0)
                msleep(payload->defer_delay);

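A worked reading of the new bail-out condition, with hypothetical numbers (the AUX_DEFER path additionally folds the engine's polling timeout into defer_time_in_ms, which is ignored here for simplicity):

/*
 * With defer_delay = 5 ms per deferred reply:
 *   after 7 defers:  aux_defer_retries = 7 (>= AUX_MIN_DEFER_RETRIES)
 *                    but defer_time_in_ms = 35 (< AUX_MAX_DEFER_TIMEOUT_MS) -> keep retrying
 *   once ~50 ms of cumulative defer delay accrues, both halves of the
 *   condition hold -> goto fail
 * The old code failed unconditionally at the 7th defer, regardless of
 * how little wall-clock time had elapsed.
 */
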
@@ -65,7 +65,6 @@

#include "atomfirmware.h"

#include "dce110_hw_sequencer.h"
#include "dcn10/dcn10_hw_sequencer.h"

#define GAMMA_HW_POINTS_NUM 256

@@ -54,6 +54,8 @@
#include "dce/dmub_hw_lock_mgr.h"
#include "dc_trace.h"
#include "dce/dmub_outbox.h"
#include "inc/dc_link_dp.h"
#include "inc/link_dpcd.h"

#define DC_LOGGER_INIT(logger)

@@ -1403,6 +1405,9 @@ void dcn10_init_hw(struct dc *dc)
        if (dc->links[i]->connector_signal != SIGNAL_TYPE_DISPLAY_PORT)
            continue;

        /* DP 2.0 requires that LTTPR Caps be read first */
        dp_retrieve_lttpr_cap(dc->links[i]);

        /*
         * If any of the displays are lit up turn them off.
         * The reason is that some MST hubs cannot be turned off

@@ -52,6 +52,7 @@
    SRI(AFMT_60958_1, DIG, id), \
    SRI(AFMT_60958_2, DIG, id), \
    SRI(DIG_FE_CNTL, DIG, id), \
    SRI(DIG_FIFO_STATUS, DIG, id), \
    SRI(HDMI_CONTROL, DIG, id), \
    SRI(HDMI_DB_CONTROL, DIG, id), \
    SRI(HDMI_GC, DIG, id), \

@@ -124,6 +125,7 @@ struct dcn10_stream_enc_registers {
    uint32_t AFMT_60958_2;
    uint32_t DIG_FE_CNTL;
    uint32_t DIG_FE_CNTL2;
    uint32_t DIG_FIFO_STATUS;
    uint32_t DP_MSE_RATE_CNTL;
    uint32_t DP_MSE_RATE_UPDATE;
    uint32_t DP_PIXEL_FORMAT;

@@ -266,6 +268,17 @@ struct dcn10_stream_enc_registers {
    SE_SF(DIG0_DIG_FE_CNTL, TMDS_COLOR_FORMAT, mask_sh),\
    SE_SF(DIG0_DIG_FE_CNTL, DIG_STEREOSYNC_SELECT, mask_sh),\
    SE_SF(DIG0_DIG_FE_CNTL, DIG_STEREOSYNC_GATE_EN, mask_sh),\
    SE_SF(DIG0_DIG_FIFO_STATUS, DIG_FIFO_LEVEL_ERROR, mask_sh),\
    SE_SF(DIG0_DIG_FIFO_STATUS, DIG_FIFO_USE_OVERWRITE_LEVEL, mask_sh),\
    SE_SF(DIG0_DIG_FIFO_STATUS, DIG_FIFO_OVERWRITE_LEVEL, mask_sh),\
    SE_SF(DIG0_DIG_FIFO_STATUS, DIG_FIFO_ERROR_ACK, mask_sh),\
    SE_SF(DIG0_DIG_FIFO_STATUS, DIG_FIFO_CAL_AVERAGE_LEVEL, mask_sh),\
    SE_SF(DIG0_DIG_FIFO_STATUS, DIG_FIFO_MAXIMUM_LEVEL, mask_sh),\
    SE_SF(DIG0_DIG_FIFO_STATUS, DIG_FIFO_MINIMUM_LEVEL, mask_sh),\
    SE_SF(DIG0_DIG_FIFO_STATUS, DIG_FIFO_READ_CLOCK_SRC, mask_sh),\
    SE_SF(DIG0_DIG_FIFO_STATUS, DIG_FIFO_CALIBRATED, mask_sh),\
    SE_SF(DIG0_DIG_FIFO_STATUS, DIG_FIFO_FORCE_RECAL_AVERAGE, mask_sh),\
    SE_SF(DIG0_DIG_FIFO_STATUS, DIG_FIFO_FORCE_RECOMP_MINMAX, mask_sh),\
    SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC_LOCK_STATUS, mask_sh),\
    SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC_CONFLICT, mask_sh),\
    SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC_CONFLICT_CLR, mask_sh),\

@@ -488,6 +501,17 @@ struct dcn10_stream_enc_registers {
    type DP_VID_N_MUL;\
    type DP_VID_M_DOUBLE_VALUE_EN;\
    type DIG_SOURCE_SELECT;\
    type DIG_FIFO_LEVEL_ERROR;\
    type DIG_FIFO_USE_OVERWRITE_LEVEL;\
    type DIG_FIFO_OVERWRITE_LEVEL;\
    type DIG_FIFO_ERROR_ACK;\
    type DIG_FIFO_CAL_AVERAGE_LEVEL;\
    type DIG_FIFO_MAXIMUM_LEVEL;\
    type DIG_FIFO_MINIMUM_LEVEL;\
    type DIG_FIFO_READ_CLOCK_SRC;\
    type DIG_FIFO_CALIBRATED;\
    type DIG_FIFO_FORCE_RECAL_AVERAGE;\
    type DIG_FIFO_FORCE_RECOMP_MINMAX;\
    type DIG_CLOCK_PATTERN

#define SE_REG_FIELD_LIST_DCN2_0(type) \

@@ -552,6 +552,17 @@ void enc2_stream_encoder_dp_set_stream_attribute(
        DP_SST_SDP_SPLITTING, enable_sdp_splitting);
}

uint32_t enc2_get_fifo_cal_average_level(
    struct stream_encoder *enc)
{
    struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
    uint32_t fifo_level;

    REG_GET(DIG_FIFO_STATUS,
            DIG_FIFO_CAL_AVERAGE_LEVEL, &fifo_level);
    return fifo_level;
}

static const struct stream_encoder_funcs dcn20_str_enc_funcs = {
    .dp_set_odm_combine =
        enc2_dp_set_odm_combine,

@@ -598,6 +609,7 @@ static const struct stream_encoder_funcs dcn20_str_enc_funcs = {
    .dp_set_dsc_pps_info_packet = enc2_dp_set_dsc_pps_info_packet,
    .set_dynamic_metadata = enc2_set_dynamic_metadata,
    .hdmi_reset_stream_attribute = enc1_reset_hdmi_stream_attribute,
    .get_fifo_cal_average_level = enc2_get_fifo_cal_average_level,
};

void dcn20_stream_encoder_construct(

@@ -112,4 +112,7 @@ void enc2_set_dynamic_metadata(struct stream_encoder *enc,
    uint32_t hubp_requestor_id,
    enum dynamic_metadata_mode dmdata_mode);

uint32_t enc2_get_fifo_cal_average_level(
    struct stream_encoder *enc);

#endif /* __DC_STREAM_ENCODER_DCN20_H__ */

@@ -823,6 +823,8 @@ static const struct stream_encoder_funcs dcn30_str_enc_funcs = {
    .dp_set_dsc_pps_info_packet = enc3_dp_set_dsc_pps_info_packet,
    .set_dynamic_metadata = enc2_set_dynamic_metadata,
    .hdmi_reset_stream_attribute = enc1_reset_hdmi_stream_attribute,

    .get_fifo_cal_average_level = enc2_get_fifo_cal_average_level,
};

void dcn30_dio_stream_encoder_construct(

@@ -106,6 +106,7 @@
    SRI(DP_SEC_METADATA_TRANSMISSION, DP, id), \
    SRI(HDMI_METADATA_PACKET_CONTROL, DIG, id), \
    SRI(DIG_FE_CNTL, DIG, id), \
    SRI(DIG_FIFO_STATUS, DIG, id), \
    SRI(DIG_CLOCK_PATTERN, DIG, id)


@@ -167,6 +168,17 @@
    SE_SF(DIG0_DIG_FE_CNTL, TMDS_COLOR_FORMAT, mask_sh),\
    SE_SF(DIG0_DIG_FE_CNTL, DIG_STEREOSYNC_SELECT, mask_sh),\
    SE_SF(DIG0_DIG_FE_CNTL, DIG_STEREOSYNC_GATE_EN, mask_sh),\
    SE_SF(DIG0_DIG_FIFO_STATUS, DIG_FIFO_LEVEL_ERROR, mask_sh),\
    SE_SF(DIG0_DIG_FIFO_STATUS, DIG_FIFO_USE_OVERWRITE_LEVEL, mask_sh),\
    SE_SF(DIG0_DIG_FIFO_STATUS, DIG_FIFO_OVERWRITE_LEVEL, mask_sh),\
    SE_SF(DIG0_DIG_FIFO_STATUS, DIG_FIFO_ERROR_ACK, mask_sh),\
    SE_SF(DIG0_DIG_FIFO_STATUS, DIG_FIFO_CAL_AVERAGE_LEVEL, mask_sh),\
    SE_SF(DIG0_DIG_FIFO_STATUS, DIG_FIFO_MAXIMUM_LEVEL, mask_sh),\
    SE_SF(DIG0_DIG_FIFO_STATUS, DIG_FIFO_MINIMUM_LEVEL, mask_sh),\
    SE_SF(DIG0_DIG_FIFO_STATUS, DIG_FIFO_READ_CLOCK_SRC, mask_sh),\
    SE_SF(DIG0_DIG_FIFO_STATUS, DIG_FIFO_CALIBRATED, mask_sh),\
    SE_SF(DIG0_DIG_FIFO_STATUS, DIG_FIFO_FORCE_RECAL_AVERAGE, mask_sh),\
    SE_SF(DIG0_DIG_FIFO_STATUS, DIG_FIFO_FORCE_RECOMP_MINMAX, mask_sh),\
    SE_SF(DP0_DP_SEC_CNTL, DP_SEC_GSP4_ENABLE, mask_sh),\
    SE_SF(DP0_DP_SEC_CNTL, DP_SEC_GSP5_ENABLE, mask_sh),\
    SE_SF(DP0_DP_SEC_CNTL, DP_SEC_GSP6_ENABLE, mask_sh),\

|
|||
#include "dc_dmub_srv.h"
|
||||
#include "link_hwss.h"
|
||||
#include "dpcd_defs.h"
|
||||
#include "inc/dc_link_dp.h"
|
||||
#include "inc/link_dpcd.h"
|
||||
|
||||
|
||||
|
||||
|
@ -529,6 +531,8 @@ void dcn30_init_hw(struct dc *dc)
|
|||
for (i = 0; i < dc->link_count; i++) {
|
||||
if (dc->links[i]->connector_signal != SIGNAL_TYPE_DISPLAY_PORT)
|
||||
continue;
|
||||
/* DP 2.0 states that LTTPR regs must be read first */
|
||||
dp_retrieve_lttpr_cap(dc->links[i]);
|
||||
|
||||
/* if any of the displays are lit up turn them off */
|
||||
status = core_link_read_dpcd(dc->links[i], DP_SET_POWER,
|
||||
|
|
|
@@ -38,3 +38,8 @@ void dcn303_dsc_pg_control(struct dce_hwseq *hws, unsigned int dsc_inst, bool po
{
    /*DCN303 removes PG registers*/
}

void dcn303_enable_power_gating_plane(struct dce_hwseq *hws, bool enable)
{
    /*DCN303 removes PG registers*/
}

@@ -13,5 +13,6 @@
void dcn303_dpp_pg_control(struct dce_hwseq *hws, unsigned int dpp_inst, bool power_on);
void dcn303_hubp_pg_control(struct dce_hwseq *hws, unsigned int hubp_inst, bool power_on);
void dcn303_dsc_pg_control(struct dce_hwseq *hws, unsigned int dsc_inst, bool power_on);
void dcn303_enable_power_gating_plane(struct dce_hwseq *hws, bool enable);

#endif /* __DC_HWSS_DCN303_H__ */

@@ -16,4 +16,5 @@ void dcn303_hw_sequencer_construct(struct dc *dc)
    dc->hwseq->funcs.dpp_pg_control = dcn303_dpp_pg_control;
    dc->hwseq->funcs.hubp_pg_control = dcn303_hubp_pg_control;
    dc->hwseq->funcs.dsc_pg_control = dcn303_dsc_pg_control;
    dc->hwseq->funcs.enable_power_gating_plane = dcn303_enable_power_gating_plane;
}

@@ -33,7 +33,6 @@
#include "clk_mgr.h"
#include "reg_helper.h"
#include "abm.h"
#include "clk_mgr.h"
#include "hubp.h"
#include "dchubbub.h"
#include "timing_generator.h"

@@ -47,6 +46,7 @@
#include "dpcd_defs.h"
#include "dce/dmub_outbox.h"
#include "dc_link_dp.h"
#include "inc/link_dpcd.h"

#define DC_LOGGER_INIT(logger)

@@ -934,7 +934,6 @@ static const struct dc_debug_options debug_defaults_drv = {
    .dmub_command_table = true,
    .pstate_enabled = true,
    .use_max_lb = true,
    .pstate_enabled = true,
    .enable_mem_low_power = {
        .bits = {
            .vga = false,

@@ -3536,7 +3536,7 @@ static bool CalculateBytePerPixelAnd256BBlockSizes(
        *BytePerPixelDETC = 0;
        *BytePerPixelY = 4;
        *BytePerPixelC = 0;
    } else if (SourcePixelFormat == dm_444_16 || SourcePixelFormat == dm_444_16) {
    } else if (SourcePixelFormat == dm_444_16) {
        *BytePerPixelDETY = 2;
        *BytePerPixelDETC = 0;
        *BytePerPixelY = 2;

@@ -5674,7 +5674,7 @@ void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
    for (k = 0; k < v->NumberOfActivePlanes; k++) {
        if (v->ViewportWidth[k] > v->SurfaceWidthY[k] || v->ViewportHeight[k] > v->SurfaceHeightY[k]) {
            ViewportExceedsSurface = true;
            if (v->SourcePixelFormat[k] != dm_444_64 && v->SourcePixelFormat[k] != dm_444_32 && v->SourcePixelFormat[k] != dm_444_16
            if (v->SourcePixelFormat[k] != dm_444_64 && v->SourcePixelFormat[k] != dm_444_32
                && v->SourcePixelFormat[k] != dm_444_16 && v->SourcePixelFormat[k] != dm_444_8
                && v->SourcePixelFormat[k] != dm_rgbe) {
                if (v->ViewportWidthChroma[k] > v->SurfaceWidthC[k]

@@ -159,7 +159,6 @@ dml_get_pipe_attr_func(refcyc_per_meta_chunk_vblank_l_in_us, mode_lib->vba.TimeP
dml_get_pipe_attr_func(refcyc_per_meta_chunk_vblank_c_in_us, mode_lib->vba.TimePerChromaMetaChunkVBlank);
dml_get_pipe_attr_func(refcyc_per_meta_chunk_flip_l_in_us, mode_lib->vba.TimePerMetaChunkFlip);
dml_get_pipe_attr_func(refcyc_per_meta_chunk_flip_c_in_us, mode_lib->vba.TimePerChromaMetaChunkFlip);

dml_get_pipe_attr_func(vstartup, mode_lib->vba.VStartup);
dml_get_pipe_attr_func(vupdate_offset, mode_lib->vba.VUpdateOffsetPix);
dml_get_pipe_attr_func(vupdate_width, mode_lib->vba.VUpdateWidthPix);

@@ -419,7 +418,6 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib)
    visited[j] = true;

    mode_lib->vba.pipe_plane[j] = mode_lib->vba.NumberOfActivePlanes;

    mode_lib->vba.DPPPerPlane[mode_lib->vba.NumberOfActivePlanes] = 1;
    mode_lib->vba.SourceScan[mode_lib->vba.NumberOfActivePlanes] =
        (enum scan_direction_class) (src->source_scan);

@@ -33,6 +33,7 @@
#include "core_types.h"
#include "dc_link_ddc.h"
#include "link_hwss.h"
#include "inc/link_dpcd.h"

#define DC_LOGGER \
    link->ctx->logger

@@ -42,7 +42,15 @@ enum {
    /* to avoid infinite loop where-in the receiver
     * switches between different VS
     */
    LINK_TRAINING_MAX_CR_RETRY = 100
    LINK_TRAINING_MAX_CR_RETRY = 100,
    /*
     * Some receivers fail to train on first try and are good
     * on subsequent tries. 2 retries should be plenty. If we
     * don't have a successful training then we don't expect to
     * ever get one.
     */
    LINK_TRAINING_MAX_VERIFY_RETRY = 2,
    PEAK_FACTOR_X1000 = 1006,
};

bool dp_verify_link_cap(

@@ -182,4 +190,5 @@ enum dc_status dpcd_configure_lttpr_mode(
    struct link_training_settings *lt_settings);

enum dp_link_encoding dp_get_link_encoding_format(const struct dc_link_settings *link_settings);
bool dp_retrieve_lttpr_cap(struct dc_link *link);
#endif /* __DC_LINK_DP_H__ */

@@ -237,6 +237,9 @@ struct stream_encoder_funcs {
    void (*dp_set_odm_combine)(
        struct stream_encoder *enc,
        bool odm_combine);

    uint32_t (*get_fifo_cal_average_level)(
        struct stream_encoder *enc);
};

#endif /* STREAM_ENCODER_H_ */

@@ -0,0 +1,18 @@
#ifndef __LINK_DPCD_H__
#define __LINK_DPCD_H__
#include <inc/core_status.h>
#include <dc_link.h>
#include <inc/link_hwss.h>

enum dc_status core_link_read_dpcd(
    struct dc_link *link,
    uint32_t address,
    uint8_t *data,
    uint32_t size);

enum dc_status core_link_write_dpcd(
    struct dc_link *link,
    uint32_t address,
    const uint8_t *data,
    uint32_t size);
#endif
@@ -26,20 +26,6 @@
#ifndef __DC_LINK_HWSS_H__
#define __DC_LINK_HWSS_H__

#include "inc/core_status.h"

enum dc_status core_link_read_dpcd(
    struct dc_link *link,
    uint32_t address,
    uint8_t *data,
    uint32_t size);

enum dc_status core_link_write_dpcd(
    struct dc_link *link,
    uint32_t address,
    const uint8_t *data,
    uint32_t size);

struct gpio *get_hpd_gpio(struct dc_bios *dcb,
    struct graphics_object_id link_id,
    struct gpio_service *gpio_service);

@@ -243,6 +243,31 @@ struct dmub_srv_hw_params {
#endif
};

/**
 * struct dmub_diagnostic_data - Diagnostic data retrieved from DMCUB for
 * debugging purposes, including logging, crash analysis, etc.
 */
struct dmub_diagnostic_data {
    uint32_t dmcub_version;
    uint32_t scratch[16];
    uint32_t pc;
    uint32_t undefined_address_fault_addr;
    uint32_t inst_fetch_fault_addr;
    uint32_t data_write_fault_addr;
    uint32_t inbox1_rptr;
    uint32_t inbox1_wptr;
    uint32_t inbox1_size;
    uint32_t inbox0_rptr;
    uint32_t inbox0_wptr;
    uint32_t inbox0_size;
    uint8_t is_dmcub_enabled : 1;
    uint8_t is_dmcub_soft_reset : 1;
    uint8_t is_dmcub_secure_reset : 1;
    uint8_t is_traceport_en : 1;
    uint8_t is_cw0_enabled : 1;
    uint8_t is_cw6_enabled : 1;
};

/**
 * struct dmub_srv_base_funcs - Driver specific base callbacks
 */

@@ -335,6 +360,8 @@ struct dmub_srv_hw_funcs {

    void (*send_inbox0_cmd)(struct dmub_srv *dmub, union dmub_inbox0_data_register data);
    uint32_t (*get_current_time)(struct dmub_srv *dmub);

    void (*get_diagnostic_data)(struct dmub_srv *dmub, struct dmub_diagnostic_data *dmub_oca);
};

/**

@@ -685,6 +712,8 @@ enum dmub_status dmub_srv_cmd_with_reply_data(struct dmub_srv *dmub,

bool dmub_srv_get_outbox0_msg(struct dmub_srv *dmub, struct dmcub_trace_buf_entry *entry);

bool dmub_srv_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnostic_data *diag_data);

#if defined(__cplusplus)
}
#endif

|
|||
|
||||
/* Firmware versioning. */
|
||||
#ifdef DMUB_EXPOSE_VERSION
|
||||
#define DMUB_FW_VERSION_GIT_HASH 0xefd666c1
|
||||
#define DMUB_FW_VERSION_GIT_HASH 0x5cac099d3
|
||||
#define DMUB_FW_VERSION_MAJOR 0
|
||||
#define DMUB_FW_VERSION_MINOR 0
|
||||
#define DMUB_FW_VERSION_REVISION 69
|
||||
#define DMUB_FW_VERSION_REVISION 70
|
||||
#define DMUB_FW_VERSION_TEST 0
|
||||
#define DMUB_FW_VERSION_VBIOS 0
|
||||
#define DMUB_FW_VERSION_HOTFIX 0
|
||||
|
|
|
@@ -40,7 +40,10 @@

const struct dmub_srv_common_regs dmub_srv_dcn20_regs = {
#define DMUB_SR(reg) REG_OFFSET(reg),
    { DMUB_COMMON_REGS() },
    {
        DMUB_COMMON_REGS()
        DMCUB_INTERNAL_REGS()
    },
#undef DMUB_SR

#define DMUB_SF(reg, field) FD_MASK(reg, field),

@@ -404,3 +407,63 @@ uint32_t dmub_dcn20_get_current_time(struct dmub_srv *dmub)
{
    return REG_READ(DMCUB_TIMER_CURRENT);
}

void dmub_dcn20_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnostic_data *diag_data)
{
    uint32_t is_dmub_enabled, is_soft_reset, is_sec_reset;
    uint32_t is_traceport_enabled, is_cw0_enabled, is_cw6_enabled;

    if (!dmub || !diag_data)
        return;

    memset(diag_data, 0, sizeof(*diag_data));

    diag_data->dmcub_version = dmub->fw_version;

    diag_data->scratch[0] = REG_READ(DMCUB_SCRATCH0);
    diag_data->scratch[1] = REG_READ(DMCUB_SCRATCH1);
    diag_data->scratch[2] = REG_READ(DMCUB_SCRATCH2);
    diag_data->scratch[3] = REG_READ(DMCUB_SCRATCH3);
    diag_data->scratch[4] = REG_READ(DMCUB_SCRATCH4);
    diag_data->scratch[5] = REG_READ(DMCUB_SCRATCH5);
    diag_data->scratch[6] = REG_READ(DMCUB_SCRATCH6);
    diag_data->scratch[7] = REG_READ(DMCUB_SCRATCH7);
    diag_data->scratch[8] = REG_READ(DMCUB_SCRATCH8);
    diag_data->scratch[9] = REG_READ(DMCUB_SCRATCH9);
    diag_data->scratch[10] = REG_READ(DMCUB_SCRATCH10);
    diag_data->scratch[11] = REG_READ(DMCUB_SCRATCH11);
    diag_data->scratch[12] = REG_READ(DMCUB_SCRATCH12);
    diag_data->scratch[13] = REG_READ(DMCUB_SCRATCH13);
    diag_data->scratch[14] = REG_READ(DMCUB_SCRATCH14);
    diag_data->scratch[15] = REG_READ(DMCUB_SCRATCH15);

    diag_data->undefined_address_fault_addr = REG_READ(DMCUB_UNDEFINED_ADDRESS_FAULT_ADDR);
    diag_data->inst_fetch_fault_addr = REG_READ(DMCUB_INST_FETCH_FAULT_ADDR);
    diag_data->data_write_fault_addr = REG_READ(DMCUB_DATA_WRITE_FAULT_ADDR);

    diag_data->inbox1_rptr = REG_READ(DMCUB_INBOX1_RPTR);
    diag_data->inbox1_wptr = REG_READ(DMCUB_INBOX1_WPTR);
    diag_data->inbox1_size = REG_READ(DMCUB_INBOX1_SIZE);

    diag_data->inbox0_rptr = REG_READ(DMCUB_INBOX0_RPTR);
    diag_data->inbox0_wptr = REG_READ(DMCUB_INBOX0_WPTR);
    diag_data->inbox0_size = REG_READ(DMCUB_INBOX0_SIZE);

    REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_dmub_enabled);
    diag_data->is_dmcub_enabled = is_dmub_enabled;

    REG_GET(DMCUB_CNTL, DMCUB_SOFT_RESET, &is_soft_reset);
    diag_data->is_dmcub_soft_reset = is_soft_reset;

    REG_GET(DMCUB_SEC_CNTL, DMCUB_SEC_RESET_STATUS, &is_sec_reset);
    diag_data->is_dmcub_secure_reset = is_sec_reset;

    REG_GET(DMCUB_CNTL, DMCUB_TRACEPORT_EN, &is_traceport_enabled);
    diag_data->is_traceport_en = is_traceport_enabled;

    REG_GET(DMCUB_REGION3_CW0_TOP_ADDRESS, DMCUB_REGION3_CW0_ENABLE, &is_cw0_enabled);
    diag_data->is_cw0_enabled = is_cw0_enabled;

    REG_GET(DMCUB_REGION3_CW6_TOP_ADDRESS, DMCUB_REGION3_CW6_ENABLE, &is_cw6_enabled);
    diag_data->is_cw6_enabled = is_cw6_enabled;
}

@@ -36,6 +36,9 @@ struct dmub_srv;
    DMUB_SR(DMCUB_CNTL) \
    DMUB_SR(DMCUB_MEM_CNTL) \
    DMUB_SR(DMCUB_SEC_CNTL) \
    DMUB_SR(DMCUB_INBOX0_SIZE) \
    DMUB_SR(DMCUB_INBOX0_RPTR) \
    DMUB_SR(DMCUB_INBOX0_WPTR) \
    DMUB_SR(DMCUB_INBOX1_BASE_ADDRESS) \
    DMUB_SR(DMCUB_INBOX1_SIZE) \
    DMUB_SR(DMCUB_INBOX1_RPTR) \

@@ -108,7 +111,12 @@ struct dmub_srv;
    DMUB_SR(DCN_VM_FB_LOCATION_BASE) \
    DMUB_SR(DCN_VM_FB_OFFSET) \
    DMUB_SR(DMCUB_INTERRUPT_ACK) \
    DMUB_SR(DMCUB_TIMER_CURRENT)
    DMUB_SR(DMCUB_TIMER_CURRENT) \
    DMUB_SR(DMCUB_INST_FETCH_FAULT_ADDR) \
    DMUB_SR(DMCUB_UNDEFINED_ADDRESS_FAULT_ADDR) \
    DMUB_SR(DMCUB_DATA_WRITE_FAULT_ADDR)

#define DMCUB_INTERNAL_REGS()

#define DMUB_COMMON_FIELDS() \
    DMUB_SF(DMCUB_CNTL, DMCUB_ENABLE) \

@@ -118,6 +126,7 @@ struct dmub_srv;
    DMUB_SF(DMCUB_MEM_CNTL, DMCUB_MEM_WRITE_SPACE) \
    DMUB_SF(DMCUB_SEC_CNTL, DMCUB_SEC_RESET) \
    DMUB_SF(DMCUB_SEC_CNTL, DMCUB_MEM_UNIT_ID) \
    DMUB_SF(DMCUB_SEC_CNTL, DMCUB_SEC_RESET_STATUS) \
    DMUB_SF(DMCUB_REGION3_CW0_TOP_ADDRESS, DMCUB_REGION3_CW0_TOP_ADDRESS) \
    DMUB_SF(DMCUB_REGION3_CW0_TOP_ADDRESS, DMCUB_REGION3_CW0_ENABLE) \
    DMUB_SF(DMCUB_REGION3_CW1_TOP_ADDRESS, DMCUB_REGION3_CW1_TOP_ADDRESS) \

@@ -147,6 +156,7 @@ struct dmub_srv;
struct dmub_srv_common_reg_offset {
#define DMUB_SR(reg) uint32_t reg;
    DMUB_COMMON_REGS()
    DMCUB_INTERNAL_REGS()
#undef DMUB_SR
};

@@ -234,4 +244,6 @@ bool dmub_dcn20_use_cached_trace_buffer(struct dmub_srv *dmub);

uint32_t dmub_dcn20_get_current_time(struct dmub_srv *dmub);

void dmub_dcn20_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnostic_data *dmub_oca);

#endif /* _DMUB_DCN20_H_ */

@@ -39,7 +39,10 @@

const struct dmub_srv_common_regs dmub_srv_dcn21_regs = {
#define DMUB_SR(reg) REG_OFFSET(reg),
    { DMUB_COMMON_REGS() },
    {
        DMUB_COMMON_REGS()
        DMCUB_INTERNAL_REGS()
    },
#undef DMUB_SR

#define DMUB_SF(reg, field) FD_MASK(reg, field),

@@ -40,7 +40,10 @@

const struct dmub_srv_common_regs dmub_srv_dcn30_regs = {
#define DMUB_SR(reg) REG_OFFSET(reg),
    { DMUB_COMMON_REGS() },
    {
        DMUB_COMMON_REGS()
        DMCUB_INTERNAL_REGS()
    },
#undef DMUB_SR

#define DMUB_SF(reg, field) FD_MASK(reg, field),

@@ -39,7 +39,10 @@

const struct dmub_srv_common_regs dmub_srv_dcn301_regs = {
#define DMUB_SR(reg) REG_OFFSET(reg),
    { DMUB_COMMON_REGS() },
    {
        DMUB_COMMON_REGS()
        DMCUB_INTERNAL_REGS()
    },
#undef DMUB_SR

#define DMUB_SF(reg, field) FD_MASK(reg, field),

@@ -39,7 +39,10 @@

const struct dmub_srv_common_regs dmub_srv_dcn302_regs = {
#define DMUB_SR(reg) REG_OFFSET(reg),
    { DMUB_COMMON_REGS() },
    {
        DMUB_COMMON_REGS()
        DMCUB_INTERNAL_REGS()
    },
#undef DMUB_SR

#define DMUB_SF(reg, field) FD_MASK(reg, field),

@@ -21,7 +21,10 @@

const struct dmub_srv_common_regs dmub_srv_dcn303_regs = {
#define DMUB_SR(reg) REG_OFFSET(reg),
    { DMUB_COMMON_REGS() },
    {
        DMUB_COMMON_REGS()
        DMCUB_INTERNAL_REGS()
    },
#undef DMUB_SR

#define DMUB_SF(reg, field) FD_MASK(reg, field),

@@ -176,6 +176,8 @@ static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic)
		funcs->get_outbox0_wptr = dmub_dcn20_get_outbox0_wptr;
		funcs->set_outbox0_rptr = dmub_dcn20_set_outbox0_rptr;

		funcs->get_diagnostic_data = dmub_dcn20_get_diagnostic_data;

		if (asic == DMUB_ASIC_DCN21) {
			dmub->regs = &dmub_srv_dcn21_regs;

@@ -794,3 +796,11 @@ bool dmub_srv_get_outbox0_msg(struct dmub_srv *dmub, struct dmcub_trace_buf_entr

	return dmub_rb_out_trace_buffer_front(&dmub->outbox0_rb, (void *)entry);
}

bool dmub_srv_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnostic_data *diag_data)
{
	if (!dmub || !dmub->hw_funcs.get_diagnostic_data || !diag_data)
		return false;
	dmub->hw_funcs.get_diagnostic_data(dmub, diag_data);
	return true;
}
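For context, a caller-side sketch of the new diagnostic entry point (hypothetical usage, not part of this diff): dmub_srv_get_diagnostic_data() only validates its arguments and delegates to the per-ASIC hook registered in dmub_srv_hw_setup() above.

/* Hedged sketch; assumes a valid struct dmub_srv *dmub obtained elsewhere. */
struct dmub_diagnostic_data diag = {0};

if (dmub_srv_get_diagnostic_data(dmub, &diag))
	pr_info("DMCUB CW6 enabled: %u\n", diag.is_cw6_enabled);
else
	pr_warn("DMCUB diagnostic data unavailable\n");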
@@ -87,19 +87,19 @@ struct abm_parameters {
};

static const struct abm_parameters abm_settings_config0[abm_defines_max_level] = {
//  min_red max_red bright_pos dark_pos bright_gain contrast dev min_knee max_knee blStart blRed
	{0xff, 0xbf, 0x20, 0x00, 0xff, 0x99, 0xb3, 0x40, 0xe0, 0xCCCC, 0xCCCC},
	{0xde, 0x85, 0x20, 0x00, 0xff, 0x90, 0xa8, 0x40, 0xdf, 0xCCCC, 0xCCCC},
	{0xb0, 0x50, 0x20, 0x00, 0xc0, 0x88, 0x78, 0x70, 0xa0, 0xCCCC, 0xCCCC},
	{0x82, 0x40, 0x20, 0x00, 0x00, 0xff, 0xb3, 0x70, 0x70, 0xCCCC, 0xCCCC},
//  min_red max_red bright_pos dark_pos bright_gain contrast dev min_knee max_knee blRed blStart
	{0xff, 0xbf, 0x20, 0x00, 0xff, 0x99, 0xb3, 0x40, 0xe0, 0xf777, 0xcccc},
	{0xde, 0x85, 0x20, 0x00, 0xe0, 0x90, 0xa8, 0x40, 0xc8, 0xf777, 0xcccc},
	{0xb0, 0x50, 0x20, 0x00, 0xc0, 0x88, 0x78, 0x70, 0xa0, 0xeeee, 0x9999},
	{0x82, 0x40, 0x20, 0x00, 0x00, 0xb8, 0xb3, 0x70, 0x70, 0xe333, 0xb333},
};

static const struct abm_parameters abm_settings_config1[abm_defines_max_level] = {
//  min_red max_red bright_pos dark_pos bright_gain contrast dev min_knee max_knee blStart blRed
	{0xf0, 0xd9, 0x20, 0x00, 0x00, 0xff, 0xb3, 0x70, 0x70, 0xCCCC, 0xCCCC},
	{0xcd, 0xa5, 0x20, 0x00, 0x00, 0xff, 0xb3, 0x70, 0x70, 0xCCCC, 0xCCCC},
	{0x99, 0x65, 0x20, 0x00, 0x00, 0xff, 0xb3, 0x70, 0x70, 0xCCCC, 0xCCCC},
	{0x82, 0x4d, 0x20, 0x00, 0x00, 0xff, 0xb3, 0x70, 0x70, 0xCCCC, 0xCCCC},
//  min_red max_red bright_pos dark_pos bright_gain contrast dev min_knee max_knee blRed blStart
	{0xf0, 0xd9, 0x20, 0x00, 0x00, 0xff, 0xb3, 0x70, 0x70, 0xcccc, 0xcccc},
	{0xcd, 0xa5, 0x20, 0x00, 0x00, 0xff, 0xb3, 0x70, 0x70, 0xcccc, 0xcccc},
	{0x99, 0x65, 0x20, 0x00, 0x00, 0xff, 0xb3, 0x70, 0x70, 0xcccc, 0xcccc},
	{0x82, 0x4d, 0x20, 0x00, 0x00, 0xff, 0xb3, 0x70, 0x70, 0xcccc, 0xcccc},
};

static const struct abm_parameters * const abm_settings[] = {
@@ -29292,6 +29292,7 @@
#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_OVERWRITE_LEVEL_MASK 0x000000FCL
#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_ERROR_ACK_MASK 0x00000100L
#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_CAL_AVERAGE_LEVEL_MASK 0x0000FC00L
#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_MAXIMUM_LEVEL_MASK 0x001F0000L
#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_MINIMUM_LEVEL_MASK 0x03C00000L
#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_READ_CLOCK_SRC_MASK 0x04000000L
#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_CALIBRATED_MASK 0x20000000L

@@ -34431,6 +34432,7 @@
#define DIG3_DIG_FIFO_STATUS__DIG_FIFO_OVERWRITE_LEVEL__SHIFT 0x2
#define DIG3_DIG_FIFO_STATUS__DIG_FIFO_ERROR_ACK__SHIFT 0x8
#define DIG3_DIG_FIFO_STATUS__DIG_FIFO_CAL_AVERAGE_LEVEL__SHIFT 0xa
#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_MAXIMUM_LEVEL__SHIFT 0x10
#define DIG3_DIG_FIFO_STATUS__DIG_FIFO_MINIMUM_LEVEL__SHIFT 0x16
#define DIG3_DIG_FIFO_STATUS__DIG_FIFO_READ_CLOCK_SRC__SHIFT 0x1a
#define DIG3_DIG_FIFO_STATUS__DIG_FIFO_CALIBRATED__SHIFT 0x1d
@@ -33869,6 +33869,7 @@
#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_OVERWRITE_LEVEL__SHIFT 0x2
#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_ERROR_ACK__SHIFT 0x8
#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_CAL_AVERAGE_LEVEL__SHIFT 0xa
#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_MAXIMUM_LEVEL__SHIFT 0x10
#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_MINIMUM_LEVEL__SHIFT 0x16
#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_READ_CLOCK_SRC__SHIFT 0x1a
#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_CALIBRATED__SHIFT 0x1d

@@ -33879,6 +33880,7 @@
#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_OVERWRITE_LEVEL_MASK 0x000000FCL
#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_ERROR_ACK_MASK 0x00000100L
#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_CAL_AVERAGE_LEVEL_MASK 0x0000FC00L
#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_MAXIMUM_LEVEL_MASK 0x001F0000L
#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_MINIMUM_LEVEL_MASK 0x03C00000L
#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_READ_CLOCK_SRC_MASK 0x04000000L
#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_CALIBRATED_MASK 0x20000000L
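These register headers pair each field's _MASK with a __SHIFT so a field can be isolated from a raw 32-bit read. A standalone illustration of that arithmetic (the driver does the same thing through its REG_GET macros; the raw register value below is made up):

#include <stdint.h>
#include <stdio.h>

#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_MAXIMUM_LEVEL_MASK   0x001F0000L
#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_MAXIMUM_LEVEL__SHIFT 0x10

int main(void)
{
	uint32_t reg = 0x000A0000; /* example raw DIG_FIFO_STATUS value */
	uint32_t max_level = (reg & DIG0_DIG_FIFO_STATUS__DIG_FIFO_MAXIMUM_LEVEL_MASK) >>
			     DIG0_DIG_FIFO_STATUS__DIG_FIFO_MAXIMUM_LEVEL__SHIFT;

	printf("DIG_FIFO_MAXIMUM_LEVEL = %u\n", (unsigned)max_level); /* prints 10 */
	return 0;
}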
@@ -560,7 +560,7 @@ struct gpu_metrics_v1_2 {
	uint16_t current_vclk1;
	uint16_t current_dclk1;

	/* Throttle status */
	/* Throttle status (ASIC dependent) */
	uint32_t throttle_status;

	/* Fans */

@@ -648,6 +648,9 @@ struct gpu_metrics_v1_3 {
	uint16_t voltage_mem;

	uint16_t padding1;

	/* Throttle status (ASIC independent) */
	uint64_t indep_throttle_status;
};

/*

@@ -754,4 +757,57 @@ struct gpu_metrics_v2_1 {
	uint16_t padding[3];
};

struct gpu_metrics_v2_2 {
	struct metrics_table_header common_header;

	/* Temperature */
	uint16_t temperature_gfx; // gfx temperature on APUs
	uint16_t temperature_soc; // soc temperature on APUs
	uint16_t temperature_core[8]; // CPU core temperature on APUs
	uint16_t temperature_l3[2];

	/* Utilization */
	uint16_t average_gfx_activity;
	uint16_t average_mm_activity; // UVD or VCN

	/* Driver attached timestamp (in ns) */
	uint64_t system_clock_counter;

	/* Power/Energy */
	uint16_t average_socket_power; // dGPU + APU power on A + A platform
	uint16_t average_cpu_power;
	uint16_t average_soc_power;
	uint16_t average_gfx_power;
	uint16_t average_core_power[8]; // CPU core power on APUs

	/* Average clocks */
	uint16_t average_gfxclk_frequency;
	uint16_t average_socclk_frequency;
	uint16_t average_uclk_frequency;
	uint16_t average_fclk_frequency;
	uint16_t average_vclk_frequency;
	uint16_t average_dclk_frequency;

	/* Current clocks */
	uint16_t current_gfxclk;
	uint16_t current_socclk;
	uint16_t current_uclk;
	uint16_t current_fclk;
	uint16_t current_vclk;
	uint16_t current_dclk;
	uint16_t current_coreclk[8]; // CPU core clocks
	uint16_t current_l3clk[2];

	/* Throttle status (ASIC dependent) */
	uint32_t throttle_status;

	/* Fans */
	uint16_t fan_pwm;

	uint16_t padding[3];

	/* Throttle status (ASIC independent) */
	uint64_t indep_throttle_status;
};

#endif
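A consumer-side sketch of the two throttle fields in gpu_metrics_v2_2 (field names from the header above; where the blob comes from, e.g. the amdgpu gpu_metrics sysfs file, is an assumption and not part of this diff):

/* Hedged sketch: `buf` is assumed to hold a gpu_metrics_v2_2 blob. */
const struct gpu_metrics_v2_2 *m = (const struct gpu_metrics_v2_2 *)buf;

uint32_t asic_bits  = m->throttle_status;       /* ASIC-dependent bit layout */
uint64_t indep_bits = m->indep_throttle_status; /* ASIC-independent bit layout */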
@@ -35,6 +35,48 @@

#define SMU_DPM_USER_PROFILE_RESTORE (1 << 0)

// Power Throttlers
#define SMU_THROTTLER_PPT0_BIT 0
#define SMU_THROTTLER_PPT1_BIT 1
#define SMU_THROTTLER_PPT2_BIT 2
#define SMU_THROTTLER_PPT3_BIT 3
#define SMU_THROTTLER_SPL_BIT 4
#define SMU_THROTTLER_FPPT_BIT 5
#define SMU_THROTTLER_SPPT_BIT 6
#define SMU_THROTTLER_SPPT_APU_BIT 7

// Current Throttlers
#define SMU_THROTTLER_TDC_GFX_BIT 16
#define SMU_THROTTLER_TDC_SOC_BIT 17
#define SMU_THROTTLER_TDC_MEM_BIT 18
#define SMU_THROTTLER_TDC_VDD_BIT 19
#define SMU_THROTTLER_TDC_CVIP_BIT 20
#define SMU_THROTTLER_EDC_CPU_BIT 21
#define SMU_THROTTLER_EDC_GFX_BIT 22
#define SMU_THROTTLER_APCC_BIT 23

// Temperature
#define SMU_THROTTLER_TEMP_GPU_BIT 32
#define SMU_THROTTLER_TEMP_CORE_BIT 33
#define SMU_THROTTLER_TEMP_MEM_BIT 34
#define SMU_THROTTLER_TEMP_EDGE_BIT 35
#define SMU_THROTTLER_TEMP_HOTSPOT_BIT 36
#define SMU_THROTTLER_TEMP_SOC_BIT 37
#define SMU_THROTTLER_TEMP_VR_GFX_BIT 38
#define SMU_THROTTLER_TEMP_VR_SOC_BIT 39
#define SMU_THROTTLER_TEMP_VR_MEM0_BIT 40
#define SMU_THROTTLER_TEMP_VR_MEM1_BIT 41
#define SMU_THROTTLER_TEMP_LIQUID0_BIT 42
#define SMU_THROTTLER_TEMP_LIQUID1_BIT 43
#define SMU_THROTTLER_VRHOT0_BIT 44
#define SMU_THROTTLER_VRHOT1_BIT 45
#define SMU_THROTTLER_PROCHOT_CPU_BIT 46
#define SMU_THROTTLER_PROCHOT_GFX_BIT 47

// Other
#define SMU_THROTTLER_PPM_BIT 56
#define SMU_THROTTLER_FIT_BIT 57

struct smu_hw_power_state {
	unsigned int magic;
};

@@ -723,7 +765,10 @@ struct pptable_funcs {
	/**
	 * @get_power_limit: Get the device's power limits.
	 */
	int (*get_power_limit)(struct smu_context *smu);
	int (*get_power_limit)(struct smu_context *smu,
			       uint32_t *current_power_limit,
			       uint32_t *default_power_limit,
			       uint32_t *max_power_limit);

	/**
	 * @get_ppt_limit: Get the device's ppt limits.

@@ -932,7 +977,9 @@ struct pptable_funcs {
	 * @disable_all_features_with_exception: Disable all features with
	 *                                       exception to those in &mask.
	 */
	int (*disable_all_features_with_exception)(struct smu_context *smu, enum smu_feature_mask mask);
	int (*disable_all_features_with_exception)(struct smu_context *smu,
						   bool no_hw_disablement,
						   enum smu_feature_mask mask);

	/**
	 * @notify_display_change: Enable fast memory clock switching.
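The new SMU_THROTTLER_*_BIT numbers index into the 64-bit ASIC-independent status word, so userspace can test them uniformly across ASICs. A standalone sketch of that bit test (the status value below is invented for illustration):

#include <stdint.h>
#include <stdio.h>

#define SMU_THROTTLER_PPT0_BIT     0
#define SMU_THROTTLER_TEMP_GPU_BIT 32

int main(void)
{
	/* Example: PPT0 and GPU-temperature throttlers active */
	uint64_t indep = (1ULL << SMU_THROTTLER_PPT0_BIT) |
			 (1ULL << SMU_THROTTLER_TEMP_GPU_BIT);

	if (indep & (1ULL << SMU_THROTTLER_TEMP_GPU_BIT))
		printf("GPU temperature throttler engaged\n");
	return 0;
}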
@@ -244,6 +244,9 @@ int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state)
int smu_v11_0_baco_enter(struct smu_context *smu);
int smu_v11_0_baco_exit(struct smu_context *smu);

int smu_v11_0_baco_set_armd3_sequence(struct smu_context *smu,
				      enum smu_v11_0_baco_seq baco_seq);

int smu_v11_0_mode1_reset(struct smu_context *smu);

int smu_v11_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type clk_type,
@@ -688,7 +688,10 @@ static int smu_late_init(void *handle)
		return ret;
	}

	ret = smu_get_asic_power_limits(smu);
	ret = smu_get_asic_power_limits(smu,
					&smu->current_power_limit,
					&smu->default_power_limit,
					&smu->max_power_limit);
	if (ret) {
		dev_err(adev->dev, "Failed to get asic power limits!\n");
		return ret;

@@ -1379,15 +1382,20 @@ static int smu_disable_dpms(struct smu_context *smu)
	if (smu->uploading_custom_pp_table &&
	    (adev->asic_type >= CHIP_NAVI10) &&
	    (adev->asic_type <= CHIP_DIMGREY_CAVEFISH))
		return 0;
		return smu_disable_all_features_with_exception(smu,
							       true,
							       SMU_FEATURE_COUNT);

	/*
	 * For Sienna_Cichlid, PMFW will handle the features disablement properly
	 * on BACO in. Driver involvement is unnecessary.
	 */
	if ((adev->asic_type == CHIP_SIENNA_CICHLID) &&
	if (((adev->asic_type == CHIP_SIENNA_CICHLID) ||
	     ((adev->asic_type >= CHIP_NAVI10) && (adev->asic_type <= CHIP_NAVI12))) &&
	    use_baco)
		return 0;
		return smu_disable_all_features_with_exception(smu,
							       true,
							       SMU_FEATURE_BACO_BIT);

	/*
	 * For gpu reset, runpm and hibernation through BACO,

@@ -1395,6 +1403,7 @@ static int smu_disable_dpms(struct smu_context *smu)
	 */
	if (use_baco && smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT)) {
		ret = smu_disable_all_features_with_exception(smu,
							      false,
							      SMU_FEATURE_BACO_BIT);
		if (ret)
			dev_err(adev->dev, "Failed to disable smu features except BACO.\n");

@@ -2232,6 +2241,15 @@ int smu_get_power_limit(void *handle,
	} else {
		switch (limit_level) {
		case SMU_PPT_LIMIT_CURRENT:
			if ((smu->adev->asic_type == CHIP_ALDEBARAN) ||
			    (smu->adev->asic_type == CHIP_SIENNA_CICHLID) ||
			    (smu->adev->asic_type == CHIP_NAVY_FLOUNDER) ||
			    (smu->adev->asic_type == CHIP_DIMGREY_CAVEFISH) ||
			    (smu->adev->asic_type == CHIP_BEIGE_GOBY))
				ret = smu_get_asic_power_limits(smu,
								&smu->current_power_limit,
								NULL,
								NULL);
			*limit = smu->current_power_limit;
			break;
		case SMU_PPT_LIMIT_DEFAULT:
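After this refactor the get_power_limit callbacks report through caller-supplied pointers, and any pointer may be NULL when the caller needs only a subset of the values. A hedged sketch of the calling convention (driver context assumed; not a literal excerpt from the series):

/* Sketch: refresh only the current limit, as smu_get_power_limit() does above */
uint32_t cur = 0;

ret = smu->ppt_funcs->get_power_limit(smu, &cur, NULL, NULL);
if (!ret)
	dev_dbg(smu->adev->dev, "current power limit: %u W\n", cur);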
@@ -211,6 +211,26 @@ static const struct cmn2asic_mapping arcturus_workload_map[PP_SMC_POWER_PROFILE_
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM, WORKLOAD_PPLIB_CUSTOM_BIT),
};

static const uint8_t arcturus_throttler_map[] = {
	[THROTTLER_TEMP_EDGE_BIT] = (SMU_THROTTLER_TEMP_EDGE_BIT),
	[THROTTLER_TEMP_HOTSPOT_BIT] = (SMU_THROTTLER_TEMP_HOTSPOT_BIT),
	[THROTTLER_TEMP_MEM_BIT] = (SMU_THROTTLER_TEMP_MEM_BIT),
	[THROTTLER_TEMP_VR_GFX_BIT] = (SMU_THROTTLER_TEMP_VR_GFX_BIT),
	[THROTTLER_TEMP_VR_MEM_BIT] = (SMU_THROTTLER_TEMP_VR_MEM0_BIT),
	[THROTTLER_TEMP_VR_SOC_BIT] = (SMU_THROTTLER_TEMP_VR_SOC_BIT),
	[THROTTLER_TDC_GFX_BIT] = (SMU_THROTTLER_TDC_GFX_BIT),
	[THROTTLER_TDC_SOC_BIT] = (SMU_THROTTLER_TDC_SOC_BIT),
	[THROTTLER_PPT0_BIT] = (SMU_THROTTLER_PPT0_BIT),
	[THROTTLER_PPT1_BIT] = (SMU_THROTTLER_PPT1_BIT),
	[THROTTLER_PPT2_BIT] = (SMU_THROTTLER_PPT2_BIT),
	[THROTTLER_PPT3_BIT] = (SMU_THROTTLER_PPT3_BIT),
	[THROTTLER_PPM_BIT] = (SMU_THROTTLER_PPM_BIT),
	[THROTTLER_FIT_BIT] = (SMU_THROTTLER_FIT_BIT),
	[THROTTLER_APCC_BIT] = (SMU_THROTTLER_APCC_BIT),
	[THROTTLER_VRHOT0_BIT] = (SMU_THROTTLER_VRHOT0_BIT),
	[THROTTLER_VRHOT1_BIT] = (SMU_THROTTLER_VRHOT1_BIT),
};

static int arcturus_tables_init(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;

@@ -237,7 +257,7 @@ static int arcturus_tables_init(struct smu_context *smu)
		return -ENOMEM;
	smu_table->metrics_time = 0;

	smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_1);
	smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_3);
	smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL);
	if (!smu_table->gpu_metrics_table) {
		kfree(smu_table->metrics_table);

@@ -1174,7 +1194,10 @@ static int arcturus_get_fan_parameters(struct smu_context *smu)
	return 0;
}

static int arcturus_get_power_limit(struct smu_context *smu)
static int arcturus_get_power_limit(struct smu_context *smu,
				    uint32_t *current_power_limit,
				    uint32_t *default_power_limit,
				    uint32_t *max_power_limit)
{
	struct smu_11_0_powerplay_table *powerplay_table =
		(struct smu_11_0_powerplay_table *)smu->smu_table.power_play_table;

@@ -1190,17 +1213,24 @@ static int arcturus_get_power_limit(struct smu_context *smu)
		power_limit =
			pptable->SocketPowerLimitAc[PPT_THROTTLER_PPT0];
	}
	smu->current_power_limit = smu->default_power_limit = power_limit;

	if (smu->od_enabled) {
		od_percent = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
	if (current_power_limit)
		*current_power_limit = power_limit;
	if (default_power_limit)
		*default_power_limit = power_limit;

		dev_dbg(smu->adev->dev, "ODSETTING_POWERPERCENTAGE: %d (default: %d)\n", od_percent, power_limit);
	if (max_power_limit) {
		if (smu->od_enabled) {
			od_percent = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_ODSETTING_POWERPERCENTAGE]);

		power_limit *= (100 + od_percent);
		power_limit /= 100;
			dev_dbg(smu->adev->dev, "ODSETTING_POWERPERCENTAGE: %d (default: %d)\n", od_percent, power_limit);

			power_limit *= (100 + od_percent);
			power_limit /= 100;
		}

		*max_power_limit = power_limit;
	}
	smu->max_power_limit = power_limit;

	return 0;
}

@@ -2278,8 +2308,8 @@ static ssize_t arcturus_get_gpu_metrics(struct smu_context *smu,
					void **table)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct gpu_metrics_v1_1 *gpu_metrics =
		(struct gpu_metrics_v1_1 *)smu_table->gpu_metrics_table;
	struct gpu_metrics_v1_3 *gpu_metrics =
		(struct gpu_metrics_v1_3 *)smu_table->gpu_metrics_table;
	SmuMetrics_t metrics;
	int ret = 0;

@@ -2289,7 +2319,7 @@ static ssize_t arcturus_get_gpu_metrics(struct smu_context *smu,
	if (ret)
		return ret;

	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 1);
	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3);

	gpu_metrics->temperature_edge = metrics.TemperatureEdge;
	gpu_metrics->temperature_hotspot = metrics.TemperatureHotspot;

@@ -2318,6 +2348,9 @@ static ssize_t arcturus_get_gpu_metrics(struct smu_context *smu,
	gpu_metrics->current_dclk0 = metrics.CurrClock[PPCLK_DCLK];

	gpu_metrics->throttle_status = metrics.ThrottlerStatus;
	gpu_metrics->indep_throttle_status =
			smu_cmn_get_indep_throttler_status(metrics.ThrottlerStatus,
							   arcturus_throttler_map);

	gpu_metrics->current_fan_speed = metrics.CurrFanSpeed;

@@ -2330,7 +2363,7 @@ static ssize_t arcturus_get_gpu_metrics(struct smu_context *smu,
	*table = (void *)gpu_metrics;

	return sizeof(struct gpu_metrics_v1_1);
	return sizeof(struct gpu_metrics_v1_3);
}

static const struct pptable_funcs arcturus_ppt_funcs = {
@@ -238,6 +238,28 @@ static struct cmn2asic_mapping navi10_workload_map[PP_SMC_POWER_PROFILE_COUNT] =
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM, WORKLOAD_PPLIB_CUSTOM_BIT),
};

static const uint8_t navi1x_throttler_map[] = {
	[THROTTLER_TEMP_EDGE_BIT] = (SMU_THROTTLER_TEMP_EDGE_BIT),
	[THROTTLER_TEMP_HOTSPOT_BIT] = (SMU_THROTTLER_TEMP_HOTSPOT_BIT),
	[THROTTLER_TEMP_MEM_BIT] = (SMU_THROTTLER_TEMP_MEM_BIT),
	[THROTTLER_TEMP_VR_GFX_BIT] = (SMU_THROTTLER_TEMP_VR_GFX_BIT),
	[THROTTLER_TEMP_VR_MEM0_BIT] = (SMU_THROTTLER_TEMP_VR_MEM0_BIT),
	[THROTTLER_TEMP_VR_MEM1_BIT] = (SMU_THROTTLER_TEMP_VR_MEM1_BIT),
	[THROTTLER_TEMP_VR_SOC_BIT] = (SMU_THROTTLER_TEMP_VR_SOC_BIT),
	[THROTTLER_TEMP_LIQUID0_BIT] = (SMU_THROTTLER_TEMP_LIQUID0_BIT),
	[THROTTLER_TEMP_LIQUID1_BIT] = (SMU_THROTTLER_TEMP_LIQUID1_BIT),
	[THROTTLER_TDC_GFX_BIT] = (SMU_THROTTLER_TDC_GFX_BIT),
	[THROTTLER_TDC_SOC_BIT] = (SMU_THROTTLER_TDC_SOC_BIT),
	[THROTTLER_PPT0_BIT] = (SMU_THROTTLER_PPT0_BIT),
	[THROTTLER_PPT1_BIT] = (SMU_THROTTLER_PPT1_BIT),
	[THROTTLER_PPT2_BIT] = (SMU_THROTTLER_PPT2_BIT),
	[THROTTLER_PPT3_BIT] = (SMU_THROTTLER_PPT3_BIT),
	[THROTTLER_FIT_BIT] = (SMU_THROTTLER_FIT_BIT),
	[THROTTLER_PPM_BIT] = (SMU_THROTTLER_PPM_BIT),
	[THROTTLER_APCC_BIT] = (SMU_THROTTLER_APCC_BIT),
};


static bool is_asic_secure(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;

@@ -446,30 +468,6 @@ static int navi10_store_powerplay_table(struct smu_context *smu)
	return 0;
}

static int navi10_set_mp1_state(struct smu_context *smu,
				enum pp_mp1_state mp1_state)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t mp1_fw_flags;
	int ret = 0;

	ret = smu_cmn_set_mp1_state(smu, mp1_state);
	if (ret)
		return ret;

	if (mp1_state == PP_MP1_STATE_UNLOAD) {
		mp1_fw_flags = RREG32_PCIE(MP1_Public |
					   (smnMP1_FIRMWARE_FLAGS & 0xffffffff));

		mp1_fw_flags &= ~MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK;

		WREG32_PCIE(MP1_Public |
			    (smnMP1_FIRMWARE_FLAGS & 0xffffffff), mp1_fw_flags);
	}

	return 0;
}

static int navi10_setup_pptable(struct smu_context *smu)
{
	int ret = 0;

@@ -2138,7 +2136,10 @@ static int navi10_display_disable_memory_clock_switch(struct smu_context *smu,
	return ret;
}

static int navi10_get_power_limit(struct smu_context *smu)
static int navi10_get_power_limit(struct smu_context *smu,
				  uint32_t *current_power_limit,
				  uint32_t *default_power_limit,
				  uint32_t *max_power_limit)
{
	struct smu_11_0_powerplay_table *powerplay_table =
		(struct smu_11_0_powerplay_table *)smu->smu_table.power_play_table;

@@ -2155,18 +2156,25 @@ static int navi10_get_power_limit(struct smu_context *smu)
		power_limit =
			pptable->SocketPowerLimitAc[PPT_THROTTLER_PPT0];
	}
	smu->current_power_limit = smu->default_power_limit = power_limit;

	if (smu->od_enabled &&
	    navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_POWER_LIMIT)) {
		od_percent = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
	if (current_power_limit)
		*current_power_limit = power_limit;
	if (default_power_limit)
		*default_power_limit = power_limit;

		dev_dbg(smu->adev->dev, "ODSETTING_POWERPERCENTAGE: %d (default: %d)\n", od_percent, power_limit);
	if (max_power_limit) {
		if (smu->od_enabled &&
		    navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_POWER_LIMIT)) {
			od_percent = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_ODSETTING_POWERPERCENTAGE]);

		power_limit *= (100 + od_percent);
		power_limit /= 100;
			dev_dbg(smu->adev->dev, "ODSETTING_POWERPERCENTAGE: %d (default: %d)\n", od_percent, power_limit);

			power_limit *= (100 + od_percent);
			power_limit /= 100;
		}

		*max_power_limit = power_limit;
	}
	smu->max_power_limit = power_limit;

	return 0;
}

@@ -2257,6 +2265,29 @@ static int navi10_overdrive_get_gfx_clk_base_voltage(struct smu_context *smu,
	return 0;
}

static int navi10_baco_enter(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;

	if (adev->in_runpm)
		return smu_v11_0_baco_set_armd3_sequence(smu, BACO_SEQ_BACO);
	else
		return smu_v11_0_baco_enter(smu);
}

static int navi10_baco_exit(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;

	if (adev->in_runpm) {
		/* Wait for PMFW handling for the Dstate change */
		msleep(10);
		return smu_v11_0_baco_set_armd3_sequence(smu, BACO_SEQ_ULPS);
	} else {
		return smu_v11_0_baco_exit(smu);
	}
}

static int navi10_set_default_od_settings(struct smu_context *smu)
{
	OverDriveTable_t *od_table =

@@ -2676,6 +2707,9 @@ static ssize_t navi10_get_legacy_gpu_metrics(struct smu_context *smu,
	gpu_metrics->current_dclk0 = metrics.CurrClock[PPCLK_DCLK];

	gpu_metrics->throttle_status = metrics.ThrottlerStatus;
	gpu_metrics->indep_throttle_status =
			smu_cmn_get_indep_throttler_status(metrics.ThrottlerStatus,
							   navi1x_throttler_map);

	gpu_metrics->current_fan_speed = metrics.CurrFanSpeed;

@@ -2753,6 +2787,9 @@ static ssize_t navi10_get_gpu_metrics(struct smu_context *smu,
	gpu_metrics->current_dclk0 = metrics.CurrClock[PPCLK_DCLK];

	gpu_metrics->throttle_status = metrics.ThrottlerStatus;
	gpu_metrics->indep_throttle_status =
			smu_cmn_get_indep_throttler_status(metrics.ThrottlerStatus,
							   navi1x_throttler_map);

	gpu_metrics->current_fan_speed = metrics.CurrFanSpeed;

@@ -2829,6 +2866,9 @@ static ssize_t navi12_get_legacy_gpu_metrics(struct smu_context *smu,
	gpu_metrics->current_dclk0 = metrics.CurrClock[PPCLK_DCLK];

	gpu_metrics->throttle_status = metrics.ThrottlerStatus;
	gpu_metrics->indep_throttle_status =
			smu_cmn_get_indep_throttler_status(metrics.ThrottlerStatus,
							   navi1x_throttler_map);

	gpu_metrics->current_fan_speed = metrics.CurrFanSpeed;

@@ -2911,6 +2951,9 @@ static ssize_t navi12_get_gpu_metrics(struct smu_context *smu,
	gpu_metrics->current_dclk0 = metrics.CurrClock[PPCLK_DCLK];

	gpu_metrics->throttle_status = metrics.ThrottlerStatus;
	gpu_metrics->indep_throttle_status =
			smu_cmn_get_indep_throttler_status(metrics.ThrottlerStatus,
							   navi1x_throttler_map);

	gpu_metrics->current_fan_speed = metrics.CurrFanSpeed;

@@ -3095,8 +3138,8 @@ static const struct pptable_funcs navi10_ppt_funcs = {
	.baco_is_support = smu_v11_0_baco_is_support,
	.baco_get_state = smu_v11_0_baco_get_state,
	.baco_set_state = smu_v11_0_baco_set_state,
	.baco_enter = smu_v11_0_baco_enter,
	.baco_exit = smu_v11_0_baco_exit,
	.baco_enter = navi10_baco_enter,
	.baco_exit = navi10_baco_exit,
	.get_dpm_ultimate_freq = smu_v11_0_get_dpm_ultimate_freq,
	.set_soft_freq_limited_range = smu_v11_0_set_soft_freq_limited_range,
	.set_default_od_settings = navi10_set_default_od_settings,

@@ -3112,7 +3155,7 @@ static const struct pptable_funcs navi10_ppt_funcs = {
	.get_fan_parameters = navi10_get_fan_parameters,
	.post_init = navi10_post_smu_init,
	.interrupt_work = smu_v11_0_interrupt_work,
	.set_mp1_state = navi10_set_mp1_state,
	.set_mp1_state = smu_cmn_set_mp1_state,
};

void navi10_set_ppt_funcs(struct smu_context *smu)
@@ -239,6 +239,27 @@ static struct cmn2asic_mapping sienna_cichlid_workload_map[PP_SMC_POWER_PROFILE_
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM, WORKLOAD_PPLIB_CUSTOM_BIT),
};

static const uint8_t sienna_cichlid_throttler_map[] = {
	[THROTTLER_TEMP_EDGE_BIT] = (SMU_THROTTLER_TEMP_EDGE_BIT),
	[THROTTLER_TEMP_HOTSPOT_BIT] = (SMU_THROTTLER_TEMP_HOTSPOT_BIT),
	[THROTTLER_TEMP_MEM_BIT] = (SMU_THROTTLER_TEMP_MEM_BIT),
	[THROTTLER_TEMP_VR_GFX_BIT] = (SMU_THROTTLER_TEMP_VR_GFX_BIT),
	[THROTTLER_TEMP_VR_MEM0_BIT] = (SMU_THROTTLER_TEMP_VR_MEM0_BIT),
	[THROTTLER_TEMP_VR_MEM1_BIT] = (SMU_THROTTLER_TEMP_VR_MEM1_BIT),
	[THROTTLER_TEMP_VR_SOC_BIT] = (SMU_THROTTLER_TEMP_VR_SOC_BIT),
	[THROTTLER_TEMP_LIQUID0_BIT] = (SMU_THROTTLER_TEMP_LIQUID0_BIT),
	[THROTTLER_TEMP_LIQUID1_BIT] = (SMU_THROTTLER_TEMP_LIQUID1_BIT),
	[THROTTLER_TDC_GFX_BIT] = (SMU_THROTTLER_TDC_GFX_BIT),
	[THROTTLER_TDC_SOC_BIT] = (SMU_THROTTLER_TDC_SOC_BIT),
	[THROTTLER_PPT0_BIT] = (SMU_THROTTLER_PPT0_BIT),
	[THROTTLER_PPT1_BIT] = (SMU_THROTTLER_PPT1_BIT),
	[THROTTLER_PPT2_BIT] = (SMU_THROTTLER_PPT2_BIT),
	[THROTTLER_PPT3_BIT] = (SMU_THROTTLER_PPT3_BIT),
	[THROTTLER_FIT_BIT] = (SMU_THROTTLER_FIT_BIT),
	[THROTTLER_PPM_BIT] = (SMU_THROTTLER_PPM_BIT),
	[THROTTLER_APCC_BIT] = (SMU_THROTTLER_APCC_BIT),
};

static int
sienna_cichlid_get_allowed_feature_mask(struct smu_context *smu,
					uint32_t *feature_mask, uint32_t num)

@@ -449,7 +470,7 @@ static int sienna_cichlid_tables_init(struct smu_context *smu)
		goto err0_out;
	smu_table->metrics_time = 0;

	smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_1);
	smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_3);
	smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL);
	if (!smu_table->gpu_metrics_table)
		goto err1_out;

@@ -1770,7 +1791,10 @@ static int sienna_cichlid_display_disable_memory_clock_switch(struct smu_context
	return ret;
}

static int sienna_cichlid_get_power_limit(struct smu_context *smu)
static int sienna_cichlid_get_power_limit(struct smu_context *smu,
					  uint32_t *current_power_limit,
					  uint32_t *default_power_limit,
					  uint32_t *max_power_limit)
{
	struct smu_11_0_7_powerplay_table *powerplay_table =
		(struct smu_11_0_7_powerplay_table *)smu->smu_table.power_play_table;

@@ -1783,17 +1807,23 @@ static int sienna_cichlid_get_power_limit(struct smu_context *smu)
		power_limit =
			table_member[PPT_THROTTLER_PPT0];
	}
	smu->current_power_limit = smu->default_power_limit = power_limit;

	if (smu->od_enabled) {
		od_percent = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_7_ODSETTING_POWERPERCENTAGE]);
	if (current_power_limit)
		*current_power_limit = power_limit;
	if (default_power_limit)
		*default_power_limit = power_limit;

		dev_dbg(smu->adev->dev, "ODSETTING_POWERPERCENTAGE: %d (default: %d)\n", od_percent, power_limit);
	if (max_power_limit) {
		if (smu->od_enabled) {
			od_percent = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_7_ODSETTING_POWERPERCENTAGE]);

		power_limit *= (100 + od_percent);
		power_limit /= 100;
			dev_dbg(smu->adev->dev, "ODSETTING_POWERPERCENTAGE: %d (default: %d)\n", od_percent, power_limit);

			power_limit *= (100 + od_percent);
			power_limit /= 100;
		}
		*max_power_limit = power_limit;
	}
	smu->max_power_limit = power_limit;

	return 0;
}

@@ -2100,6 +2130,29 @@ static int sienna_cichlid_run_btc(struct smu_context *smu)
	return smu_cmn_send_smc_msg(smu, SMU_MSG_RunDcBtc, NULL);
}

static int sienna_cichlid_baco_enter(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;

	if (adev->in_runpm)
		return smu_v11_0_baco_set_armd3_sequence(smu, BACO_SEQ_BACO);
	else
		return smu_v11_0_baco_enter(smu);
}

static int sienna_cichlid_baco_exit(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;

	if (adev->in_runpm) {
		/* Wait for PMFW handling for the Dstate change */
		msleep(10);
		return smu_v11_0_baco_set_armd3_sequence(smu, BACO_SEQ_ULPS);
	} else {
		return smu_v11_0_baco_exit(smu);
	}
}

static bool sienna_cichlid_is_mode1_reset_supported(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;

@@ -3620,8 +3673,8 @@ static ssize_t sienna_cichlid_get_gpu_metrics(struct smu_context *smu,
					      void **table)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct gpu_metrics_v1_1 *gpu_metrics =
		(struct gpu_metrics_v1_1 *)smu_table->gpu_metrics_table;
	struct gpu_metrics_v1_3 *gpu_metrics =
		(struct gpu_metrics_v1_3 *)smu_table->gpu_metrics_table;
	SmuMetricsExternal_t metrics_external;
	SmuMetrics_t *metrics =
		&(metrics_external.SmuMetrics);

@@ -3635,7 +3688,7 @@ static ssize_t sienna_cichlid_get_gpu_metrics(struct smu_context *smu,
	if (ret)
		return ret;

	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 1);
	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3);

	gpu_metrics->temperature_edge = metrics->TemperatureEdge;
	gpu_metrics->temperature_hotspot = metrics->TemperatureHotspot;

@@ -3670,6 +3723,9 @@ static ssize_t sienna_cichlid_get_gpu_metrics(struct smu_context *smu,
	gpu_metrics->current_dclk1 = metrics->CurrClock[PPCLK_DCLK_1];

	gpu_metrics->throttle_status = metrics->ThrottlerStatus;
	gpu_metrics->indep_throttle_status =
			smu_cmn_get_indep_throttler_status(metrics->ThrottlerStatus,
							   sienna_cichlid_throttler_map);

	gpu_metrics->current_fan_speed = metrics->CurrFanSpeed;

@@ -3692,7 +3748,7 @@ static ssize_t sienna_cichlid_get_gpu_metrics(struct smu_context *smu,
	*table = (void *)gpu_metrics;

	return sizeof(struct gpu_metrics_v1_1);
	return sizeof(struct gpu_metrics_v1_3);
}

static int sienna_cichlid_enable_mgpu_fan_boost(struct smu_context *smu)

@@ -3875,8 +3931,8 @@ static const struct pptable_funcs sienna_cichlid_ppt_funcs = {
	.baco_is_support = smu_v11_0_baco_is_support,
	.baco_get_state = smu_v11_0_baco_get_state,
	.baco_set_state = smu_v11_0_baco_set_state,
	.baco_enter = smu_v11_0_baco_enter,
	.baco_exit = smu_v11_0_baco_exit,
	.baco_enter = sienna_cichlid_baco_enter,
	.baco_exit = sienna_cichlid_baco_exit,
	.mode1_reset_is_support = sienna_cichlid_is_mode1_reset_supported,
	.mode1_reset = smu_v11_0_mode1_reset,
	.get_dpm_ultimate_freq = sienna_cichlid_get_dpm_ultimate_freq,
@@ -1474,7 +1474,8 @@ int smu_v11_0_set_azalia_d3_pme(struct smu_context *smu)
	return smu_cmn_send_smc_msg(smu, SMU_MSG_BacoAudioD3PME, NULL);
}

static int smu_v11_0_baco_set_armd3_sequence(struct smu_context *smu, enum smu_v11_0_baco_seq baco_seq)
int smu_v11_0_baco_set_armd3_sequence(struct smu_context *smu,
				      enum smu_v11_0_baco_seq baco_seq)
{
	return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ArmD3, baco_seq, NULL);
}

@@ -1578,16 +1579,8 @@ int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state)

int smu_v11_0_baco_enter(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	/* Arcturus does not need this audio workaround */
	if (adev->asic_type != CHIP_ARCTURUS) {
		ret = smu_v11_0_baco_set_armd3_sequence(smu, BACO_SEQ_BACO);
		if (ret)
			return ret;
	}

	ret = smu_v11_0_baco_set_state(smu, SMU_BACO_STATE_ENTER);
	if (ret)
		return ret;
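Un-staticing smu_v11_0_baco_set_armd3_sequence() lets per-ASIC code choose between a full BACO entry and merely arming the D3 sequence when the transition is driven by runtime PM. A minimal sketch of the shape the navi10 and sienna cichlid hooks in this series take (illustrative function name, driver context assumed):

static int example_baco_enter(struct smu_context *smu)
{
	if (smu->adev->in_runpm)
		/* runtime suspend: let PMFW sequence the Dstate change */
		return smu_v11_0_baco_set_armd3_sequence(smu, BACO_SEQ_BACO);

	return smu_v11_0_baco_enter(smu);
}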
@@ -190,6 +190,20 @@ static struct cmn2asic_mapping vangogh_workload_map[PP_SMC_POWER_PROFILE_COUNT]
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM, WORKLOAD_PPLIB_CUSTOM_BIT),
};

static const uint8_t vangogh_throttler_map[] = {
	[THROTTLER_STATUS_BIT_SPL] = (SMU_THROTTLER_SPL_BIT),
	[THROTTLER_STATUS_BIT_FPPT] = (SMU_THROTTLER_FPPT_BIT),
	[THROTTLER_STATUS_BIT_SPPT] = (SMU_THROTTLER_SPPT_BIT),
	[THROTTLER_STATUS_BIT_SPPT_APU] = (SMU_THROTTLER_SPPT_APU_BIT),
	[THROTTLER_STATUS_BIT_THM_CORE] = (SMU_THROTTLER_TEMP_CORE_BIT),
	[THROTTLER_STATUS_BIT_THM_GFX] = (SMU_THROTTLER_TEMP_GPU_BIT),
	[THROTTLER_STATUS_BIT_THM_SOC] = (SMU_THROTTLER_TEMP_SOC_BIT),
	[THROTTLER_STATUS_BIT_TDC_VDD] = (SMU_THROTTLER_TDC_VDD_BIT),
	[THROTTLER_STATUS_BIT_TDC_SOC] = (SMU_THROTTLER_TDC_SOC_BIT),
	[THROTTLER_STATUS_BIT_TDC_GFX] = (SMU_THROTTLER_TDC_GFX_BIT),
	[THROTTLER_STATUS_BIT_TDC_CVIP] = (SMU_THROTTLER_TDC_CVIP_BIT),
};

static int vangogh_tables_init(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;

@@ -226,7 +240,7 @@ static int vangogh_tables_init(struct smu_context *smu)
		goto err0_out;
	smu_table->metrics_time = 0;

	smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v2_1);
	smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v2_2);
	smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL);
	if (!smu_table->gpu_metrics_table)
		goto err1_out;

@@ -1632,8 +1646,8 @@ static ssize_t vangogh_get_legacy_gpu_metrics(struct smu_context *smu,
					      void **table)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct gpu_metrics_v2_1 *gpu_metrics =
		(struct gpu_metrics_v2_1 *)smu_table->gpu_metrics_table;
	struct gpu_metrics_v2_2 *gpu_metrics =
		(struct gpu_metrics_v2_2 *)smu_table->gpu_metrics_table;
	SmuMetrics_legacy_t metrics;
	int ret = 0;

@@ -1641,7 +1655,7 @@ static ssize_t vangogh_get_legacy_gpu_metrics(struct smu_context *smu,
	if (ret)
		return ret;

	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 1);
	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 2);

	gpu_metrics->temperature_gfx = metrics.GfxTemperature;
	gpu_metrics->temperature_soc = metrics.SocTemperature;

@@ -1674,20 +1688,23 @@ static ssize_t vangogh_get_legacy_gpu_metrics(struct smu_context *smu,
	gpu_metrics->current_l3clk[0] = metrics.L3Frequency[0];

	gpu_metrics->throttle_status = metrics.ThrottlerStatus;
	gpu_metrics->indep_throttle_status =
			smu_cmn_get_indep_throttler_status(metrics.ThrottlerStatus,
							   vangogh_throttler_map);

	gpu_metrics->system_clock_counter = ktime_get_boottime_ns();

	*table = (void *)gpu_metrics;

	return sizeof(struct gpu_metrics_v2_1);
	return sizeof(struct gpu_metrics_v2_2);
}

static ssize_t vangogh_get_gpu_metrics(struct smu_context *smu,
				       void **table)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct gpu_metrics_v2_1 *gpu_metrics =
		(struct gpu_metrics_v2_1 *)smu_table->gpu_metrics_table;
	struct gpu_metrics_v2_2 *gpu_metrics =
		(struct gpu_metrics_v2_2 *)smu_table->gpu_metrics_table;
	SmuMetrics_t metrics;
	int ret = 0;

@@ -1695,7 +1712,7 @@ static ssize_t vangogh_get_gpu_metrics(struct smu_context *smu,
	if (ret)
		return ret;

	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 1);
	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 2);

	gpu_metrics->temperature_gfx = metrics.Current.GfxTemperature;
	gpu_metrics->temperature_soc = metrics.Current.SocTemperature;

@@ -1735,12 +1752,15 @@ static ssize_t vangogh_get_gpu_metrics(struct smu_context *smu,
	gpu_metrics->current_l3clk[0] = metrics.Current.L3Frequency[0];

	gpu_metrics->throttle_status = metrics.Current.ThrottlerStatus;
	gpu_metrics->indep_throttle_status =
			smu_cmn_get_indep_throttler_status(metrics.Current.ThrottlerStatus,
							   vangogh_throttler_map);

	gpu_metrics->system_clock_counter = ktime_get_boottime_ns();

	*table = (void *)gpu_metrics;

	return sizeof(struct gpu_metrics_v2_1);
	return sizeof(struct gpu_metrics_v2_2);
}

static ssize_t vangogh_common_get_gpu_metrics(struct smu_context *smu,

@@ -2051,7 +2071,10 @@ static int vangogh_mode2_reset(struct smu_context *smu)
	return vangogh_mode_reset(smu, SMU_RESET_MODE_2);
}

static int vangogh_get_power_limit(struct smu_context *smu)
static int vangogh_get_power_limit(struct smu_context *smu,
				   uint32_t *current_power_limit,
				   uint32_t *default_power_limit,
				   uint32_t *max_power_limit)
{
	struct smu_11_5_power_context *power_context =
		smu->smu_power.power_context;

@@ -2067,8 +2090,12 @@ static int vangogh_get_power_limit(struct smu_context *smu)
		return ret;
	}
	/* convert from milliwatt to watt */
	smu->current_power_limit = smu->default_power_limit = ppt_limit / 1000;
	smu->max_power_limit = 29;
	if (current_power_limit)
		*current_power_limit = ppt_limit / 1000;
	if (default_power_limit)
		*default_power_limit = ppt_limit / 1000;
	if (max_power_limit)
		*max_power_limit = 29;

	ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetFastPPTLimit, &ppt_limit);
	if (ret) {
@@ -128,6 +128,22 @@ static struct cmn2asic_mapping renoir_workload_map[PP_SMC_POWER_PROFILE_COUNT] =
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM, WORKLOAD_PPLIB_CUSTOM_BIT),
};

static const uint8_t renoir_throttler_map[] = {
	[THROTTLER_STATUS_BIT_SPL] = (SMU_THROTTLER_SPL_BIT),
	[THROTTLER_STATUS_BIT_FPPT] = (SMU_THROTTLER_FPPT_BIT),
	[THROTTLER_STATUS_BIT_SPPT] = (SMU_THROTTLER_SPPT_BIT),
	[THROTTLER_STATUS_BIT_SPPT_APU] = (SMU_THROTTLER_SPPT_APU_BIT),
	[THROTTLER_STATUS_BIT_THM_CORE] = (SMU_THROTTLER_TEMP_CORE_BIT),
	[THROTTLER_STATUS_BIT_THM_GFX] = (SMU_THROTTLER_TEMP_GPU_BIT),
	[THROTTLER_STATUS_BIT_THM_SOC] = (SMU_THROTTLER_TEMP_SOC_BIT),
	[THROTTLER_STATUS_BIT_TDC_VDD] = (SMU_THROTTLER_TDC_VDD_BIT),
	[THROTTLER_STATUS_BIT_TDC_SOC] = (SMU_THROTTLER_TDC_SOC_BIT),
	[THROTTLER_STATUS_BIT_PROCHOT_CPU] = (SMU_THROTTLER_PROCHOT_CPU_BIT),
	[THROTTLER_STATUS_BIT_PROCHOT_GFX] = (SMU_THROTTLER_PROCHOT_GFX_BIT),
	[THROTTLER_STATUS_BIT_EDC_CPU] = (SMU_THROTTLER_EDC_CPU_BIT),
	[THROTTLER_STATUS_BIT_EDC_GFX] = (SMU_THROTTLER_EDC_GFX_BIT),
};

static int renoir_init_smc_tables(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;

@@ -153,7 +169,7 @@ static int renoir_init_smc_tables(struct smu_context *smu)
	if (!smu_table->watermarks_table)
		goto err2_out;

	smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v2_1);
	smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v2_2);
	smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL);
	if (!smu_table->gpu_metrics_table)
		goto err3_out;

@@ -1164,6 +1180,28 @@ static int renoir_get_smu_metrics_data(struct smu_context *smu,
	case METRICS_VOLTAGE_VDDSOC:
		*value = metrics->Voltage[1];
		break;
	case METRICS_SS_APU_SHARE:
		/* return the percentage of APU power with respect to APU's power limit.
		 * percentage is reported, this isn't boost value. Smartshift power
		 * boost/shift is only when the percentage is more than 100.
		 */
		if (metrics->StapmOriginalLimit > 0)
			*value = (metrics->ApuPower * 100) / metrics->StapmOriginalLimit;
		else
			*value = 0;
		break;
	case METRICS_SS_DGPU_SHARE:
		/* return the percentage of dGPU power with respect to dGPU's power limit.
		 * percentage is reported, this isn't boost value. Smartshift power
		 * boost/shift is only when the percentage is more than 100.
		 */
		if ((metrics->dGpuPower > 0) &&
		    (metrics->StapmCurrentLimit > metrics->StapmOriginalLimit))
			*value = (metrics->dGpuPower * 100) /
				 (metrics->StapmCurrentLimit - metrics->StapmOriginalLimit);
		else
			*value = 0;
		break;
	default:
		*value = UINT_MAX;
		break;

@@ -1235,6 +1273,18 @@ static int renoir_read_sensor(struct smu_context *smu,
					   (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_SS_APU_SHARE:
		ret = renoir_get_smu_metrics_data(smu,
						  METRICS_SS_APU_SHARE,
						  (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_SS_DGPU_SHARE:
		ret = renoir_get_smu_metrics_data(smu,
						  METRICS_SS_DGPU_SHARE,
						  (uint32_t *)data);
		*size = 4;
		break;
	default:
		ret = -EOPNOTSUPP;
		break;

@@ -1264,8 +1314,8 @@ static ssize_t renoir_get_gpu_metrics(struct smu_context *smu,
				      void **table)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct gpu_metrics_v2_1 *gpu_metrics =
		(struct gpu_metrics_v2_1 *)smu_table->gpu_metrics_table;
	struct gpu_metrics_v2_2 *gpu_metrics =
		(struct gpu_metrics_v2_2 *)smu_table->gpu_metrics_table;
	SmuMetrics_t metrics;
	int ret = 0;

@@ -1273,7 +1323,7 @@ static ssize_t renoir_get_gpu_metrics(struct smu_context *smu,
	if (ret)
		return ret;

	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 1);
	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 2);

	gpu_metrics->temperature_gfx = metrics.GfxTemperature;
	gpu_metrics->temperature_soc = metrics.SocTemperature;

@@ -1311,6 +1361,9 @@ static ssize_t renoir_get_gpu_metrics(struct smu_context *smu,
	gpu_metrics->current_l3clk[1] = metrics.L3Frequency[1];

	gpu_metrics->throttle_status = metrics.ThrottlerStatus;
	gpu_metrics->indep_throttle_status =
			smu_cmn_get_indep_throttler_status(metrics.ThrottlerStatus,
							   renoir_throttler_map);

	gpu_metrics->fan_pwm = metrics.FanPwm;

@@ -1318,7 +1371,7 @@ static ssize_t renoir_get_gpu_metrics(struct smu_context *smu,
	*table = (void *)gpu_metrics;

	return sizeof(struct gpu_metrics_v2_1);
	return sizeof(struct gpu_metrics_v2_2);
}

static int renoir_gfx_state_change_set(struct smu_context *smu, uint32_t state)
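The Smartshift sensors report shares as plain integer percentages of the APU's power limit, where a value above 100 indicates power is actually being shifted. A standalone rendering of the METRICS_SS_APU_SHARE arithmetic above (the sample limits are made up):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t apu_power = 18000;            /* example firmware reading */
	uint32_t stapm_original_limit = 15000; /* example APU power limit */
	uint32_t ss_apu_share = 0;

	if (stapm_original_limit > 0)
		ss_apu_share = (apu_power * 100) / stapm_original_limit;

	/* > 100 means Smartshift boost/shift is in effect */
	printf("APU share: %u%%\n", (unsigned)ss_apu_share); /* prints 120% */
	return 0;
}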
@@ -191,6 +191,20 @@ static const struct cmn2asic_mapping aldebaran_table_map[SMU_TABLE_COUNT] = {
	TAB_MAP(I2C_COMMANDS),
};

static const uint8_t aldebaran_throttler_map[] = {
	[THROTTLER_PPT0_BIT] = (SMU_THROTTLER_PPT0_BIT),
	[THROTTLER_PPT1_BIT] = (SMU_THROTTLER_PPT1_BIT),
	[THROTTLER_TDC_GFX_BIT] = (SMU_THROTTLER_TDC_GFX_BIT),
	[THROTTLER_TDC_SOC_BIT] = (SMU_THROTTLER_TDC_SOC_BIT),
	[THROTTLER_TDC_HBM_BIT] = (SMU_THROTTLER_TDC_MEM_BIT),
	[THROTTLER_TEMP_GPU_BIT] = (SMU_THROTTLER_TEMP_GPU_BIT),
	[THROTTLER_TEMP_MEM_BIT] = (SMU_THROTTLER_TEMP_MEM_BIT),
	[THROTTLER_TEMP_VR_GFX_BIT] = (SMU_THROTTLER_TEMP_VR_GFX_BIT),
	[THROTTLER_TEMP_VR_SOC_BIT] = (SMU_THROTTLER_TEMP_VR_SOC_BIT),
	[THROTTLER_TEMP_VR_MEM_BIT] = (SMU_THROTTLER_TEMP_VR_MEM0_BIT),
	[THROTTLER_APCC_BIT] = (SMU_THROTTLER_APCC_BIT),
};

static int aldebaran_tables_init(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;

@@ -213,7 +227,7 @@ static int aldebaran_tables_init(struct smu_context *smu)
		return -ENOMEM;
	smu_table->metrics_time = 0;

	smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_2);
	smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_3);
	smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL);
	if (!smu_table->gpu_metrics_table) {
		kfree(smu_table->metrics_table);

@@ -510,6 +524,16 @@ static int aldebaran_freqs_in_same_level(int32_t frequency1,
	return (abs(frequency1 - frequency2) <= EPSILON);
}

static bool aldebaran_is_primary(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;

	if (adev->smuio.funcs && adev->smuio.funcs->get_die_id)
		return adev->smuio.funcs->get_die_id(adev) == 0;

	return true;
}

static int aldebaran_get_smu_metrics_data(struct smu_context *smu,
					  MetricsMember_t member,
					  uint32_t *value)

@@ -563,7 +587,10 @@ static int aldebaran_get_smu_metrics_data(struct smu_context *smu,
		*value = metrics->AverageUclkActivity;
		break;
	case METRICS_AVERAGE_SOCKETPOWER:
		*value = metrics->AverageSocketPower << 8;
		/* Valid power data is available only from primary die */
		*value = aldebaran_is_primary(smu) ?
			 metrics->AverageSocketPower << 8 :
			 0;
		break;
	case METRICS_TEMPERATURE_EDGE:
		*value = metrics->TemperatureEdge *

@@ -1132,7 +1159,10 @@ static int aldebaran_read_sensor(struct smu_context *smu,
	return ret;
}

static int aldebaran_get_power_limit(struct smu_context *smu)
static int aldebaran_get_power_limit(struct smu_context *smu,
				     uint32_t *current_power_limit,
				     uint32_t *default_power_limit,
				     uint32_t *max_power_limit)
{
	PPTable_t *pptable = smu->smu_table.driver_pptable;
	uint32_t power_limit = 0;

@@ -1141,24 +1171,46 @@ static int aldebaran_get_power_limit(struct smu_context *smu)
	if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT))
		return -EINVAL;

	ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetPptLimit, &power_limit);
	/* Valid power data is available only from primary die.
	 * For secondary die show the value as 0.
	 */
	if (aldebaran_is_primary(smu)) {
		ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetPptLimit,
					   &power_limit);

	if (ret) {
		/* the last hope to figure out the ppt limit */
		if (!pptable) {
			dev_err(smu->adev->dev, "Cannot get PPT limit due to pptable missing!");
			return -EINVAL;
		if (ret) {
			/* the last hope to figure out the ppt limit */
			if (!pptable) {
				dev_err(smu->adev->dev,
					"Cannot get PPT limit due to pptable missing!");
				return -EINVAL;
			}
			power_limit = pptable->PptLimit;
		}
		power_limit = pptable->PptLimit;
	}

	smu->current_power_limit = smu->default_power_limit = power_limit;
	if (pptable)
		smu->max_power_limit = pptable->PptLimit;
	if (current_power_limit)
		*current_power_limit = power_limit;
	if (default_power_limit)
		*default_power_limit = power_limit;

	if (max_power_limit) {
		if (pptable)
			*max_power_limit = pptable->PptLimit;
	}

	return 0;
}

static int aldebaran_set_power_limit(struct smu_context *smu, uint32_t n)
{
	/* Power limit can be set only through primary die */
	if (aldebaran_is_primary(smu))
		return smu_v13_0_set_power_limit(smu, n);

	return -EINVAL;
}

static int aldebaran_system_features_control(struct smu_context *smu, bool enable)
{
	int ret;

@@ -1706,8 +1758,8 @@ static ssize_t aldebaran_get_gpu_metrics(struct smu_context *smu,
					 void **table)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct gpu_metrics_v1_2 *gpu_metrics =
		(struct gpu_metrics_v1_2 *)smu_table->gpu_metrics_table;
	struct gpu_metrics_v1_3 *gpu_metrics =
		(struct gpu_metrics_v1_3 *)smu_table->gpu_metrics_table;
	SmuMetrics_t metrics;
	int i, ret = 0;

@@ -1717,7 +1769,7 @@ static ssize_t aldebaran_get_gpu_metrics(struct smu_context *smu,
	if (ret)
		return ret;

	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 2);
	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3);

	gpu_metrics->temperature_edge = metrics.TemperatureEdge;
	gpu_metrics->temperature_hotspot = metrics.TemperatureHotspot;

@@ -1730,10 +1782,16 @@ static ssize_t aldebaran_get_gpu_metrics(struct smu_context *smu,
	gpu_metrics->average_umc_activity = metrics.AverageUclkActivity;
	gpu_metrics->average_mm_activity = 0;

	gpu_metrics->average_socket_power = metrics.AverageSocketPower;
	gpu_metrics->energy_accumulator =
	/* Valid power data is available only from primary die */
	if (aldebaran_is_primary(smu)) {
		gpu_metrics->average_socket_power = metrics.AverageSocketPower;
		gpu_metrics->energy_accumulator =
			(uint64_t)metrics.EnergyAcc64bitHigh << 32 |
			metrics.EnergyAcc64bitLow;
	} else {
		gpu_metrics->average_socket_power = 0;
		gpu_metrics->energy_accumulator = 0;
	}

	gpu_metrics->average_gfxclk_frequency = metrics.AverageGfxclkFrequency;
	gpu_metrics->average_socclk_frequency = metrics.AverageSocclkFrequency;

@@ -1748,6 +1806,9 @@ static ssize_t aldebaran_get_gpu_metrics(struct smu_context *smu,
	gpu_metrics->current_dclk0 = metrics.CurrClock[PPCLK_DCLK];

	gpu_metrics->throttle_status = metrics.ThrottlerStatus;
	gpu_metrics->indep_throttle_status =
			smu_cmn_get_indep_throttler_status(metrics.ThrottlerStatus,
							   aldebaran_throttler_map);

	gpu_metrics->current_fan_speed = 0;

@@ -1769,7 +1830,7 @@ static ssize_t aldebaran_get_gpu_metrics(struct smu_context *smu,
	*table = (void *)gpu_metrics;

	return sizeof(struct gpu_metrics_v1_2);
	return sizeof(struct gpu_metrics_v1_3);
}

static int aldebaran_mode2_reset(struct smu_context *smu)

@@ -1898,7 +1959,7 @@ static const struct pptable_funcs aldebaran_ppt_funcs = {
	.get_enabled_mask = smu_cmn_get_enabled_mask,
	.feature_is_enabled = smu_cmn_feature_is_enabled,
	.disable_all_features_with_exception = smu_cmn_disable_all_features_with_exception,
	.set_power_limit = smu_v13_0_set_power_limit,
	.set_power_limit = aldebaran_set_power_limit,
	.init_max_sustainable_clocks = smu_v13_0_init_max_sustainable_clocks,
	.enable_thermal_alert = smu_v13_0_enable_thermal_alert,
	.disable_thermal_alert = smu_v13_0_disable_thermal_alert,
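On this path METRICS_AVERAGE_SOCKETPOWER is returned shifted left by 8, i.e. as a fixed-point value with 8 fractional bits (my reading of the `<< 8`, not stated in the diff), and it is zeroed on the secondary die. A standalone sketch of producing and decoding that encoding (values invented):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t average_socket_power = 120; /* watts, example firmware value */
	int primary = 1;                     /* pretend this die's ID is 0 */
	uint32_t value;

	/* Valid power data is available only from the primary die */
	value = primary ? (uint32_t)average_socket_power << 8 : 0;

	printf("raw=%u, watts=%u\n", (unsigned)value, (unsigned)(value >> 8));
	return 0;
}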
@@ -356,6 +356,28 @@ static int yellow_carp_get_smu_metrics_data(struct smu_context *smu,
	case METRICS_VOLTAGE_VDDSOC:
		*value = metrics->Voltage[1];
		break;
	case METRICS_SS_APU_SHARE:
		/* return the percentage of APU power with respect to APU's power limit.
		 * percentage is reported, this isn't boost value. Smartshift power
		 * boost/shift is only when the percentage is more than 100.
		 */
		if (metrics->StapmOpnLimit > 0)
			*value = (metrics->ApuPower * 100) / metrics->StapmOpnLimit;
		else
			*value = 0;
		break;
	case METRICS_SS_DGPU_SHARE:
		/* return the percentage of dGPU power with respect to dGPU's power limit.
		 * percentage is reported, this isn't boost value. Smartshift power
		 * boost/shift is only when the percentage is more than 100.
		 */
		if ((metrics->dGpuPower > 0) &&
		    (metrics->StapmCurrentLimit > metrics->StapmOpnLimit))
			*value = (metrics->dGpuPower * 100) /
				 (metrics->StapmCurrentLimit - metrics->StapmOpnLimit);
		else
			*value = 0;
		break;
	default:
		*value = UINT_MAX;
		break;

@@ -427,6 +449,18 @@ static int yellow_carp_read_sensor(struct smu_context *smu,
					       (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_SS_APU_SHARE:
		ret = yellow_carp_get_smu_metrics_data(smu,
						       METRICS_SS_APU_SHARE,
						       (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_SS_DGPU_SHARE:
		ret = yellow_carp_get_smu_metrics_data(smu,
						       METRICS_SS_DGPU_SHARE,
						       (uint32_t *)data);
		*size = 4;
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
@@ -398,6 +398,19 @@ int smu_cmn_get_enabled_32_bits_mask(struct smu_context *smu,

}

uint64_t smu_cmn_get_indep_throttler_status(
					const unsigned long dep_status,
					const uint8_t *throttler_map)
{
	uint64_t indep_status = 0;
	uint8_t dep_bit = 0;

	for_each_set_bit(dep_bit, &dep_status, 32)
		indep_status |= 1ULL << throttler_map[dep_bit];

	return indep_status;
}

int smu_cmn_feature_update_enable_state(struct smu_context *smu,
					uint64_t feature_mask,
					bool enabled)
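smu_cmn_get_indep_throttler_status() walks the set bits of the 32-bit firmware status word and re-emits each one at the position given by the per-ASIC map. A standalone equivalent, with a plain loop in place of the kernel's for_each_set_bit and assumed firmware bit positions (the real THROTTLER_* values vary per ASIC):

#include <stdint.h>
#include <stdio.h>

#define THROTTLER_PPT0_BIT          0  /* assumed ASIC-dependent position */
#define THROTTLER_TEMP_EDGE_BIT     1  /* assumed ASIC-dependent position */
#define SMU_THROTTLER_PPT0_BIT      0
#define SMU_THROTTLER_TEMP_EDGE_BIT 35

static const uint8_t throttler_map[32] = {
	[THROTTLER_PPT0_BIT]      = SMU_THROTTLER_PPT0_BIT,
	[THROTTLER_TEMP_EDGE_BIT] = SMU_THROTTLER_TEMP_EDGE_BIT,
};

int main(void)
{
	uint32_t dep_status = (1U << THROTTLER_PPT0_BIT) |
			      (1U << THROTTLER_TEMP_EDGE_BIT);
	uint64_t indep_status = 0;
	int bit;

	for (bit = 0; bit < 32; bit++)
		if (dep_status & (1U << bit))
			indep_status |= 1ULL << throttler_map[bit];

	/* prints 0x800000001: bit 0 (PPT0) plus bit 35 (TEMP_EDGE) */
	printf("indep_status = 0x%llx\n", (unsigned long long)indep_status);
	return 0;
}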
@@ -575,23 +588,52 @@ int smu_cmn_set_pp_feature_mask(struct smu_context *smu,
	return ret;
}

/**
 * smu_cmn_disable_all_features_with_exception - disable all dpm features
 *                                               except this specified by
 *                                               @mask
 *
 * @smu:               smu_context pointer
 * @no_hw_disablement: whether real dpm disablement should be performed
 *                     true: update the cache(about dpm enablement state) only
 *                     false: real dpm disablement plus cache update
 * @mask:              the dpm feature which should not be disabled
 *                     SMU_FEATURE_COUNT: no exception, all dpm features
 *                     to disable
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int smu_cmn_disable_all_features_with_exception(struct smu_context *smu,
						bool no_hw_disablement,
						enum smu_feature_mask mask)
{
	struct smu_feature *feature = &smu->smu_feature;
	uint64_t features_to_disable = U64_MAX;
	int skipped_feature_id;

	skipped_feature_id = smu_cmn_to_asic_specific_index(smu,
							    CMN2ASIC_MAPPING_FEATURE,
							    mask);
	if (skipped_feature_id < 0)
		return -EINVAL;
	if (mask != SMU_FEATURE_COUNT) {
		skipped_feature_id = smu_cmn_to_asic_specific_index(smu,
								    CMN2ASIC_MAPPING_FEATURE,
								    mask);
		if (skipped_feature_id < 0)
			return -EINVAL;

	features_to_disable &= ~(1ULL << skipped_feature_id);
		features_to_disable &= ~(1ULL << skipped_feature_id);
	}

	return smu_cmn_feature_update_enable_state(smu,
						   features_to_disable,
						   0);
	if (no_hw_disablement) {
		mutex_lock(&feature->mutex);
		bitmap_andnot(feature->enabled, feature->enabled,
			      (unsigned long *)(&features_to_disable), SMU_FEATURE_MAX);
		mutex_unlock(&feature->mutex);

		return 0;
	} else {
		return smu_cmn_feature_update_enable_state(smu,
							   features_to_disable,
							   0);
	}
}

int smu_cmn_get_smc_version(struct smu_context *smu,
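The no_hw_disablement flag lets BACO paths drop the driver's cached enablement state without touching firmware, since PMFW disables the features itself on BACO entry. A hedged call-site sketch, mirroring smu_disable_dpms() above:

/* Sketch: cache-only teardown before BACO entry */
ret = smu_cmn_disable_all_features_with_exception(smu,
						  true, /* no_hw_disablement: update cache only */
						  SMU_FEATURE_BACO_BIT);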
@@ -773,6 +815,9 @@ void smu_cmn_init_soft_gpu_metrics(void *table, uint8_t frev, uint8_t crev)
	case METRICS_VERSION(2, 1):
		structure_size = sizeof(struct gpu_metrics_v2_1);
		break;
	case METRICS_VERSION(2, 2):
		structure_size = sizeof(struct gpu_metrics_v2_2);
		break;
	default:
		return;
	}