mirror of https://gitee.com/openkylin/linux.git
amd fixes for 4.18-rc1
-----BEGIN PGP SIGNATURE-----

iQIcBAABAgAGBQJbI0DcAAoJEAx081l5xIa+lJUP/2jY5MmTArMTJb7EqdBL4EJA
6aYAfLyoQZFKgkGE+qq6DsKPGRo82VeOorVjY3SZ3DhFMNUylu79zDL/2Qbtejrw
2K2Gd+JtC9I2uZIYwwKTVRk7Z3w/Y2stf9Sd8AA1pY4Ak3RpEKxav8NubmrET8Mu
7y7BPekHyn5kfTA8x6R71sR3tNxWSX9YT6kEsWh2I9wykyVs+za7qRfuQyO/wm0Z
AhqsZ/7cbmJQtGcVEU9hgSRcbmKDD7E/w6dczGZXYOXkVVmAOHM6As3D3SH2EIga
fZWhLOM2ri6o9UZ1sMqa2gKW0DvSMiMdkpAK+WMgke7uVGwfk/PVaHXczM7oZBIa
be/wE7Ilf5BgvfUezq1hs1LXMC769urNKzE85b3sDTYlFvGYjiPN1IXSqMazTdAI
yrQA2sE1mOaU8mgoTfDmWJtUau1yRF6Nn6HS0A+vHvgUM/UeoO3XtdMwszfMOgS9
fucnB6rNAK1HRfv2DZ29+dC2zUEiRbITPcKEw1PHnDlRW0EnK9vMrPEABbIkk+nf
9cBNgYSa1SO4UmZdkNxRjeNq3RlyoiQf8XQ9UQKnx4XTyzwFKOTWclJ1gVfL0JSE
P2so8hSj0rWQwVdy0UA+DhMmvleYu0tKXeRnpuu02H9nmA1h5TH4YyDxQhH9H4w1
6xxek0+cM6ISMDCzOjRZ
=YBRl
-----END PGP SIGNATURE-----

Merge tag 'drm-next-2018-06-15' of git://anongit.freedesktop.org/drm/drm

Pull amd drm fixes from Dave Airlie:
 "Just a single set of AMD fixes for stuff in -next for -rc1"

* tag 'drm-next-2018-06-15' of git://anongit.freedesktop.org/drm/drm: (47 commits)
  drm/amd/powerplay: Set higher SCLK&MCLK frequency than dpm7 in OD (v2)
  drm/amd/powerplay: remove uncessary extra gfxoff control call
  drm/amdgpu: fix parsing indirect register list v2
  drm/amd/include: Update df 3.6 mask and shift definition
  drm/amd/pp: Fix OD feature enable failed on Vega10 workstation cards
  drm/amd/display: Fix stale buffer object (bo) use
  drm/amd/pp: initialize result to before or'ing in data
  drm/amd/powerplay: fix wrong clock adjust sequence
  drm/amdgpu: Grab/put runtime PM references in atomic_commit_tail()
  drm/amd/powerplay: fix missed hwmgr check warning before call gfx_off_control handler
  drm/amdgpu: fix CG enabling hang with gfxoff enabled
  drm/amdgpu: fix clear_all and replace handling in the VM (v2)
  drm/amdgpu: add checking for sos version
  drm/amdgpu: fix the missed vcn fw version report
  Revert "drm/amdgpu: Add an ATPX quirk for hybrid laptop"
  drm/amdgpu/df: fix potential array out-of-bounds read
  drm/amdgpu: Fix NULL pointer when load kfd driver with PP block is disabled
  drm/gfx9: Update gc goldensetting for vega20.
  drm/amd/pp: Allow underclocking when od table is empty in vbios
  drm/amdgpu/display: check if ppfuncs exists before using it
  ...
commit becfc5e97c
@@ -342,15 +342,12 @@ void get_local_mem_info(struct kgd_dev *kgd,
 			mem_info->local_mem_size_public,
 			mem_info->local_mem_size_private);
 
-	if (amdgpu_emu_mode == 1) {
-		mem_info->mem_clk_max = 100;
-		return;
-	}
-
 	if (amdgpu_sriov_vf(adev))
 		mem_info->mem_clk_max = adev->clock.default_mclk / 100;
-	else
+	else if (adev->powerplay.pp_funcs)
 		mem_info->mem_clk_max = amdgpu_dpm_get_mclk(adev, false) / 100;
+	else
+		mem_info->mem_clk_max = 100;
 }
 
 uint64_t get_gpu_clock_counter(struct kgd_dev *kgd)

@@ -367,13 +364,12 @@ uint32_t get_max_engine_clock_in_mhz(struct kgd_dev *kgd)
 	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
 
 	/* the sclk is in quantas of 10kHz */
-	if (amdgpu_emu_mode == 1)
-		return 100;
-
 	if (amdgpu_sriov_vf(adev))
 		return adev->clock.default_sclk / 100;
-
-	return amdgpu_dpm_get_sclk(adev, false) / 100;
+	else if (adev->powerplay.pp_funcs)
+		return amdgpu_dpm_get_sclk(adev, false) / 100;
+	else
+		return 100;
 }
 
 void get_cu_info(struct kgd_dev *kgd, struct kfd_cu_info *cu_info)

@@ -569,7 +569,6 @@ static const struct amdgpu_px_quirk amdgpu_px_quirk_list[] = {
 	{ 0x1002, 0x6900, 0x1002, 0x0124, AMDGPU_PX_QUIRK_FORCE_ATPX },
 	{ 0x1002, 0x6900, 0x1028, 0x0812, AMDGPU_PX_QUIRK_FORCE_ATPX },
 	{ 0x1002, 0x6900, 0x1028, 0x0813, AMDGPU_PX_QUIRK_FORCE_ATPX },
-	{ 0x1002, 0x67DF, 0x1028, 0x0774, AMDGPU_PX_QUIRK_FORCE_ATPX },
 	{ 0, 0, 0, 0, 0 },
 };
 

@@ -522,6 +522,9 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
 	struct amdgpu_bo_list_entry *e;
 	struct list_head duplicates;
 	unsigned i, tries = 10;
+	struct amdgpu_bo *gds;
+	struct amdgpu_bo *gws;
+	struct amdgpu_bo *oa;
 	int r;
 
 	INIT_LIST_HEAD(&p->validated);

@@ -652,31 +655,36 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
 
 	amdgpu_cs_report_moved_bytes(p->adev, p->bytes_moved,
 				     p->bytes_moved_vis);
 
 	if (p->bo_list) {
-		struct amdgpu_bo *gds = p->bo_list->gds_obj;
-		struct amdgpu_bo *gws = p->bo_list->gws_obj;
-		struct amdgpu_bo *oa = p->bo_list->oa_obj;
 		struct amdgpu_vm *vm = &fpriv->vm;
 		unsigned i;
 
+		gds = p->bo_list->gds_obj;
+		gws = p->bo_list->gws_obj;
+		oa = p->bo_list->oa_obj;
 		for (i = 0; i < p->bo_list->num_entries; i++) {
 			struct amdgpu_bo *bo = p->bo_list->array[i].robj;
 
 			p->bo_list->array[i].bo_va = amdgpu_vm_bo_find(vm, bo);
 		}
-
-		if (gds) {
-			p->job->gds_base = amdgpu_bo_gpu_offset(gds);
-			p->job->gds_size = amdgpu_bo_size(gds);
-		}
-		if (gws) {
-			p->job->gws_base = amdgpu_bo_gpu_offset(gws);
-			p->job->gws_size = amdgpu_bo_size(gws);
-		}
-		if (oa) {
-			p->job->oa_base = amdgpu_bo_gpu_offset(oa);
-			p->job->oa_size = amdgpu_bo_size(oa);
-		}
+	} else {
+		gds = p->adev->gds.gds_gfx_bo;
+		gws = p->adev->gds.gws_gfx_bo;
+		oa = p->adev->gds.oa_gfx_bo;
 	}
 
+	if (gds) {
+		p->job->gds_base = amdgpu_bo_gpu_offset(gds);
+		p->job->gds_size = amdgpu_bo_size(gds);
+	}
+	if (gws) {
+		p->job->gws_base = amdgpu_bo_gpu_offset(gws);
+		p->job->gws_size = amdgpu_bo_size(gws);
+	}
+	if (oa) {
+		p->job->oa_base = amdgpu_bo_gpu_offset(oa);
+		p->job->oa_size = amdgpu_bo_size(oa);
+	}
+
 	if (!r && p->uf_entry.robj) {

@@ -1730,6 +1730,18 @@ static int amdgpu_device_ip_late_set_cg_state(struct amdgpu_device *adev)
 			}
 		}
 	}
+
+	if (adev->powerplay.pp_feature & PP_GFXOFF_MASK) {
+		/* enable gfx powergating */
+		amdgpu_device_ip_set_powergating_state(adev,
+						       AMD_IP_BLOCK_TYPE_GFX,
+						       AMD_PG_STATE_GATE);
+		/* enable gfxoff */
+		amdgpu_device_ip_set_powergating_state(adev,
+						       AMD_IP_BLOCK_TYPE_SMC,
+						       AMD_PG_STATE_GATE);
+	}
 
 	return 0;
 }

@@ -30,6 +30,7 @@
 #include <drm/drmP.h>
 #include <drm/amdgpu_drm.h>
 #include "amdgpu.h"
+#include "amdgpu_display.h"
 
 void amdgpu_gem_object_free(struct drm_gem_object *gobj)
 {

@@ -235,6 +236,13 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
 	/* create a gem object to contain this object in */
 	if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS |
 	    AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
+		if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
+			/* if gds bo is created from user space, it must be
+			 * passed to bo list
+			 */
+			DRM_ERROR("GDS bo cannot be per-vm-bo\n");
+			return -EINVAL;
+		}
 		flags |= AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
 		if (args->in.domains == AMDGPU_GEM_DOMAIN_GDS)
 			size = size << AMDGPU_GDS_SHIFT;

@@ -749,15 +757,16 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv,
 	struct amdgpu_device *adev = dev->dev_private;
 	struct drm_gem_object *gobj;
 	uint32_t handle;
+	u32 domain;
 	int r;
 
 	args->pitch = amdgpu_align_pitch(adev, args->width,
 					 DIV_ROUND_UP(args->bpp, 8), 0);
 	args->size = (u64)args->pitch * args->height;
 	args->size = ALIGN(args->size, PAGE_SIZE);
-
-	r = amdgpu_gem_object_create(adev, args->size, 0,
-				     AMDGPU_GEM_DOMAIN_VRAM,
+	domain = amdgpu_bo_get_preferred_pin_domain(adev,
+				amdgpu_display_supported_domains(adev));
+	r = amdgpu_gem_object_create(adev, args->size, 0, domain,
 				     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
 				     false, NULL, &gobj);
 	if (r)

@@ -703,11 +703,7 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
 	/* This assumes only APU display buffers are pinned with (VRAM|GTT).
 	 * See function amdgpu_display_supported_domains()
 	 */
-	if (domain == (AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT)) {
-		domain = AMDGPU_GEM_DOMAIN_VRAM;
-		if (adev->gmc.real_vram_size <= AMDGPU_SG_THRESHOLD)
-			domain = AMDGPU_GEM_DOMAIN_GTT;
-	}
+	domain = amdgpu_bo_get_preferred_pin_domain(adev, domain);
 
 	if (bo->pin_count) {
 		uint32_t mem_type = bo->tbo.mem.mem_type;

@@ -1066,3 +1062,14 @@ u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
 
 	return bo->tbo.offset;
 }
+
+uint32_t amdgpu_bo_get_preferred_pin_domain(struct amdgpu_device *adev,
+					    uint32_t domain)
+{
+	if (domain == (AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT)) {
+		domain = AMDGPU_GEM_DOMAIN_VRAM;
+		if (adev->gmc.real_vram_size <= AMDGPU_SG_THRESHOLD)
+			domain = AMDGPU_GEM_DOMAIN_GTT;
+	}
+	return domain;
+}

@@ -289,7 +289,8 @@ int amdgpu_bo_restore_from_shadow(struct amdgpu_device *adev,
 			     struct reservation_object *resv,
 			     struct dma_fence **fence,
 			     bool direct);
-
+uint32_t amdgpu_bo_get_preferred_pin_domain(struct amdgpu_device *adev,
+					    uint32_t domain);
 
 /*
  * sub allocation

@@ -49,8 +49,6 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work);
 
 int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
 {
-	struct amdgpu_ring *ring;
-	struct drm_sched_rq *rq;
 	unsigned long bo_size;
 	const char *fw_name;
 	const struct common_firmware_header *hdr;

@@ -84,6 +82,7 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
 	}
 
 	hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
+	adev->vcn.fw_version = le32_to_cpu(hdr->ucode_version);
 	family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
 	version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
 	version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;

@@ -102,24 +101,6 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
 		return r;
 	}
 
-	ring = &adev->vcn.ring_dec;
-	rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
-	r = drm_sched_entity_init(&ring->sched, &adev->vcn.entity_dec,
-				  rq, NULL);
-	if (r != 0) {
-		DRM_ERROR("Failed setting up VCN dec run queue.\n");
-		return r;
-	}
-
-	ring = &adev->vcn.ring_enc[0];
-	rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
-	r = drm_sched_entity_init(&ring->sched, &adev->vcn.entity_enc,
-				  rq, NULL);
-	if (r != 0) {
-		DRM_ERROR("Failed setting up VCN enc run queue.\n");
-		return r;
-	}
-
 	return 0;
 }

@@ -129,10 +110,6 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
 
 	kfree(adev->vcn.saved_bo);
 
-	drm_sched_entity_fini(&adev->vcn.ring_dec.sched, &adev->vcn.entity_dec);
-
-	drm_sched_entity_fini(&adev->vcn.ring_enc[0].sched, &adev->vcn.entity_enc);
-
 	amdgpu_bo_free_kernel(&adev->vcn.vcpu_bo,
 			      &adev->vcn.gpu_addr,
 			      (void **)&adev->vcn.cpu_addr);

@@ -278,7 +255,7 @@ int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring)
 }
 
 static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
-				   struct amdgpu_bo *bo, bool direct,
+				   struct amdgpu_bo *bo,
 				   struct dma_fence **fence)
 {
 	struct amdgpu_device *adev = ring->adev;

@@ -306,19 +283,12 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
 	}
 	ib->length_dw = 16;
 
-	if (direct) {
-		r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
-		job->fence = dma_fence_get(f);
-		if (r)
-			goto err_free;
+	r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
+	job->fence = dma_fence_get(f);
+	if (r)
+		goto err_free;
 
-		amdgpu_job_free(job);
-	} else {
-		r = amdgpu_job_submit(job, ring, &adev->vcn.entity_dec,
-				      AMDGPU_FENCE_OWNER_UNDEFINED, &f);
-		if (r)
-			goto err_free;
-	}
+	amdgpu_job_free(job);
 
 	amdgpu_bo_fence(bo, f, false);
 	amdgpu_bo_unreserve(bo);

@@ -370,11 +340,11 @@ static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
 	for (i = 14; i < 1024; ++i)
 		msg[i] = cpu_to_le32(0x0);
 
-	return amdgpu_vcn_dec_send_msg(ring, bo, true, fence);
+	return amdgpu_vcn_dec_send_msg(ring, bo, fence);
 }
 
 static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
-					  bool direct, struct dma_fence **fence)
+					  struct dma_fence **fence)
 {
 	struct amdgpu_device *adev = ring->adev;
 	struct amdgpu_bo *bo = NULL;

@@ -396,7 +366,7 @@ static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
 	for (i = 6; i < 1024; ++i)
 		msg[i] = cpu_to_le32(0x0);
 
-	return amdgpu_vcn_dec_send_msg(ring, bo, direct, fence);
+	return amdgpu_vcn_dec_send_msg(ring, bo, fence);
 }
 
 int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)

@@ -410,7 +380,7 @@ int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 		goto error;
 	}
 
-	r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, true, &fence);
+	r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &fence);
 	if (r) {
 		DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r);
 		goto error;

@@ -67,8 +67,6 @@ struct amdgpu_vcn {
 	struct amdgpu_ring	ring_dec;
 	struct amdgpu_ring	ring_enc[AMDGPU_VCN_MAX_ENC_RINGS];
 	struct amdgpu_irq_src	irq;
-	struct drm_sched_entity	entity_dec;
-	struct drm_sched_entity	entity_enc;
 	unsigned		num_enc_rings;
 };

@@ -2123,7 +2123,8 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
 		before->last = saddr - 1;
 		before->offset = tmp->offset;
 		before->flags = tmp->flags;
-		list_add(&before->list, &tmp->list);
+		before->bo_va = tmp->bo_va;
+		list_add(&before->list, &tmp->bo_va->invalids);
 	}
 
 	/* Remember mapping split at the end */

@@ -2133,7 +2134,8 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
 		after->offset = tmp->offset;
 		after->offset += after->start - tmp->start;
 		after->flags = tmp->flags;
-		list_add(&after->list, &tmp->list);
+		after->bo_va = tmp->bo_va;
+		list_add(&after->list, &tmp->bo_va->invalids);
 	}
 
 	list_del(&tmp->list);

@@ -64,7 +64,7 @@ static u32 df_v3_6_get_hbm_channel_number(struct amdgpu_device *adev)
 	int fb_channel_number;
 
 	fb_channel_number = adev->df_funcs->get_fb_channel_number(adev);
-	if (fb_channel_number > ARRAY_SIZE(df_v3_6_channel_number))
+	if (fb_channel_number >= ARRAY_SIZE(df_v3_6_channel_number))
 		fb_channel_number = 0;
 
 	return df_v3_6_channel_number[fb_channel_number];

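The one-character fix above is the classic off-by-one bounds check: an array of N entries has valid indices 0..N-1, so ">" still lets fb_channel_number == N through and reads one slot past the end of df_v3_6_channel_number. A minimal standalone sketch of the corrected guard (table values are made up, not the driver's):

    #include <stdio.h>

    #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

    /* hypothetical channel-count table with 6 entries (indices 0..5) */
    static const int channel_number[] = { 1, 2, 0, 4, 0, 8 };

    static int lookup(unsigned int idx)
    {
        /* ">=" also rejects idx == 6; ">" would read past the end */
        if (idx >= ARRAY_SIZE(channel_number))
            idx = 0;
        return channel_number[idx];
    }

    int main(void)
    {
        printf("%d\n", lookup(6)); /* out of range, falls back to index 0 */
        return 0;
    }
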
@@ -111,6 +111,7 @@ static const struct soc15_reg_golden golden_settings_gc_9_0_vg10[] =
 
 static const struct soc15_reg_golden golden_settings_gc_9_0_vg20[] =
 {
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_DCC_CONFIG, 0x0f000080, 0x04000080),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_2, 0x0f000000, 0x0a000000),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_3, 0x30000000, 0x10000000),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xf3e777ff, 0x22014042),

@@ -1837,13 +1838,15 @@ static void gfx_v9_1_parse_ind_reg_list(int *register_list_format,
 				int indirect_offset,
 				int list_size,
 				int *unique_indirect_regs,
-				int *unique_indirect_reg_count,
+				int unique_indirect_reg_count,
 				int *indirect_start_offsets,
-				int *indirect_start_offsets_count)
+				int *indirect_start_offsets_count,
+				int max_start_offsets_count)
 {
 	int idx;
 
 	for (; indirect_offset < list_size; indirect_offset++) {
+		WARN_ON(*indirect_start_offsets_count >= max_start_offsets_count);
 		indirect_start_offsets[*indirect_start_offsets_count] = indirect_offset;
 		*indirect_start_offsets_count = *indirect_start_offsets_count + 1;
 

@@ -1851,14 +1854,14 @@ static void gfx_v9_1_parse_ind_reg_list(int *register_list_format,
 			indirect_offset += 2;
 
 			/* look for the matching indice */
-			for (idx = 0; idx < *unique_indirect_reg_count; idx++) {
+			for (idx = 0; idx < unique_indirect_reg_count; idx++) {
 				if (unique_indirect_regs[idx] ==
 					register_list_format[indirect_offset] ||
 					!unique_indirect_regs[idx])
 					break;
 			}
 
-			BUG_ON(idx >= *unique_indirect_reg_count);
+			BUG_ON(idx >= unique_indirect_reg_count);
 
 			if (!unique_indirect_regs[idx])
 				unique_indirect_regs[idx] = register_list_format[indirect_offset];

@@ -1893,9 +1896,10 @@ static int gfx_v9_1_init_rlc_save_restore_list(struct amdgpu_device *adev)
 			adev->gfx.rlc.reg_list_format_direct_reg_list_length,
 			adev->gfx.rlc.reg_list_format_size_bytes >> 2,
 			unique_indirect_regs,
-			&unique_indirect_reg_count,
+			unique_indirect_reg_count,
 			indirect_start_offsets,
-			&indirect_start_offsets_count);
+			&indirect_start_offsets_count,
+			ARRAY_SIZE(indirect_start_offsets));
 
 	/* enable auto inc in case it is disabled */
 	tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL));

@@ -3404,11 +3408,6 @@ static int gfx_v9_0_late_init(void *handle)
 	if (r)
 		return r;
 
-	r = amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_GFX,
-						   AMD_PG_STATE_GATE);
-	if (r)
-		return r;
-
 	return 0;
 }

@@ -47,6 +47,8 @@ MODULE_FIRMWARE("amdgpu/vega20_asd.bin");
 
 #define smnMP1_FIRMWARE_FLAGS 0x3010028
 
+static uint32_t sos_old_versions[] = {1517616, 1510592, 1448594, 1446554};
+
 static int
 psp_v3_1_get_fw_type(struct amdgpu_firmware_info *ucode, enum psp_gfx_fw_type *type)
 {

@@ -210,12 +212,31 @@ static int psp_v3_1_bootloader_load_sysdrv(struct psp_context *psp)
 	return ret;
 }
 
+static bool psp_v3_1_match_version(struct amdgpu_device *adev, uint32_t ver)
+{
+	int i;
+
+	if (ver == adev->psp.sos_fw_version)
+		return true;
+
+	/*
+	 * Double check if the latest four legacy versions.
+	 * If yes, it is still the right version.
+	 */
+	for (i = 0; i < sizeof(sos_old_versions) / sizeof(uint32_t); i++) {
+		if (sos_old_versions[i] == adev->psp.sos_fw_version)
+			return true;
+	}
+
+	return false;
+}
+
 static int psp_v3_1_bootloader_load_sos(struct psp_context *psp)
 {
 	int ret;
 	unsigned int psp_gfxdrv_command_reg = 0;
 	struct amdgpu_device *adev = psp->adev;
-	uint32_t sol_reg;
+	uint32_t sol_reg, ver;
 
 	/* Check sOS sign of life register to confirm sys driver and sOS
 	 * are already been loaded.

@@ -248,6 +269,10 @@ static int psp_v3_1_bootloader_load_sos(struct psp_context *psp)
 			   RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81),
 			   0, true);
 
+	ver = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_58);
+	if (!psp_v3_1_match_version(adev, ver))
+		DRM_WARN("SOS version doesn't match\n");
+
 	return ret;
 }

@@ -685,6 +685,7 @@ static int soc15_common_early_init(void *handle)
 			AMD_CG_SUPPORT_BIF_MGCG |
 			AMD_CG_SUPPORT_BIF_LS |
 			AMD_CG_SUPPORT_HDP_MGCG |
 			AMD_CG_SUPPORT_HDP_LS |
 			AMD_CG_SUPPORT_ROM_MGCG |
 			AMD_CG_SUPPORT_VCE_MGCG |
 			AMD_CG_SUPPORT_UVD_MGCG;

@@ -769,14 +769,14 @@ static int vcn_v1_0_stop(struct amdgpu_device *adev)
 	return 0;
 }
 
-bool vcn_v1_0_is_idle(void *handle)
+static bool vcn_v1_0_is_idle(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
 	return (RREG32_SOC15(VCN, 0, mmUVD_STATUS) == 0x2);
 }
 
-int vcn_v1_0_wait_for_idle(void *handle)
+static int vcn_v1_0_wait_for_idle(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	int ret = 0;

@@ -46,6 +46,7 @@
 #include <linux/moduleparam.h>
 #include <linux/version.h>
 #include <linux/types.h>
+#include <linux/pm_runtime.h>
 
 #include <drm/drmP.h>
 #include <drm/drm_atomic.h>

@@ -2095,12 +2096,6 @@ convert_color_depth_from_display_info(const struct drm_connector *connector)
 {
 	uint32_t bpc = connector->display_info.bpc;
 
-	/* Limited color depth to 8bit
-	 * TODO: Still need to handle deep color
-	 */
-	if (bpc > 8)
-		bpc = 8;
-
 	switch (bpc) {
 	case 0:
 		/* Temporary Work around, DRM don't parse color depth for

@@ -2316,27 +2311,22 @@ decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
 	}
 }
 
-static int create_fake_sink(struct amdgpu_dm_connector *aconnector)
+static struct dc_sink *
+create_fake_sink(struct amdgpu_dm_connector *aconnector)
 {
-	struct dc_sink *sink = NULL;
 	struct dc_sink_init_data sink_init_data = { 0 };
+	struct dc_sink *sink = NULL;
 
 	sink_init_data.link = aconnector->dc_link;
 	sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
 
 	sink = dc_sink_create(&sink_init_data);
 	if (!sink) {
 		DRM_ERROR("Failed to create sink!\n");
-		return -ENOMEM;
+		return NULL;
 	}
-
 	sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
-	aconnector->fake_enable = true;
 
-	aconnector->dc_sink = sink;
-	aconnector->dc_link->local_sink = sink;
-
-	return 0;
+	return sink;
 }
 
 static void set_multisync_trigger_params(

@@ -2399,7 +2389,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
 	struct dc_stream_state *stream = NULL;
 	struct drm_display_mode mode = *drm_mode;
 	bool native_mode_found = false;
-
+	struct dc_sink *sink = NULL;
 	if (aconnector == NULL) {
 		DRM_ERROR("aconnector is NULL!\n");
 		return stream;

@@ -2417,15 +2407,18 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
 			return stream;
 		}
 
-		if (create_fake_sink(aconnector))
+		sink = create_fake_sink(aconnector);
+		if (!sink)
 			return stream;
+	} else {
+		sink = aconnector->dc_sink;
 	}
 
-	stream = dc_create_stream_for_sink(aconnector->dc_sink);
+	stream = dc_create_stream_for_sink(sink);
 
 	if (stream == NULL) {
 		DRM_ERROR("Failed to create stream for sink!\n");
-		return stream;
+		goto finish;
 	}
 
 	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {

@@ -2464,12 +2457,15 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
 	fill_audio_info(
 		&stream->audio_info,
 		drm_connector,
-		aconnector->dc_sink);
+		sink);
 
 	update_stream_signal(stream);
 
 	if (dm_state && dm_state->freesync_capable)
 		stream->ignore_msa_timing_param = true;
+finish:
+	if (sink && sink->sink_signal == SIGNAL_TYPE_VIRTUAL)
+		dc_sink_release(sink);
 
 	return stream;
 }

@@ -2714,6 +2710,9 @@ void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
 	struct dm_connector_state *state =
 		to_dm_connector_state(connector->state);
 
+	if (connector->state)
+		__drm_atomic_helper_connector_destroy_state(connector->state);
+
 	kfree(state);
 
 	state = kzalloc(sizeof(*state), GFP_KERNEL);

@@ -2724,8 +2723,7 @@ void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
 		state->underscan_hborder = 0;
 		state->underscan_vborder = 0;
 
-		connector->state = &state->base;
-		connector->state->connector = connector;
+		__drm_atomic_helper_connector_reset(connector, &state->base);
 	}
 }

@@ -3083,17 +3081,6 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
 		}
 	}
 
-	/* It's a hack for s3 since in 4.9 kernel filter out cursor buffer
-	 * prepare and cleanup in drm_atomic_helper_prepare_planes
-	 * and drm_atomic_helper_cleanup_planes because fb doens't in s3.
-	 * IN 4.10 kernel this code should be removed and amdgpu_device_suspend
-	 * code touching fram buffers should be avoided for DC.
-	 */
-	if (plane->type == DRM_PLANE_TYPE_CURSOR) {
-		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_state->crtc);
-
-		acrtc->cursor_bo = obj;
-	}
 	return 0;
 }

@@ -4281,6 +4268,8 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
 			if (dm_old_crtc_state->stream)
 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
 
+			pm_runtime_get_noresume(dev->dev);
+
 			acrtc->enabled = true;
 			acrtc->hw_mode = new_crtc_state->mode;
 			crtc->hwmode = new_crtc_state->mode;

@@ -4469,6 +4458,16 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
 	drm_atomic_helper_wait_for_flip_done(dev, state);
 
 	drm_atomic_helper_cleanup_planes(dev, state);
+
+	/* Finally, drop a runtime PM reference for each newly disabled CRTC,
+	 * so we can put the GPU into runtime suspend if we're not driving any
+	 * displays anymore
+	 */
+	pm_runtime_mark_last_busy(dev->dev);
+	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+		if (old_crtc_state->active && !new_crtc_state->active)
+			pm_runtime_put_autosuspend(dev->dev);
+	}
 }

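The last two hunks above pair runtime-PM references with CRTC state transitions: a reference is taken with pm_runtime_get_noresume() when a CRTC is enabled (the device is necessarily awake in that path, so no resume is needed), and one reference is dropped per newly disabled CRTC after the commit completes, with pm_runtime_mark_last_busy() restarting the autosuspend timer. A minimal sketch of the general pattern (illustrative helper, not the driver's code):

    #include <linux/pm_runtime.h>

    /* dev: the device owning the displays; called once per CRTC transition */
    static void track_crtc_power(struct device *dev, bool was_active,
                                 bool is_active)
    {
        if (!was_active && is_active)
            pm_runtime_get_noresume(dev);   /* device is already resumed here */

        if (was_active && !is_active) {
            pm_runtime_mark_last_busy(dev);
            pm_runtime_put_autosuspend(dev); /* may enter suspend once idle */
        }
    }
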
@@ -555,6 +555,9 @@ static inline int dm_irq_state(struct amdgpu_device *adev,
 		return 0;
 	}
 
+	if (acrtc->otg_inst == -1)
+		return 0;
+
 	irq_source = dal_irq_type + acrtc->otg_inst;
 
 	st = (state == AMDGPU_IRQ_STATE_ENABLE);

@@ -234,6 +234,33 @@ static void pp_to_dc_clock_levels(
 	}
 }
 
+static void pp_to_dc_clock_levels_with_latency(
+		const struct pp_clock_levels_with_latency *pp_clks,
+		struct dm_pp_clock_levels_with_latency *clk_level_info,
+		enum dm_pp_clock_type dc_clk_type)
+{
+	uint32_t i;
+
+	if (pp_clks->num_levels > DM_PP_MAX_CLOCK_LEVELS) {
+		DRM_INFO("DM_PPLIB: Warning: %s clock: number of levels %d exceeds maximum of %d!\n",
+			 DC_DECODE_PP_CLOCK_TYPE(dc_clk_type),
+			 pp_clks->num_levels,
+			 DM_PP_MAX_CLOCK_LEVELS);
+
+		clk_level_info->num_levels = DM_PP_MAX_CLOCK_LEVELS;
+	} else
+		clk_level_info->num_levels = pp_clks->num_levels;
+
+	DRM_DEBUG("DM_PPLIB: values for %s clock\n",
+		  DC_DECODE_PP_CLOCK_TYPE(dc_clk_type));
+
+	for (i = 0; i < clk_level_info->num_levels; i++) {
+		DRM_DEBUG("DM_PPLIB:\t %d\n", pp_clks->data[i].clocks_in_khz);
+		clk_level_info->data[i].clocks_in_khz = pp_clks->data[i].clocks_in_khz;
+		clk_level_info->data[i].latency_in_us = pp_clks->data[i].latency_in_us;
+	}
+}
+
 bool dm_pp_get_clock_levels_by_type(
 	const struct dc_context *ctx,
 	enum dm_pp_clock_type clk_type,

@@ -311,8 +338,22 @@ bool dm_pp_get_clock_levels_by_type_with_latency(
 	enum dm_pp_clock_type clk_type,
 	struct dm_pp_clock_levels_with_latency *clk_level_info)
 {
-	/* TODO: to be implemented */
-	return false;
+	struct amdgpu_device *adev = ctx->driver_context;
+	void *pp_handle = adev->powerplay.pp_handle;
+	struct pp_clock_levels_with_latency pp_clks = { 0 };
+	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+
+	if (!pp_funcs || !pp_funcs->get_clock_by_type_with_latency)
+		return false;
+
+	if (pp_funcs->get_clock_by_type_with_latency(pp_handle,
+						     dc_to_pp_clock_type(clk_type),
+						     &pp_clks))
+		return false;
+
+	pp_to_dc_clock_levels_with_latency(&pp_clks, clk_level_info, clk_type);
+
+	return true;
 }
 
 bool dm_pp_get_clock_levels_by_type_with_voltage(

@@ -449,6 +449,11 @@ static inline unsigned int clamp_ux_dy(
 	return min_clamp;
 }
 
+unsigned int dc_fixpt_u3d19(struct fixed31_32 arg)
+{
+	return ux_dy(arg.value, 3, 19);
+}
+
 unsigned int dc_fixpt_u2d19(struct fixed31_32 arg)
 {
 	return ux_dy(arg.value, 2, 19);

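dc_fixpt_u3d19() converts the driver's 31.32 fixed-point value to an unsigned 3.19 format, 3 integer bits and 19 fractional bits, versus 2.19 for the existing dc_fixpt_u2d19(); the wider integer part is presumably what lets the scaler-ratio register fields (switched over to u3d19 later in this series) hold ratios of 4.0 and above. A rough sketch of the truncation these ux_dy() wrappers perform (simplified; the real helper also clamps):

    #include <stdint.h>

    /* value is Q31.32 fixed point: real = raw / 2^32 */
    static unsigned int ux_dy(int64_t raw, unsigned int int_bits,
                              unsigned int frac_bits)
    {
        /* drop excess fractional bits, then mask to int_bits.frac_bits */
        uint64_t shifted = (uint64_t)raw >> (32 - frac_bits);
        uint64_t mask = (1ULL << (int_bits + frac_bits)) - 1;

        return (unsigned int)(shifted & mask);
    }
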
@@ -1630,17 +1630,42 @@ static enum dc_status read_hpd_rx_irq_data(
 	struct dc_link *link,
 	union hpd_irq_data *irq_data)
 {
+	static enum dc_status retval;
+
 	/* The HW reads 16 bytes from 200h on HPD,
 	 * but if we get an AUX_DEFER, the HW cannot retry
 	 * and this causes the CTS tests 4.3.2.1 - 3.2.4 to
 	 * fail, so we now explicitly read 6 bytes which is
 	 * the req from the above mentioned test cases.
+	 *
+	 * For DP 1.4 we need to read those from 2002h range.
 	 */
-	return core_link_read_dpcd(
-		link,
-		DP_SINK_COUNT,
-		irq_data->raw,
-		sizeof(union hpd_irq_data));
+	if (link->dpcd_caps.dpcd_rev.raw < DPCD_REV_14)
+		retval = core_link_read_dpcd(
+			link,
+			DP_SINK_COUNT,
+			irq_data->raw,
+			sizeof(union hpd_irq_data));
+	else {
+		/* Read 2 bytes at this location,... */
+		retval = core_link_read_dpcd(
+			link,
+			DP_SINK_COUNT_ESI,
+			irq_data->raw,
+			2);
+
+		if (retval != DC_OK)
+			return retval;
+
+		/* ... then read remaining 4 at the other location */
+		retval = core_link_read_dpcd(
+			link,
+			DP_LANE0_1_STATUS_ESI,
+			&irq_data->raw[2],
+			4);
+	}
+
+	return retval;
 }
 
 static bool allow_hpd_rx_irq(const struct dc_link *link)

@@ -2278,7 +2303,7 @@ static void dp_wa_power_up_0010FA(struct dc_link *link, uint8_t *dpcd_data,
 
 static bool retrieve_link_cap(struct dc_link *link)
 {
-	uint8_t dpcd_data[DP_TRAINING_AUX_RD_INTERVAL - DP_DPCD_REV + 1];
+	uint8_t dpcd_data[DP_ADAPTER_CAP - DP_DPCD_REV + 1];
 
 	union down_stream_port_count down_strm_port_count;
 	union edp_configuration_cap edp_config_cap;

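For context on the split read above: the legacy HPD IRQ block at DP_SINK_COUNT (0x200) is six contiguous bytes, but the DP 1.4 event-status-indicator copies are not contiguous, sink count and IRQ vector sit at 0x2002 while the lane/link status bytes start at 0x200c, so a single six-byte read cannot cover both. The relevant DPCD addresses, as defined in drm_dp_helper.h:

    #define DP_SINK_COUNT           0x200   /* legacy block, 6 bytes read on HPD */
    #define DP_SINK_COUNT_ESI       0x2002  /* DP 1.4: sink count + IRQ, 2 bytes */
    #define DP_LANE0_1_STATUS_ESI   0x200c  /* DP 1.4: lane status, 4 bytes */
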
@@ -72,7 +72,8 @@ static void dce110_update_generic_info_packet(
 	uint32_t max_retries = 50;
 
 	/*we need turn on clock before programming AFMT block*/
-	REG_UPDATE(AFMT_CNTL, AFMT_AUDIO_CLOCK_EN, 1);
+	if (REG(AFMT_CNTL))
+		REG_UPDATE(AFMT_CNTL, AFMT_AUDIO_CLOCK_EN, 1);
 
 	if (REG(AFMT_VBI_PACKET_CONTROL1)) {
 		if (packet_index >= 8)

@@ -719,7 +720,8 @@ static void dce110_stream_encoder_update_hdmi_info_packets(
 		const uint32_t *content =
 			(const uint32_t *) &info_frame->avi.sb[0];
 		/*we need turn on clock before programming AFMT block*/
-		REG_UPDATE(AFMT_CNTL, AFMT_AUDIO_CLOCK_EN, 1);
+		if (REG(AFMT_CNTL))
+			REG_UPDATE(AFMT_CNTL, AFMT_AUDIO_CLOCK_EN, 1);
 
 		REG_WRITE(AFMT_AVI_INFO0, content[0]);

@@ -121,10 +121,10 @@ static void reset_lb_on_vblank(struct dc_context *ctx)
 		frame_count = dm_read_reg(ctx, mmCRTC_STATUS_FRAME_COUNT);
 
 
-		for (retry = 100; retry > 0; retry--) {
+		for (retry = 10000; retry > 0; retry--) {
 			if (frame_count != dm_read_reg(ctx, mmCRTC_STATUS_FRAME_COUNT))
 				break;
-			msleep(1);
+			udelay(10);
 		}
 		if (!retry)
 			dm_error("Frame count did not increase for 100ms.\n");

@@ -147,14 +147,14 @@ static void wait_for_fbc_state_changed(
 	uint32_t addr = mmFBC_STATUS;
 	uint32_t value;
 
-	while (counter < 10) {
+	while (counter < 1000) {
 		value = dm_read_reg(cp110->base.ctx, addr);
 		if (get_reg_field_value(
 			value,
 			FBC_STATUS,
 			FBC_ENABLE_STATUS) == enabled)
 			break;
-		msleep(10);
+		udelay(100);
 		counter++;
 	}

@@ -1004,9 +1004,9 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx, int option)
 	/*don't free audio if it is from retrain or internal disable stream*/
 	if (option == FREE_ACQUIRED_RESOURCE && dc->caps.dynamic_audio == true) {
 		/*we have to dynamic arbitrate the audio endpoints*/
-		pipe_ctx->stream_res.audio = NULL;
 		/*we free the resource, need reset is_audio_acquired*/
 		update_audio_usage(&dc->current_state->res_ctx, dc->res_pool, pipe_ctx->stream_res.audio, false);
+		pipe_ctx->stream_res.audio = NULL;
 	}
 
 	/* TODO: notify audio driver for if audio modes list changed

@@ -132,8 +132,7 @@ void dpp_set_gamut_remap_bypass(struct dcn10_dpp *dpp)
 
 #define IDENTITY_RATIO(ratio) (dc_fixpt_u2d19(ratio) == (1 << 19))
 
-
-bool dpp_get_optimal_number_of_taps(
+static bool dpp_get_optimal_number_of_taps(
 	struct dpp *dpp,
 	struct scaler_data *scl_data,
 	const struct scaling_taps *in_taps)

@@ -1424,12 +1424,8 @@ void dpp1_set_degamma(
 		enum ipp_degamma_mode mode);
 
 void dpp1_set_degamma_pwl(struct dpp *dpp_base,
-		const struct pwl_params *params);
+			  const struct pwl_params *params);
 
-bool dpp_get_optimal_number_of_taps(
-		struct dpp *dpp,
-		struct scaler_data *scl_data,
-		const struct scaling_taps *in_taps);
 
 void dpp_read_state(struct dpp *dpp_base,
 		struct dcn_dpp_state *s);

@@ -565,16 +565,16 @@ static void dpp1_dscl_set_manual_ratio_init(
 	uint32_t init_int = 0;
 
 	REG_SET(SCL_HORZ_FILTER_SCALE_RATIO, 0,
-		SCL_H_SCALE_RATIO, dc_fixpt_u2d19(data->ratios.horz) << 5);
+		SCL_H_SCALE_RATIO, dc_fixpt_u3d19(data->ratios.horz) << 5);
 
 	REG_SET(SCL_VERT_FILTER_SCALE_RATIO, 0,
-		SCL_V_SCALE_RATIO, dc_fixpt_u2d19(data->ratios.vert) << 5);
+		SCL_V_SCALE_RATIO, dc_fixpt_u3d19(data->ratios.vert) << 5);
 
 	REG_SET(SCL_HORZ_FILTER_SCALE_RATIO_C, 0,
-		SCL_H_SCALE_RATIO_C, dc_fixpt_u2d19(data->ratios.horz_c) << 5);
+		SCL_H_SCALE_RATIO_C, dc_fixpt_u3d19(data->ratios.horz_c) << 5);
 
 	REG_SET(SCL_VERT_FILTER_SCALE_RATIO_C, 0,
-		SCL_V_SCALE_RATIO_C, dc_fixpt_u2d19(data->ratios.vert_c) << 5);
+		SCL_V_SCALE_RATIO_C, dc_fixpt_u3d19(data->ratios.vert_c) << 5);
 
 	/*
 	 * 0.24 format for fraction, first five bits zeroed

@@ -396,11 +396,15 @@ bool hubp1_program_surface_flip_and_addr(
 		if (address->grph_stereo.right_addr.quad_part == 0)
 			break;
 
-		REG_UPDATE_4(DCSURF_SURFACE_CONTROL,
+		REG_UPDATE_8(DCSURF_SURFACE_CONTROL,
 				PRIMARY_SURFACE_TMZ, address->tmz_surface,
 				PRIMARY_SURFACE_TMZ_C, address->tmz_surface,
 				PRIMARY_META_SURFACE_TMZ, address->tmz_surface,
-				PRIMARY_META_SURFACE_TMZ_C, address->tmz_surface);
+				PRIMARY_META_SURFACE_TMZ_C, address->tmz_surface,
+				SECONDARY_SURFACE_TMZ, address->tmz_surface,
+				SECONDARY_SURFACE_TMZ_C, address->tmz_surface,
+				SECONDARY_META_SURFACE_TMZ, address->tmz_surface,
+				SECONDARY_META_SURFACE_TMZ_C, address->tmz_surface);
 
 		if (address->grph_stereo.right_meta_addr.quad_part != 0) {
 

@@ -459,9 +463,11 @@ void hubp1_dcc_control(struct hubp *hubp, bool enable,
 	uint32_t dcc_ind_64b_blk = independent_64b_blks ? 1 : 0;
 	struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
 
-	REG_UPDATE_2(DCSURF_SURFACE_CONTROL,
+	REG_UPDATE_4(DCSURF_SURFACE_CONTROL,
 			PRIMARY_SURFACE_DCC_EN, dcc_en,
-			PRIMARY_SURFACE_DCC_IND_64B_BLK, dcc_ind_64b_blk);
+			PRIMARY_SURFACE_DCC_IND_64B_BLK, dcc_ind_64b_blk,
+			SECONDARY_SURFACE_DCC_EN, dcc_en,
+			SECONDARY_SURFACE_DCC_IND_64B_BLK, dcc_ind_64b_blk);
 }
 
 void hubp1_program_surface_config(

@@ -312,6 +312,12 @@
 	HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, PRIMARY_META_SURFACE_TMZ_C, mask_sh),\
 	HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, PRIMARY_SURFACE_DCC_EN, mask_sh),\
 	HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, PRIMARY_SURFACE_DCC_IND_64B_BLK, mask_sh),\
+	HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, SECONDARY_SURFACE_TMZ, mask_sh),\
+	HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, SECONDARY_SURFACE_TMZ_C, mask_sh),\
+	HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, SECONDARY_META_SURFACE_TMZ, mask_sh),\
+	HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, SECONDARY_META_SURFACE_TMZ_C, mask_sh),\
+	HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, SECONDARY_SURFACE_DCC_EN, mask_sh),\
+	HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, SECONDARY_SURFACE_DCC_IND_64B_BLK, mask_sh),\
 	HUBP_SF(HUBPRET0_HUBPRET_CONTROL, DET_BUF_PLANE1_BASE_ADDRESS, mask_sh),\
 	HUBP_SF(HUBPRET0_HUBPRET_CONTROL, CROSSBAR_SRC_CB_B, mask_sh),\
 	HUBP_SF(HUBPRET0_HUBPRET_CONTROL, CROSSBAR_SRC_CR_R, mask_sh),\

@@ -489,6 +495,8 @@
 	type SECONDARY_META_SURFACE_TMZ_C;\
 	type PRIMARY_SURFACE_DCC_EN;\
 	type PRIMARY_SURFACE_DCC_IND_64B_BLK;\
+	type SECONDARY_SURFACE_DCC_EN;\
+	type SECONDARY_SURFACE_DCC_IND_64B_BLK;\
 	type DET_BUF_PLANE1_BASE_ADDRESS;\
 	type CROSSBAR_SRC_CB_B;\
 	type CROSSBAR_SRC_CR_R;\

@@ -319,6 +319,10 @@ void enc1_stream_encoder_dp_set_stream_attribute(
 		REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH,
 				DP_COMPONENT_PIXEL_DEPTH_12BPC);
 		break;
+	case COLOR_DEPTH_161616:
+		REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH,
+				DP_COMPONENT_PIXEL_DEPTH_16BPC);
+		break;
 	default:
 		REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH,
 				DP_COMPONENT_PIXEL_DEPTH_6BPC);

@@ -496,6 +496,8 @@ static inline int dc_fixpt_ceil(struct fixed31_32 arg)
  * fractional
  */
 
+unsigned int dc_fixpt_u3d19(struct fixed31_32 arg);
+
 unsigned int dc_fixpt_u2d19(struct fixed31_32 arg);
 
 unsigned int dc_fixpt_u0d19(struct fixed31_32 arg);

@@ -36,13 +36,13 @@
 /* DF_CS_AON0_DramBaseAddress0 */
 #define DF_CS_UMC_AON0_DramBaseAddress0__AddrRngVal__SHIFT		0x0
 #define DF_CS_UMC_AON0_DramBaseAddress0__LgcyMmioHoleEn__SHIFT		0x1
-#define DF_CS_UMC_AON0_DramBaseAddress0__IntLvNumChan__SHIFT		0x4
-#define DF_CS_UMC_AON0_DramBaseAddress0__IntLvAddrSel__SHIFT		0x8
+#define DF_CS_UMC_AON0_DramBaseAddress0__IntLvNumChan__SHIFT		0x2
+#define DF_CS_UMC_AON0_DramBaseAddress0__IntLvAddrSel__SHIFT		0x9
 #define DF_CS_UMC_AON0_DramBaseAddress0__DramBaseAddr__SHIFT		0xc
 #define DF_CS_UMC_AON0_DramBaseAddress0__AddrRngVal_MASK		0x00000001L
 #define DF_CS_UMC_AON0_DramBaseAddress0__LgcyMmioHoleEn_MASK		0x00000002L
-#define DF_CS_UMC_AON0_DramBaseAddress0__IntLvNumChan_MASK		0x000000F0L
-#define DF_CS_UMC_AON0_DramBaseAddress0__IntLvAddrSel_MASK		0x00000700L
+#define DF_CS_UMC_AON0_DramBaseAddress0__IntLvNumChan_MASK		0x0000003CL
+#define DF_CS_UMC_AON0_DramBaseAddress0__IntLvAddrSel_MASK		0x00000E00L
 #define DF_CS_UMC_AON0_DramBaseAddress0__DramBaseAddr_MASK		0xFFFFF000L
 
 #endif

@@ -2026,17 +2026,15 @@ enum atom_smu11_syspll_id {
   SMU11_SYSPLL3_1_ID        = 6,
 };
 
-
 enum atom_smu11_syspll0_clock_id {
-  SMU11_SYSPLL0_SOCCLK_ID   = 0,       // SOCCLK
-  SMU11_SYSPLL0_MP0CLK_ID   = 1,       // MP0CLK
-  SMU11_SYSPLL0_DCLK_ID     = 2,       // DCLK
-  SMU11_SYSPLL0_VCLK_ID     = 3,       // VCLK
-  SMU11_SYSPLL0_ECLK_ID     = 4,       // ECLK
+  SMU11_SYSPLL0_ECLK_ID     = 0,       // ECLK
+  SMU11_SYSPLL0_SOCCLK_ID   = 1,       // SOCCLK
+  SMU11_SYSPLL0_MP0CLK_ID   = 2,       // MP0CLK
+  SMU11_SYSPLL0_DCLK_ID     = 3,       // DCLK
+  SMU11_SYSPLL0_VCLK_ID     = 4,       // VCLK
   SMU11_SYSPLL0_DCEFCLK_ID  = 5,       // DCEFCLK
 };
 
-
 enum atom_smu11_syspll1_0_clock_id {
   SMU11_SYSPLL1_0_UCLKA_ID  = 0,       // UCLK_a
 };

@@ -180,7 +180,6 @@ static int pp_late_init(void *handle)
 {
 	struct amdgpu_device *adev = handle;
 	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
-	int ret;
 
 	if (hwmgr && hwmgr->pm_en) {
 		mutex_lock(&hwmgr->smu_lock);

@@ -191,13 +190,6 @@ static int pp_late_init(void *handle)
 	if (adev->pm.smu_prv_buffer_size != 0)
 		pp_reserve_vram_for_smu(adev);
 
-	if (hwmgr->hwmgr_func->gfx_off_control &&
-	    (hwmgr->feature_mask & PP_GFXOFF_MASK)) {
-		ret = hwmgr->hwmgr_func->gfx_off_control(hwmgr, true);
-		if (ret)
-			pr_err("gfx off enabling failed!\n");
-	}
-
 	return 0;
 }

@@ -245,7 +237,7 @@ static int pp_set_powergating_state(void *handle,
 	}
 
 	if (hwmgr->hwmgr_func->enable_per_cu_power_gating == NULL) {
-		pr_info("%s was not implemented.\n", __func__);
+		pr_debug("%s was not implemented.\n", __func__);
 		return 0;
 	}

@@ -265,19 +265,18 @@ int psm_adjust_power_state_dynamic(struct pp_hwmgr *hwmgr, bool skip,
 	if (skip)
 		return 0;
 
-	if (!hwmgr->ps)
-		/*
-		 * for vega12/vega20 which does not support power state manager
-		 * DAL clock limits should also be honoured
-		 */
-		phm_apply_clock_adjust_rules(hwmgr);
-
 	phm_pre_display_configuration_changed(hwmgr);
 
 	phm_display_configuration_changed(hwmgr);
 
 	if (hwmgr->ps)
 		power_state_management(hwmgr, new_ps);
+	else
+		/*
+		 * for vega12/vega20 which does not support power state manager
+		 * DAL clock limits should also be honoured
+		 */
+		phm_apply_clock_adjust_rules(hwmgr);
 
 	phm_notify_smc_display_config_after_ps_adjustment(hwmgr);

@@ -496,7 +496,9 @@ int pp_atomfwctrl_get_clk_information_by_clkid(struct pp_hwmgr *hwmgr, BIOS_CLKI
 	uint32_t ix;
 
 	parameters.clk_id = id;
+	parameters.syspll_id = 0;
 	parameters.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
+	parameters.dfsdid = 0;
 
 	ix = GetIndexIntoMasterCmdTable(getsmuclockinfo);

@@ -505,7 +507,7 @@ int pp_atomfwctrl_get_clk_information_by_clkid(struct pp_hwmgr *hwmgr, BIOS_CLKI
 		return -EINVAL;
 
 	output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&parameters;
-	*frequency = output->atom_smu_outputclkfreq.smu_clock_freq_hz / 10000;
+	*frequency = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;
 
 	return 0;
 }

@@ -870,12 +870,6 @@ static int init_over_drive_limits(
 	hwmgr->platform_descriptor.maxOverdriveVDDC = 0;
 	hwmgr->platform_descriptor.overdriveVDDCStep = 0;
 
-	if (hwmgr->platform_descriptor.overdriveLimit.engineClock == 0 \
-		|| hwmgr->platform_descriptor.overdriveLimit.memoryClock == 0) {
-		hwmgr->od_enabled = false;
-		pr_debug("OverDrive feature not support by VBIOS\n");
-	}
-
 	return 0;
 }

@@ -1074,12 +1074,6 @@ static int init_overdrive_limits(struct pp_hwmgr *hwmgr,
 			powerplay_table,
 			(const ATOM_FIRMWARE_INFO_V2_1 *)fw_info);
 
-	if (hwmgr->platform_descriptor.overdriveLimit.engineClock == 0
-	    && hwmgr->platform_descriptor.overdriveLimit.memoryClock == 0) {
-		hwmgr->od_enabled = false;
-		pr_debug("OverDrive feature not support by VBIOS\n");
-	}
-
 	return result;
 }

@@ -53,8 +53,37 @@ static const unsigned long SMU10_Magic = (unsigned long) PHM_Rv_Magic;
 
 
 static int smu10_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
-		struct pp_display_clock_request *clock_req);
+		struct pp_display_clock_request *clock_req)
+{
+	struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
+	enum amd_pp_clock_type clk_type = clock_req->clock_type;
+	uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000;
+	PPSMC_Msg msg;
+
+	switch (clk_type) {
+	case amd_pp_dcf_clock:
+		if (clk_freq == smu10_data->dcf_actual_hard_min_freq)
+			return 0;
+		msg = PPSMC_MSG_SetHardMinDcefclkByFreq;
+		smu10_data->dcf_actual_hard_min_freq = clk_freq;
+		break;
+	case amd_pp_soc_clock:
+		msg = PPSMC_MSG_SetHardMinSocclkByFreq;
+		break;
+	case amd_pp_f_clock:
+		if (clk_freq == smu10_data->f_actual_hard_min_freq)
+			return 0;
+		smu10_data->f_actual_hard_min_freq = clk_freq;
+		msg = PPSMC_MSG_SetHardMinFclkByFreq;
+		break;
+	default:
+		pr_info("[DisplayClockVoltageRequest]Invalid Clock Type!");
+		return -EINVAL;
+	}
+	smum_send_msg_to_smc_with_parameter(hwmgr, msg, clk_freq);
+
+	return 0;
+}
 
 static struct smu10_power_state *cast_smu10_ps(struct pp_hw_power_state *hw_ps)
 {

@@ -284,7 +313,7 @@ static int smu10_disable_gfx_off(struct pp_hwmgr *hwmgr)
 
 static int smu10_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
 {
-	return smu10_disable_gfx_off(hwmgr);
+	return 0;
 }
 
 static int smu10_enable_gfx_off(struct pp_hwmgr *hwmgr)

@@ -299,7 +328,7 @@ static int smu10_enable_gfx_off(struct pp_hwmgr *hwmgr)
 
 static int smu10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
 {
-	return smu10_enable_gfx_off(hwmgr);
+	return 0;
 }
 
 static int smu10_gfx_off_control(struct pp_hwmgr *hwmgr, bool enable)

@@ -1000,6 +1029,12 @@ static int smu10_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr,
 	case amd_pp_soc_clock:
 		pclk_vol_table = pinfo->vdd_dep_on_socclk;
 		break;
+	case amd_pp_disp_clock:
+		pclk_vol_table = pinfo->vdd_dep_on_dispclk;
+		break;
+	case amd_pp_phy_clock:
+		pclk_vol_table = pinfo->vdd_dep_on_phyclk;
+		break;
 	default:
 		return -EINVAL;
 	}

@@ -1017,39 +1052,7 @@ static int smu10_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr,
 	return 0;
 }
 
-static int smu10_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
-		struct pp_display_clock_request *clock_req)
-{
-	struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
-	enum amd_pp_clock_type clk_type = clock_req->clock_type;
-	uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000;
-	PPSMC_Msg msg;
-
-	switch (clk_type) {
-	case amd_pp_dcf_clock:
-		if (clk_freq == smu10_data->dcf_actual_hard_min_freq)
-			return 0;
-		msg = PPSMC_MSG_SetHardMinDcefclkByFreq;
-		smu10_data->dcf_actual_hard_min_freq = clk_freq;
-		break;
-	case amd_pp_soc_clock:
-		msg = PPSMC_MSG_SetHardMinSocclkByFreq;
-		break;
-	case amd_pp_f_clock:
-		if (clk_freq == smu10_data->f_actual_hard_min_freq)
-			return 0;
-		smu10_data->f_actual_hard_min_freq = clk_freq;
-		msg = PPSMC_MSG_SetHardMinFclkByFreq;
-		break;
-	default:
-		pr_info("[DisplayClockVoltageRequest]Invalid Clock Type!");
-		return -EINVAL;
-	}
-
-	smum_send_msg_to_smc_with_parameter(hwmgr, msg, clk_freq);
-
-	return 0;
-}
-
 static int smu10_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_info *clocks)
 {

@@ -1182,6 +1185,7 @@ static const struct pp_hwmgr_func smu10_hwmgr_funcs = {
 	.set_mmhub_powergating_by_smu = smu10_set_mmhub_powergating_by_smu,
 	.smus_notify_pwe = smu10_smus_notify_pwe,
 	.gfx_off_control = smu10_gfx_off_control,
+	.display_clock_voltage_request = smu10_display_clock_voltage_request,
 };
 
 int smu10_init_function_pointers(struct pp_hwmgr *hwmgr)

@@ -791,7 +791,8 @@ static int smu7_setup_dpm_tables_v1(struct pp_hwmgr *hwmgr)
 			data->dpm_table.sclk_table.count++;
 		}
 	}
-
+	if (hwmgr->platform_descriptor.overdriveLimit.engineClock == 0)
+		hwmgr->platform_descriptor.overdriveLimit.engineClock = dep_sclk_table->entries[i-1].clk;
 	/* Initialize Mclk DPM table based on allow Mclk values */
 	data->dpm_table.mclk_table.count = 0;
 	for (i = 0; i < dep_mclk_table->count; i++) {

@@ -806,6 +807,8 @@ static int smu7_setup_dpm_tables_v1(struct pp_hwmgr *hwmgr)
 		}
 	}
 
+	if (hwmgr->platform_descriptor.overdriveLimit.memoryClock == 0)
+		hwmgr->platform_descriptor.overdriveLimit.memoryClock = dep_mclk_table->entries[i-1].clk;
 	return 0;
 }

@@ -3752,14 +3755,17 @@ static int smu7_trim_dpm_states(struct pp_hwmgr *hwmgr,
 static int smu7_generate_dpm_level_enable_mask(
 		struct pp_hwmgr *hwmgr, const void *input)
 {
-	int result;
+	int result = 0;
 	const struct phm_set_power_state_input *states =
 			(const struct phm_set_power_state_input *)input;
 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
 	const struct smu7_power_state *smu7_ps =
 			cast_const_phw_smu7_power_state(states->pnew_state);
 
-	result = smu7_trim_dpm_states(hwmgr, smu7_ps);
+	/*skip the trim if od is enabled*/
+	if (!hwmgr->od_enabled)
+		result = smu7_trim_dpm_states(hwmgr, smu7_ps);
+
 	if (result)
 		return result;

@@ -321,8 +321,12 @@ static int vega10_odn_initial_default_setting(struct pp_hwmgr *hwmgr)
 		odn_table->min_vddc = dep_table[0]->entries[0].vddc;
 
 	i = od_table[2]->count - 1;
-	od_table[2]->entries[i].clk = hwmgr->platform_descriptor.overdriveLimit.memoryClock;
-	od_table[2]->entries[i].vddc = odn_table->max_vddc;
+	od_table[2]->entries[i].clk = hwmgr->platform_descriptor.overdriveLimit.memoryClock > od_table[2]->entries[i].clk ?
+					hwmgr->platform_descriptor.overdriveLimit.memoryClock :
+					od_table[2]->entries[i].clk;
+	od_table[2]->entries[i].vddc = odn_table->max_vddc > od_table[2]->entries[i].vddc ?
+					odn_table->max_vddc :
+					od_table[2]->entries[i].vddc;
 
 	return 0;
 }

@@ -1311,6 +1315,9 @@ static int vega10_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
 	vega10_setup_default_single_dpm_table(hwmgr,
 			dpm_table,
 			dep_gfx_table);
+	if (hwmgr->platform_descriptor.overdriveLimit.engineClock == 0)
+		hwmgr->platform_descriptor.overdriveLimit.engineClock =
+			dpm_table->dpm_levels[dpm_table->count-1].value;
 	vega10_init_dpm_state(&(dpm_table->dpm_state));
 
 	/* Initialize Mclk DPM table based on allow Mclk values */

@@ -1319,6 +1326,10 @@ static int vega10_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
 	vega10_setup_default_single_dpm_table(hwmgr,
 			dpm_table,
 			dep_mclk_table);
+	if (hwmgr->platform_descriptor.overdriveLimit.memoryClock == 0)
+		hwmgr->platform_descriptor.overdriveLimit.memoryClock =
+			dpm_table->dpm_levels[dpm_table->count-1].value;
+
 	vega10_init_dpm_state(&(dpm_table->dpm_state));
 
 	data->dpm_table.eclk_table.count = 0;

@@ -1104,7 +1104,7 @@ static int vega10_enable_psm_gc_edc_config(struct pp_hwmgr *hwmgr)
 	for (count = 0; count < num_se; count++) {
 		data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK | GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK | ( count << GRBM_GFX_INDEX__SE_INDEX__SHIFT);
 		WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, data);
-		result |= vega10_program_didt_config_registers(hwmgr, PSMSEEDCStallPatternConfig_Vega10, VEGA10_CONFIGREG_DIDT);
+		result = vega10_program_didt_config_registers(hwmgr, PSMSEEDCStallPatternConfig_Vega10, VEGA10_CONFIGREG_DIDT);
 		result |= vega10_program_didt_config_registers(hwmgr, PSMSEEDCStallDelayConfig_Vega10, VEGA10_CONFIGREG_DIDT);
 		result |= vega10_program_didt_config_registers(hwmgr, PSMSEEDCCtrlResetConfig_Vega10, VEGA10_CONFIGREG_DIDT);
 		result |= vega10_program_didt_config_registers(hwmgr, PSMSEEDCCtrlConfig_Vega10, VEGA10_CONFIGREG_DIDT);

@@ -267,12 +267,6 @@ static int init_over_drive_limits(
 	hwmgr->platform_descriptor.maxOverdriveVDDC = 0;
 	hwmgr->platform_descriptor.overdriveVDDCStep = 0;
 
-	if (hwmgr->platform_descriptor.overdriveLimit.engineClock == 0 ||
-	    hwmgr->platform_descriptor.overdriveLimit.memoryClock == 0) {
-		hwmgr->od_enabled = false;
-		pr_debug("OverDrive feature not support by VBIOS\n");
-	}
-
 	return 0;
 }

@@ -349,8 +349,13 @@ static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity)
 	struct dma_fence * fence = entity->dependency;
 	struct drm_sched_fence *s_fence;
 
-	if (fence->context == entity->fence_context) {
-		/* We can ignore fences from ourself */
+	if (fence->context == entity->fence_context ||
+	    fence->context == entity->fence_context + 1) {
+		/*
+		 * Fence is a scheduled/finished fence from a job
+		 * which belongs to the same entity, we can ignore
+		 * fences from ourself
+		 */
 		dma_fence_put(entity->dependency);
 		return false;
 	}

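Background for the check above: when a scheduler entity is initialized it allocates two consecutive dma-fence contexts, fence_context for its "scheduled" fences and fence_context + 1 for its "finished" fences, so a dependency coming from the same entity can carry either context number and both must be skipped. A small sketch of the convention (illustrative types, not the actual drm_sched structures):

    #include <stdbool.h>
    #include <stdint.h>

    struct entity {
        uint64_t fence_context;     /* base ctx: "scheduled" fences  */
                                    /* base ctx + 1: "finished" ones */
    };

    /* true if the fence was emitted by this entity and can be ignored */
    static bool is_own_fence(const struct entity *e, uint64_t fence_ctx)
    {
        return fence_ctx == e->fence_context ||
               fence_ctx == e->fence_context + 1;
    }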