drm i915 gvt, amdgpu, core fixes
Merge tag 'drm-next-2019-01-05' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Dave Airlie:
 "Happy New Year, just decloaking from leave to get some stuff from the
  last week in before rc1:

  core:
   - two regression fixes for damage blob and atomic

  i915 gvt:
   - Some missed GVT fixes from the original pull

  amdgpu:
   - new PCI IDs
   - SR-IOV fixes
   - DC fixes
   - Vega20 fixes"

* tag 'drm-next-2019-01-05' of git://anongit.freedesktop.org/drm/drm: (53 commits)
  drm: Put damage blob when destroy plane state
  drm: fix null pointer dereference on null state pointer
  drm/amdgpu: Add new VegaM pci id
  drm/ttm: Use drm_debug_printer for all ttm_bo_mem_space_debug output
  drm/amdgpu: add Vega20 PSP ASD firmware loading
  drm/amd/display: Fix MST dp_blank REG_WAIT timeout
  drm/amd/display: validate extended dongle caps
  drm/amd/display: Use div_u64 for flip timestamp ns to ms
  drm/amdgpu/uvd:Change uvd ring name convention
  drm/amd/powerplay: add Vega20 LCLK DPM level setting support
  drm/amdgpu: print process info when job timeout
  drm/amdgpu/nbio7.4: add hw bug workaround for vega20
  drm/amdgpu/nbio6.1: add hw bug workaround for vega10/12
  drm/amd/display: Optimize passive update planes.
  drm/amd/display: verify lane status before exiting verify link cap
  drm/amd/display: Fix bug with not updating VSP infoframe
  drm/amd/display: Add retry to read ddc_clock pin
  drm/amd/display: Don't skip link training for empty dongle
  drm/amd/display: Wait edp HPD to high in detect_sink
  drm/amd/display: fix surface update sequence
  ...
commit 0fe4e2d5cd
@@ -1428,6 +1428,9 @@ int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void *data,
 	if (IS_ERR(fence))
 		return PTR_ERR(fence);
 
+	if (!fence)
+		fence = dma_fence_get_stub();
+
 	switch (info->in.what) {
 	case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ:
 		r = drm_syncobj_create(&syncobj, 0, fence);
@@ -3476,14 +3476,16 @@ static void amdgpu_device_lock_adev(struct amdgpu_device *adev)
 	mutex_lock(&adev->lock_reset);
 	atomic_inc(&adev->gpu_reset_counter);
 	adev->in_gpu_reset = 1;
-	/* Block kfd */
-	amdgpu_amdkfd_pre_reset(adev);
+	/* Block kfd: SRIOV would do it separately */
+	if (!amdgpu_sriov_vf(adev))
+		amdgpu_amdkfd_pre_reset(adev);
 }
 
 static void amdgpu_device_unlock_adev(struct amdgpu_device *adev)
 {
-	/*unlock kfd */
-	amdgpu_amdkfd_post_reset(adev);
+	/*unlock kfd: SRIOV would do it separately */
+	if (!amdgpu_sriov_vf(adev))
+		amdgpu_amdkfd_post_reset(adev);
 	amdgpu_vf_error_trans_all(adev);
 	adev->in_gpu_reset = 0;
 	mutex_unlock(&adev->lock_reset);
@@ -865,6 +865,7 @@ static const struct pci_device_id pciidlist[] = {
 	/* VEGAM */
 	{0x1002, 0x694C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGAM},
 	{0x1002, 0x694E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGAM},
+	{0x1002, 0x694F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGAM},
 	/* Vega 10 */
 	{0x1002, 0x6860, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
 	{0x1002, 0x6861, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
@@ -32,6 +32,9 @@ static void amdgpu_job_timedout(struct drm_sched_job *s_job)
 {
 	struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
 	struct amdgpu_job *job = to_amdgpu_job(s_job);
+	struct amdgpu_task_info ti;
+
+	memset(&ti, 0, sizeof(struct amdgpu_task_info));
 
 	if (amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent)) {
 		DRM_ERROR("ring %s timeout, but soft recovered\n",
@@ -39,9 +42,12 @@ static void amdgpu_job_timedout(struct drm_sched_job *s_job)
 		return;
 	}
 
+	amdgpu_vm_get_task_info(ring->adev, job->pasid, &ti);
 	DRM_ERROR("ring %s timeout, signaled seq=%u, emitted seq=%u\n",
 		  job->base.sched->name, atomic_read(&ring->fence_drv.last_seq),
 		  ring->fence_drv.sync_seq);
+	DRM_ERROR("Process information: process %s pid %d thread %s pid %d\n",
+		  ti.process_name, ti.tgid, ti.task_name, ti.pid);
 
 	if (amdgpu_device_should_recover_gpu(ring->adev))
 		amdgpu_device_gpu_recover(ring->adev, job);
@@ -912,7 +912,7 @@ int amdgpu_bo_unpin(struct amdgpu_bo *bo)
 	struct ttm_operation_ctx ctx = { false, false };
 	int r, i;
 
-	if (!bo->pin_count) {
+	if (WARN_ON_ONCE(!bo->pin_count)) {
 		dev_warn(adev->dev, "%p unpin not necessary\n", bo);
 		return 0;
 	}
@@ -155,14 +155,6 @@ psp_cmd_submit_buf(struct psp_context *psp,
 	return ret;
 }
 
-bool psp_support_vmr_ring(struct psp_context *psp)
-{
-	if (amdgpu_sriov_vf(psp->adev) && psp->sos_fw_version > 0x80045)
-		return true;
-	else
-		return false;
-}
-
 static void psp_prep_tmr_cmd_buf(struct psp_context *psp,
 				 struct psp_gfx_cmd_resp *cmd,
 				 uint64_t tmr_mc, uint32_t size)
@@ -83,12 +83,13 @@ struct psp_funcs
 			    enum AMDGPU_UCODE_ID ucode_type);
 	bool (*smu_reload_quirk)(struct psp_context *psp);
 	int (*mode1_reset)(struct psp_context *psp);
-	uint64_t (*xgmi_get_node_id)(struct psp_context *psp);
-	uint64_t (*xgmi_get_hive_id)(struct psp_context *psp);
+	int (*xgmi_get_node_id)(struct psp_context *psp, uint64_t *node_id);
+	int (*xgmi_get_hive_id)(struct psp_context *psp, uint64_t *hive_id);
 	int (*xgmi_get_topology_info)(struct psp_context *psp, int number_devices,
 				      struct psp_xgmi_topology_info *topology);
 	int (*xgmi_set_topology_info)(struct psp_context *psp, int number_devices,
 				      struct psp_xgmi_topology_info *topology);
+	bool (*support_vmr_ring)(struct psp_context *psp);
 };
 
 struct psp_xgmi_context {
@@ -192,12 +193,14 @@ struct psp_xgmi_topology_info {
 		((psp)->funcs->bootloader_load_sos ? (psp)->funcs->bootloader_load_sos((psp)) : 0)
 #define psp_smu_reload_quirk(psp) \
 		((psp)->funcs->smu_reload_quirk ? (psp)->funcs->smu_reload_quirk((psp)) : false)
+#define psp_support_vmr_ring(psp) \
+		((psp)->funcs->support_vmr_ring ? (psp)->funcs->support_vmr_ring((psp)) : false)
 #define psp_mode1_reset(psp) \
 		((psp)->funcs->mode1_reset ? (psp)->funcs->mode1_reset((psp)) : false)
-#define psp_xgmi_get_node_id(psp) \
-		((psp)->funcs->xgmi_get_node_id ? (psp)->funcs->xgmi_get_node_id((psp)) : 0)
-#define psp_xgmi_get_hive_id(psp) \
-		((psp)->funcs->xgmi_get_hive_id ? (psp)->funcs->xgmi_get_hive_id((psp)) : 0)
+#define psp_xgmi_get_node_id(psp, node_id) \
+		((psp)->funcs->xgmi_get_node_id ? (psp)->funcs->xgmi_get_node_id((psp), (node_id)) : -EINVAL)
+#define psp_xgmi_get_hive_id(psp, hive_id) \
+		((psp)->funcs->xgmi_get_hive_id ? (psp)->funcs->xgmi_get_hive_id((psp), (hive_id)) : -EINVAL)
 #define psp_xgmi_get_topology_info(psp, num_device, topology) \
 		((psp)->funcs->xgmi_get_topology_info ? \
 		(psp)->funcs->xgmi_get_topology_info((psp), (num_device), (topology)) : -EINVAL)
@@ -217,8 +220,6 @@ extern const struct amdgpu_ip_block_version psp_v10_0_ip_block;
 
 int psp_gpu_reset(struct amdgpu_device *adev);
 int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
-bool psp_support_vmr_ring(struct psp_context *psp);
-
 extern const struct amdgpu_ip_block_version psp_v11_0_ip_block;
 
 #endif
@@ -29,7 +29,7 @@
 #include <drm/drm_print.h>
 
 /* max number of rings */
-#define AMDGPU_MAX_RINGS		21
+#define AMDGPU_MAX_RINGS		23
 #define AMDGPU_MAX_GFX_RINGS		1
 #define AMDGPU_MAX_COMPUTE_RINGS	8
 #define AMDGPU_MAX_VCE_RINGS		3
@@ -262,7 +262,7 @@ static int amdgpu_vcn_pause_dpg_mode(struct amdgpu_device *adev,
 
 				ring = &adev->vcn.ring_dec;
 				WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
-					     RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2));
+					     RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2) & 0x7FFFFFFF);
 				SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
 						   UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON,
 						   UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
@@ -322,7 +322,7 @@ static int amdgpu_vcn_pause_dpg_mode(struct amdgpu_device *adev,
 
 				ring = &adev->vcn.ring_dec;
 				WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
-					     RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2));
+					     RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2) & 0x7FFFFFFF);
 				SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
 						   UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON,
 						   UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
@@ -396,16 +396,26 @@ void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
 
 	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
 		struct dpg_pause_state new_state;
+		unsigned int fences = 0;
+		unsigned int i;
+
+		for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
+			fences += amdgpu_fence_count_emitted(&adev->vcn.ring_enc[i]);
+		}
+		if (fences)
+			new_state.fw_based = VCN_DPG_STATE__PAUSE;
+		else
+			new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
+
+		if (amdgpu_fence_count_emitted(&adev->vcn.ring_jpeg))
+			new_state.jpeg = VCN_DPG_STATE__PAUSE;
+		else
+			new_state.jpeg = VCN_DPG_STATE__UNPAUSE;
 
 		if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC)
 			new_state.fw_based = VCN_DPG_STATE__PAUSE;
-		else
-			new_state.fw_based = adev->vcn.pause_state.fw_based;
-
-		if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG)
+		else if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG)
 			new_state.jpeg = VCN_DPG_STATE__PAUSE;
-		else
-			new_state.jpeg = adev->vcn.pause_state.jpeg;
 
 		amdgpu_vcn_pause_dpg_mode(adev, &new_state);
 	}
@@ -97,8 +97,19 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
 	if (!adev->gmc.xgmi.supported)
 		return 0;
 
-	adev->gmc.xgmi.node_id = psp_xgmi_get_node_id(&adev->psp);
-	adev->gmc.xgmi.hive_id = psp_xgmi_get_hive_id(&adev->psp);
+	ret = psp_xgmi_get_node_id(&adev->psp, &adev->gmc.xgmi.node_id);
+	if (ret) {
+		dev_err(adev->dev,
+			"XGMI: Failed to get node id\n");
+		return ret;
+	}
+
+	ret = psp_xgmi_get_hive_id(&adev->psp, &adev->gmc.xgmi.hive_id);
+	if (ret) {
+		dev_err(adev->dev,
+			"XGMI: Failed to get hive id\n");
+		return ret;
+	}
 
 	mutex_lock(&xgmi_mutex);
 	hive = amdgpu_get_xgmi_hive(adev);
@@ -718,37 +718,46 @@ static bool gmc_v9_0_keep_stolen_memory(struct amdgpu_device *adev)
 	}
 }
 
+static int gmc_v9_0_allocate_vm_inv_eng(struct amdgpu_device *adev)
+{
+	struct amdgpu_ring *ring;
+	unsigned vm_inv_engs[AMDGPU_MAX_VMHUBS] =
+		{GFXHUB_FREE_VM_INV_ENGS_BITMAP, MMHUB_FREE_VM_INV_ENGS_BITMAP};
+	unsigned i;
+	unsigned vmhub, inv_eng;
+
+	for (i = 0; i < adev->num_rings; ++i) {
+		ring = adev->rings[i];
+		vmhub = ring->funcs->vmhub;
+
+		inv_eng = ffs(vm_inv_engs[vmhub]);
+		if (!inv_eng) {
+			dev_err(adev->dev, "no VM inv eng for ring %s\n",
+				ring->name);
+			return -EINVAL;
+		}
+
+		ring->vm_inv_eng = inv_eng - 1;
+		change_bit(inv_eng - 1, (unsigned long *)(&vm_inv_engs[vmhub]));
+
+		dev_info(adev->dev, "ring %s uses VM inv eng %u on hub %u\n",
+			 ring->name, ring->vm_inv_eng, ring->funcs->vmhub);
+	}
+
+	return 0;
+}
+
 static int gmc_v9_0_late_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	/*
-	 * The latest engine allocation on gfx9 is:
-	 * Engine 0, 1: idle
-	 * Engine 2, 3: firmware
-	 * Engine 4~13: amdgpu ring, subject to change when ring number changes
-	 * Engine 14~15: idle
-	 * Engine 16: kfd tlb invalidation
-	 * Engine 17: Gart flushes
-	 */
-	unsigned vm_inv_eng[AMDGPU_MAX_VMHUBS] = { 4, 4 };
-	unsigned i;
 	int r;
 
 	if (!gmc_v9_0_keep_stolen_memory(adev))
 		amdgpu_bo_late_init(adev);
 
-	for(i = 0; i < adev->num_rings; ++i) {
-		struct amdgpu_ring *ring = adev->rings[i];
-		unsigned vmhub = ring->funcs->vmhub;
-
-		ring->vm_inv_eng = vm_inv_eng[vmhub]++;
-		dev_info(adev->dev, "ring %s uses VM inv eng %u on hub %u\n",
-			 ring->name, ring->vm_inv_eng, ring->funcs->vmhub);
-	}
-
-	/* Engine 16 is used for KFD and 17 for GART flushes */
-	for(i = 0; i < AMDGPU_MAX_VMHUBS; ++i)
-		BUG_ON(vm_inv_eng[i] > 16);
+	r = gmc_v9_0_allocate_vm_inv_eng(adev);
+	if (r)
+		return r;
 
 	if (adev->asic_type == CHIP_VEGA10 && !amdgpu_sriov_vf(adev)) {
 		r = gmc_v9_0_ecc_available(adev);
@@ -24,6 +24,16 @@
 #ifndef __GMC_V9_0_H__
 #define __GMC_V9_0_H__
 
+	/*
+	 * The latest engine allocation on gfx9 is:
+	 * Engine 2, 3: firmware
+	 * Engine 0, 1, 4~16: amdgpu ring,
+	 *                    subject to change when ring number changes
+	 * Engine 17: Gart flushes
+	 */
+#define GFXHUB_FREE_VM_INV_ENGS_BITMAP	0x1FFF3
+#define MMHUB_FREE_VM_INV_ENGS_BITMAP	0x1FFF3
+
 extern const struct amd_ip_funcs gmc_v9_0_ip_funcs;
 extern const struct amdgpu_ip_block_version gmc_v9_0_ip_block;
 
@@ -32,6 +32,7 @@
 #define smnCPM_CONTROL		0x11180460
 #define smnPCIE_CNTL2		0x11180070
 #define smnPCIE_CONFIG_CNTL	0x11180044
+#define smnPCIE_CI_CNTL		0x11180080
 
 static u32 nbio_v6_1_get_rev_id(struct amdgpu_device *adev)
 {
@@ -270,6 +271,12 @@ static void nbio_v6_1_init_registers(struct amdgpu_device *adev)
 
 	if (def != data)
 		WREG32_PCIE(smnPCIE_CONFIG_CNTL, data);
+
+	def = data = RREG32_PCIE(smnPCIE_CI_CNTL);
+	data = REG_SET_FIELD(data, PCIE_CI_CNTL, CI_SLV_ORDERING_DIS, 1);
+
+	if (def != data)
+		WREG32_PCIE(smnPCIE_CI_CNTL, data);
 }
 
 const struct amdgpu_nbio_funcs nbio_v6_1_funcs = {
@@ -31,6 +31,7 @@
 
 #define smnCPM_CONTROL		0x11180460
 #define smnPCIE_CNTL2		0x11180070
+#define smnPCIE_CI_CNTL		0x11180080
 
 static u32 nbio_v7_4_get_rev_id(struct amdgpu_device *adev)
 {
@@ -222,7 +223,13 @@ static void nbio_v7_4_detect_hw_virt(struct amdgpu_device *adev)
 
 static void nbio_v7_4_init_registers(struct amdgpu_device *adev)
 {
+	uint32_t def, data;
 
+	def = data = RREG32_PCIE(smnPCIE_CI_CNTL);
+	data = REG_SET_FIELD(data, PCIE_CI_CNTL, CI_SLV_ORDERING_DIS, 1);
+
+	if (def != data)
+		WREG32_PCIE(smnPCIE_CI_CNTL, data);
 }
 
 const struct amdgpu_nbio_funcs nbio_v7_4_funcs = {
@@ -34,6 +34,7 @@
 #include "nbio/nbio_7_4_offset.h"
 
 MODULE_FIRMWARE("amdgpu/vega20_sos.bin");
+MODULE_FIRMWARE("amdgpu/vega20_asd.bin");
 MODULE_FIRMWARE("amdgpu/vega20_ta.bin");
 
 /* address block */
@@ -100,6 +101,7 @@ static int psp_v11_0_init_microcode(struct psp_context *psp)
 	char fw_name[30];
 	int err = 0;
 	const struct psp_firmware_header_v1_0 *sos_hdr;
+	const struct psp_firmware_header_v1_0 *asd_hdr;
 	const struct ta_firmware_header_v1_0 *ta_hdr;
 
 	DRM_DEBUG("\n");
@@ -132,14 +134,30 @@ static int psp_v11_0_init_microcode(struct psp_context *psp)
 	adev->psp.sos_start_addr = (uint8_t *)adev->psp.sys_start_addr +
 				le32_to_cpu(sos_hdr->sos_offset_bytes);
 
+	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_asd.bin", chip_name);
+	err = request_firmware(&adev->psp.asd_fw, fw_name, adev->dev);
+	if (err)
+		goto out1;
+
+	err = amdgpu_ucode_validate(adev->psp.asd_fw);
+	if (err)
+		goto out1;
+
+	asd_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.asd_fw->data;
+	adev->psp.asd_fw_version = le32_to_cpu(asd_hdr->header.ucode_version);
+	adev->psp.asd_feature_version = le32_to_cpu(asd_hdr->ucode_feature_version);
+	adev->psp.asd_ucode_size = le32_to_cpu(asd_hdr->header.ucode_size_bytes);
+	adev->psp.asd_start_addr = (uint8_t *)asd_hdr +
+				le32_to_cpu(asd_hdr->header.ucode_array_offset_bytes);
+
 	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name);
 	err = request_firmware(&adev->psp.ta_fw, fw_name, adev->dev);
 	if (err)
-		goto out;
+		goto out2;
 
 	err = amdgpu_ucode_validate(adev->psp.ta_fw);
 	if (err)
-		goto out;
+		goto out2;
 
 	ta_hdr = (const struct ta_firmware_header_v1_0 *)adev->psp.ta_fw->data;
 	adev->psp.ta_xgmi_ucode_version = le32_to_cpu(ta_hdr->ta_xgmi_ucode_version);
@@ -148,14 +166,18 @@ static int psp_v11_0_init_microcode(struct psp_context *psp)
 		le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
 
 	return 0;
-out:
-	if (err) {
-		dev_err(adev->dev,
-			"psp v11.0: Failed to load firmware \"%s\"\n",
-			fw_name);
-		release_firmware(adev->psp.sos_fw);
-		adev->psp.sos_fw = NULL;
-	}
+
+out2:
+	release_firmware(adev->psp.ta_fw);
+	adev->psp.ta_fw = NULL;
+out1:
+	release_firmware(adev->psp.asd_fw);
+	adev->psp.asd_fw = NULL;
+out:
+	dev_err(adev->dev,
+		"psp v11.0: Failed to load firmware \"%s\"\n", fw_name);
+	release_firmware(adev->psp.sos_fw);
+	adev->psp.sos_fw = NULL;
 
 	return err;
 }
@@ -291,6 +313,13 @@ static int psp_v11_0_ring_init(struct psp_context *psp,
 	return 0;
 }
 
+static bool psp_v11_0_support_vmr_ring(struct psp_context *psp)
+{
+	if (amdgpu_sriov_vf(psp->adev) && psp->sos_fw_version > 0x80045)
+		return true;
+	return false;
+}
+
 static int psp_v11_0_ring_create(struct psp_context *psp,
 				 enum psp_ring_type ring_type)
 {
@@ -299,7 +328,7 @@ static int psp_v11_0_ring_create(struct psp_context *psp,
 	struct psp_ring *ring = &psp->km_ring;
 	struct amdgpu_device *adev = psp->adev;
 
-	if (psp_support_vmr_ring(psp)) {
+	if (psp_v11_0_support_vmr_ring(psp)) {
 		/* Write low address of the ring to C2PMSG_102 */
 		psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr);
 		WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, psp_ring_reg);
@@ -351,7 +380,7 @@ static int psp_v11_0_ring_stop(struct psp_context *psp,
 	struct amdgpu_device *adev = psp->adev;
 
 	/* Write the ring destroy command*/
-	if (psp_support_vmr_ring(psp))
+	if (psp_v11_0_support_vmr_ring(psp))
 		WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101,
 			     GFX_CTRL_CMD_ID_DESTROY_GPCOM_RING);
 	else
@@ -362,7 +391,7 @@ static int psp_v11_0_ring_stop(struct psp_context *psp,
 	mdelay(20);
 
 	/* Wait for response flag (bit 31) */
-	if (psp_support_vmr_ring(psp))
+	if (psp_v11_0_support_vmr_ring(psp))
 		ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
 				   0x80000000, 0x80000000, false);
 	else
@@ -406,7 +435,7 @@ static int psp_v11_0_cmd_submit(struct psp_context *psp,
 	uint32_t rb_frame_size_dw = sizeof(struct psp_gfx_rb_frame) / 4;
 
 	/* KM (GPCOM) prepare write pointer */
-	if (psp_support_vmr_ring(psp))
+	if (psp_v11_0_support_vmr_ring(psp))
 		psp_write_ptr_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102);
 	else
 		psp_write_ptr_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67);
@@ -438,7 +467,7 @@ static int psp_v11_0_cmd_submit(struct psp_context *psp,
 
 	/* Update the write Pointer in DWORDs */
 	psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw;
-	if (psp_support_vmr_ring(psp)) {
+	if (psp_v11_0_support_vmr_ring(psp)) {
 		WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, psp_write_ptr_reg);
 		WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101, GFX_CTRL_CMD_ID_CONSUME_CMD);
 	} else
@@ -680,7 +709,7 @@ static int psp_v11_0_xgmi_set_topology_info(struct psp_context *psp,
 	return psp_xgmi_invoke(psp, TA_COMMAND_XGMI__SET_TOPOLOGY_INFO);
 }
 
-static u64 psp_v11_0_xgmi_get_hive_id(struct psp_context *psp)
+static int psp_v11_0_xgmi_get_hive_id(struct psp_context *psp, uint64_t *hive_id)
 {
 	struct ta_xgmi_shared_memory *xgmi_cmd;
 	int ret;
@@ -693,12 +722,14 @@ static u64 psp_v11_0_xgmi_get_hive_id(struct psp_context *psp)
 	/* Invoke xgmi ta to get hive id */
 	ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
 	if (ret)
-		return 0;
-	else
-		return xgmi_cmd->xgmi_out_message.get_hive_id.hive_id;
+		return ret;
+
+	*hive_id = xgmi_cmd->xgmi_out_message.get_hive_id.hive_id;
+
+	return 0;
 }
 
-static u64 psp_v11_0_xgmi_get_node_id(struct psp_context *psp)
+static int psp_v11_0_xgmi_get_node_id(struct psp_context *psp, uint64_t *node_id)
 {
 	struct ta_xgmi_shared_memory *xgmi_cmd;
 	int ret;
@@ -711,9 +742,11 @@ static u64 psp_v11_0_xgmi_get_node_id(struct psp_context *psp)
 	/* Invoke xgmi ta to get the node id */
 	ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
 	if (ret)
-		return 0;
-	else
-		return xgmi_cmd->xgmi_out_message.get_node_id.node_id;
+		return ret;
+
+	*node_id = xgmi_cmd->xgmi_out_message.get_node_id.node_id;
+
+	return 0;
 }
 
 static const struct psp_funcs psp_v11_0_funcs = {
@@ -732,6 +765,7 @@ static const struct psp_funcs psp_v11_0_funcs = {
 	.xgmi_set_topology_info = psp_v11_0_xgmi_set_topology_info,
 	.xgmi_get_hive_id = psp_v11_0_xgmi_get_hive_id,
 	.xgmi_get_node_id = psp_v11_0_xgmi_get_node_id,
+	.support_vmr_ring = psp_v11_0_support_vmr_ring,
 };
 
 void psp_v11_0_set_psp_funcs(struct psp_context *psp)
@@ -240,8 +240,11 @@ static int psp_v3_1_bootloader_load_sos(struct psp_context *psp)
 	 * are already been loaded.
 	 */
 	sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
-	if (sol_reg)
+	if (sol_reg) {
+		psp->sos_fw_version = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_58);
+		printk("sos fw version = 0x%x.\n", psp->sos_fw_version);
 		return 0;
+	}
 
 	/* Wait for bootloader to signify that is ready having bit 31 of C2PMSG_35 set to 1 */
 	ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
@@ -1458,8 +1458,7 @@ static bool sdma_v4_0_fw_support_paging_queue(struct amdgpu_device *adev)
 		/*return fw_version >= 31;*/
 		return false;
 	case CHIP_VEGA20:
-		/*return fw_version >= 115;*/
-		return false;
+		return fw_version >= 123;
 	default:
 		return false;
 	}
@@ -1706,13 +1705,15 @@ static int sdma_v4_0_process_trap_irq(struct amdgpu_device *adev,
 		amdgpu_fence_process(&adev->sdma.instance[instance].ring);
 		break;
 	case 1:
-		/* XXX compute */
+		if (adev->asic_type == CHIP_VEGA20)
+			amdgpu_fence_process(&adev->sdma.instance[instance].page);
 		break;
 	case 2:
 		/* XXX compute */
 		break;
 	case 3:
-		amdgpu_fence_process(&adev->sdma.instance[instance].page);
+		if (adev->asic_type != CHIP_VEGA20)
+			amdgpu_fence_process(&adev->sdma.instance[instance].page);
 		break;
 	}
 	return 0;
@@ -49,14 +49,19 @@
 
 #define SOC15_WAIT_ON_RREG(ip, inst, reg, expected_value, mask, ret) \
 	do {							\
+		uint32_t old_ = 0;				\
 		uint32_t tmp_ = RREG32(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg); \
 		uint32_t loop = adev->usec_timeout;		\
 		while ((tmp_ & (mask)) != (expected_value)) {	\
-			udelay(2);				\
+			if (old_ != tmp_) {			\
+				loop = adev->usec_timeout;	\
+				old_ = tmp_;			\
+			} else					\
+				udelay(1);			\
 			tmp_ = RREG32(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg); \
 			loop--;					\
 			if (!loop) {				\
-				DRM_ERROR("Register(%d) [%s] failed to reach value 0x%08x != 0x%08x\n", \
+				DRM_WARN("Register(%d) [%s] failed to reach value 0x%08x != 0x%08x\n", \
 					  inst, #reg, (unsigned)expected_value, (unsigned)(tmp_ & (mask))); \
 				ret = -ETIMEDOUT;		\
 				break;				\
@@ -435,7 +435,7 @@ static int uvd_v7_0_sw_init(void *handle)
 			continue;
 		if (!amdgpu_sriov_vf(adev)) {
 			ring = &adev->uvd.inst[j].ring;
-			sprintf(ring->name, "uvd<%d>", j);
+			sprintf(ring->name, "uvd_%d", ring->me);
 			r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst[j].irq, 0);
 			if (r)
 				return r;
@@ -443,7 +443,7 @@ static int uvd_v7_0_sw_init(void *handle)
 
 		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
 			ring = &adev->uvd.inst[j].ring_enc[i];
-			sprintf(ring->name, "uvd_enc%d<%d>", i, j);
+			sprintf(ring->name, "uvd_enc_%d.%d", ring->me, i);
 			if (amdgpu_sriov_vf(adev)) {
 				ring->use_doorbell = true;
 
@@ -214,7 +214,8 @@ static int vcn_v1_0_hw_fini(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	struct amdgpu_ring *ring = &adev->vcn.ring_dec;
 
-	if (RREG32_SOC15(VCN, 0, mmUVD_STATUS))
+	if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
+		RREG32_SOC15(VCN, 0, mmUVD_STATUS))
 		vcn_v1_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
 
 	ring->sched.ready = false;
@@ -1087,7 +1088,8 @@ static int vcn_v1_0_start_dpg_mode(struct amdgpu_device *adev)
 	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_CNTL), 0,
 			~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);
 
-	/* initialize wptr */
+	/* initialize JPEG wptr */
 	ring = &adev->vcn.ring_jpeg;
 	ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR);
+
 	/* copy patch commands to the jpeg ring */
@@ -1159,21 +1161,29 @@ static int vcn_v1_0_stop_spg_mode(struct amdgpu_device *adev)
 static int vcn_v1_0_stop_dpg_mode(struct amdgpu_device *adev)
 {
 	int ret_code = 0;
+	uint32_t tmp;
 
 	/* Wait for power status to be UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF */
 	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
 			UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF,
 			UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
 
-	if (!ret_code) {
-		int tmp = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR) & 0x7FFFFFFF;
-		/* wait for read ptr to be equal to write ptr */
-		SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RBC_RB_RPTR, tmp, 0xFFFFFFFF, ret_code);
+	/* wait for read ptr to be equal to write ptr */
+	tmp = RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR);
+	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RB_RPTR, tmp, 0xFFFFFFFF, ret_code);
 
-		SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
-				UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF,
-				UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
-	}
+	tmp = RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2);
+	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RB_RPTR2, tmp, 0xFFFFFFFF, ret_code);
+
+	tmp = RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR);
+	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_JRBC_RB_RPTR, tmp, 0xFFFFFFFF, ret_code);
+
+	tmp = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR) & 0x7FFFFFFF;
+	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RBC_RB_RPTR, tmp, 0xFFFFFFFF, ret_code);
+
+	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
+			UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF,
+			UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
 
 	/* disable dynamic power gating mode */
 	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS), 0,
@@ -87,9 +87,9 @@ static u32 vi_pcie_rreg(struct amdgpu_device *adev, u32 reg)
 	u32 r;
 
 	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
-	WREG32(mmPCIE_INDEX, reg);
-	(void)RREG32(mmPCIE_INDEX);
-	r = RREG32(mmPCIE_DATA);
+	WREG32_NO_KIQ(mmPCIE_INDEX, reg);
+	(void)RREG32_NO_KIQ(mmPCIE_INDEX);
+	r = RREG32_NO_KIQ(mmPCIE_DATA);
 	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
 	return r;
 }
@@ -99,10 +99,10 @@ static void vi_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
 	unsigned long flags;
 
 	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
-	WREG32(mmPCIE_INDEX, reg);
-	(void)RREG32(mmPCIE_INDEX);
-	WREG32(mmPCIE_DATA, v);
-	(void)RREG32(mmPCIE_DATA);
+	WREG32_NO_KIQ(mmPCIE_INDEX, reg);
+	(void)RREG32_NO_KIQ(mmPCIE_INDEX);
+	WREG32_NO_KIQ(mmPCIE_DATA, v);
+	(void)RREG32_NO_KIQ(mmPCIE_DATA);
 	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
 }
 
@@ -123,8 +123,8 @@ static void vi_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
 	unsigned long flags;
 
 	spin_lock_irqsave(&adev->smc_idx_lock, flags);
-	WREG32(mmSMC_IND_INDEX_11, (reg));
-	WREG32(mmSMC_IND_DATA_11, (v));
+	WREG32_NO_KIQ(mmSMC_IND_INDEX_11, (reg));
+	WREG32_NO_KIQ(mmSMC_IND_DATA_11, (v));
 	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
 }
 
@@ -1623,8 +1623,8 @@ static int kfd_ioctl_import_dmabuf(struct file *filep,
 		return -EINVAL;
 
 	dmabuf = dma_buf_get(args->dmabuf_fd);
-	if (!dmabuf)
-		return -EINVAL;
+	if (IS_ERR(dmabuf))
+		return PTR_ERR(dmabuf);
 
 	mutex_lock(&p->mutex);
 
@@ -331,12 +331,29 @@ static void dm_crtc_high_irq(void *interrupt_params)
 	struct common_irq_params *irq_params = interrupt_params;
 	struct amdgpu_device *adev = irq_params->adev;
 	struct amdgpu_crtc *acrtc;
+	struct dm_crtc_state *acrtc_state;
 
 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
 
 	if (acrtc) {
 		drm_crtc_handle_vblank(&acrtc->base);
 		amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
+
+		acrtc_state = to_dm_crtc_state(acrtc->base.state);
+
+		if (acrtc_state->stream &&
+		    acrtc_state->vrr_params.supported &&
+		    acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
+			mod_freesync_handle_v_update(
+				adev->dm.freesync_module,
+				acrtc_state->stream,
+				&acrtc_state->vrr_params);
+
+			dc_stream_adjust_vmin_vmax(
+				adev->dm.dc,
+				acrtc_state->stream,
+				&acrtc_state->vrr_params.adjust);
+		}
 	}
 }
 
@@ -3009,7 +3026,7 @@ dm_crtc_duplicate_state(struct drm_crtc *crtc)
 		dc_stream_retain(state->stream);
 	}
 
-	state->adjust = cur->adjust;
+	state->vrr_params = cur->vrr_params;
 	state->vrr_infopacket = cur->vrr_infopacket;
 	state->abm_level = cur->abm_level;
 	state->vrr_supported = cur->vrr_supported;
@@ -3628,10 +3645,20 @@ static int dm_plane_atomic_check(struct drm_plane *plane,
 static int dm_plane_atomic_async_check(struct drm_plane *plane,
 				       struct drm_plane_state *new_plane_state)
 {
+	struct drm_plane_state *old_plane_state =
+		drm_atomic_get_old_plane_state(new_plane_state->state, plane);
+
 	/* Only support async updates on cursor planes. */
 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
 		return -EINVAL;
 
+	/*
+	 * DRM calls prepare_fb and cleanup_fb on new_plane_state for
+	 * async commits so don't allow fb changes.
+	 */
+	if (old_plane_state->fb != new_plane_state->fb)
+		return -EINVAL;
+
 	return 0;
 }
 
@@ -4445,9 +4472,11 @@ struct dc_stream_status *dc_state_get_stream_status(
 static void update_freesync_state_on_stream(
 	struct amdgpu_display_manager *dm,
 	struct dm_crtc_state *new_crtc_state,
-	struct dc_stream_state *new_stream)
+	struct dc_stream_state *new_stream,
+	struct dc_plane_state *surface,
+	u32 flip_timestamp_in_us)
 {
-	struct mod_vrr_params vrr = {0};
+	struct mod_vrr_params vrr_params = new_crtc_state->vrr_params;
 	struct dc_info_packet vrr_infopacket = {0};
 	struct mod_freesync_config config = new_crtc_state->freesync_config;
 
@@ -4474,43 +4503,52 @@ static void update_freesync_state_on_stream(
 
 	mod_freesync_build_vrr_params(dm->freesync_module,
 				      new_stream,
-				      &config, &vrr);
+				      &config, &vrr_params);
+
+	if (surface) {
+		mod_freesync_handle_preflip(
+			dm->freesync_module,
+			surface,
+			new_stream,
+			flip_timestamp_in_us,
+			&vrr_params);
+	}
 
 	mod_freesync_build_vrr_infopacket(
 		dm->freesync_module,
 		new_stream,
-		&vrr,
+		&vrr_params,
 		PACKET_TYPE_VRR,
 		TRANSFER_FUNC_UNKNOWN,
 		&vrr_infopacket);
 
 	new_crtc_state->freesync_timing_changed =
-		(memcmp(&new_crtc_state->adjust,
-			&vrr.adjust,
-			sizeof(vrr.adjust)) != 0);
+		(memcmp(&new_crtc_state->vrr_params.adjust,
+			&vrr_params.adjust,
+			sizeof(vrr_params.adjust)) != 0);
 
 	new_crtc_state->freesync_vrr_info_changed =
 		(memcmp(&new_crtc_state->vrr_infopacket,
 			&vrr_infopacket,
 			sizeof(vrr_infopacket)) != 0);
 
-	new_crtc_state->adjust = vrr.adjust;
+	new_crtc_state->vrr_params = vrr_params;
 	new_crtc_state->vrr_infopacket = vrr_infopacket;
 
-	new_stream->adjust = new_crtc_state->adjust;
+	new_stream->adjust = new_crtc_state->vrr_params.adjust;
 	new_stream->vrr_infopacket = vrr_infopacket;
 
 	if (new_crtc_state->freesync_vrr_info_changed)
 		DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
 			      new_crtc_state->base.crtc->base.id,
 			      (int)new_crtc_state->base.vrr_enabled,
-			      (int)vrr.state);
+			      (int)vrr_params.state);
 
 	if (new_crtc_state->freesync_timing_changed)
 		DRM_DEBUG_KMS("VRR timing update: crtc=%u min=%u max=%u\n",
 			      new_crtc_state->base.crtc->base.id,
-			      vrr.adjust.v_total_min,
-			      vrr.adjust.v_total_max);
+			      vrr_params.adjust.v_total_min,
+			      vrr_params.adjust.v_total_max);
 }
 
 /*
@@ -4524,6 +4562,7 @@ static void amdgpu_dm_do_flip(struct drm_crtc *crtc,
 			      struct dc_state *state)
 {
 	unsigned long flags;
+	uint64_t timestamp_ns;
 	uint32_t target_vblank;
 	int r, vpos, hpos;
 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
@@ -4537,6 +4576,7 @@ static void amdgpu_dm_do_flip(struct drm_crtc *crtc,
 	struct dc_stream_update stream_update = {0};
 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
 	struct dc_stream_status *stream_status;
+	struct dc_plane_state *surface;
 
 
 	/* Prepare wait for target vblank early - before the fence-waits */
@@ -4586,6 +4626,9 @@ static void amdgpu_dm_do_flip(struct drm_crtc *crtc,
 	addr.address.grph.addr.high_part = upper_32_bits(afb->address);
 	addr.flip_immediate = async_flip;
 
+	timestamp_ns = ktime_get_ns();
+	addr.flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
+
 
 	if (acrtc->base.state->event)
 		prepare_flip_isr(acrtc);
@@ -4599,8 +4642,10 @@ static void amdgpu_dm_do_flip(struct drm_crtc *crtc,
 		return;
 	}
 
-	surface_updates->surface = stream_status->plane_states[0];
-	if (!surface_updates->surface) {
+	surface = stream_status->plane_states[0];
+	surface_updates->surface = surface;
+
+	if (!surface) {
 		DRM_ERROR("No surface for CRTC: id=%d\n",
 			acrtc->crtc_id);
 		return;
@@ -4611,7 +4656,9 @@ static void amdgpu_dm_do_flip(struct drm_crtc *crtc,
 	update_freesync_state_on_stream(
 		&adev->dm,
 		acrtc_state,
-		acrtc_state->stream);
+		acrtc_state->stream,
+		surface,
+		addr.flip_timestamp_in_us);
 
 	if (acrtc_state->freesync_timing_changed)
 		stream_update.adjust =
@@ -4622,7 +4669,16 @@ static void amdgpu_dm_do_flip(struct drm_crtc *crtc,
 			&acrtc_state->stream->vrr_infopacket;
 	}
 
+	/* Update surface timing information. */
+	surface->time.time_elapsed_in_us[surface->time.index] =
+		addr.flip_timestamp_in_us - surface->time.prev_update_time_in_us;
+	surface->time.prev_update_time_in_us = addr.flip_timestamp_in_us;
+	surface->time.index++;
+	if (surface->time.index >= DC_PLANE_UPDATE_TIMES_MAX)
+		surface->time.index = 0;
+
 	mutex_lock(&adev->dm.dc_lock);
 
 	dc_commit_updates_for_stream(adev->dm.dc,
 				     surface_updates,
 				     1,
@@ -5314,6 +5370,7 @@ static void get_freesync_config_for_crtc(
 		config.max_refresh_in_uhz =
 				aconnector->max_vfreq * 1000000;
 		config.vsif_supported = true;
+		config.btr = true;
 	}
 
 	new_crtc_state->freesync_config = config;
@@ -5324,8 +5381,8 @@ static void reset_freesync_config_for_crtc(
 {
 	new_crtc_state->vrr_supported = false;
 
-	memset(&new_crtc_state->adjust, 0,
-	       sizeof(new_crtc_state->adjust));
+	memset(&new_crtc_state->vrr_params, 0,
+	       sizeof(new_crtc_state->vrr_params));
 	memset(&new_crtc_state->vrr_infopacket, 0,
 	       sizeof(new_crtc_state->vrr_infopacket));
 }
@@ -268,7 +268,7 @@ struct dm_crtc_state {
 
 	bool vrr_supported;
 	struct mod_freesync_config freesync_config;
-	struct dc_crtc_timing_adjust adjust;
+	struct mod_vrr_params vrr_params;
 	struct dc_info_packet vrr_infopacket;
 
 	int abm_level;
@@ -638,6 +638,7 @@ static enum bp_result get_ss_info_v4_1(
 {
 	enum bp_result result = BP_RESULT_OK;
 	struct atom_display_controller_info_v4_1 *disp_cntl_tbl = NULL;
+	struct atom_smu_info_v3_3 *smu_info = NULL;
 
 	if (!ss_info)
 		return BP_RESULT_BADINPUT;
@@ -650,6 +651,7 @@ static enum bp_result get_ss_info_v4_1(
 	if (!disp_cntl_tbl)
 		return BP_RESULT_BADBIOSTABLE;
 
+
 	ss_info->type.STEP_AND_DELAY_INFO = false;
 	ss_info->spread_percentage_divider = 1000;
 	/* BIOS no longer uses target clock. Always enable for now */
@@ -688,6 +690,19 @@ static enum bp_result get_ss_info_v4_1(
 		 */
 		result = BP_RESULT_UNSUPPORTED;
 		break;
+	case AS_SIGNAL_TYPE_XGMI:
+		smu_info = GET_IMAGE(struct atom_smu_info_v3_3,
+				     DATA_TABLES(smu_info));
+		if (!smu_info)
+			return BP_RESULT_BADBIOSTABLE;
+
+		ss_info->spread_spectrum_percentage =
+				smu_info->waflclk_ss_percentage;
+		ss_info->spread_spectrum_range =
+				smu_info->gpuclk_ss_rate_10hz * 10;
+		if (smu_info->waflclk_ss_mode & ATOM_SS_CENTRE_SPREAD_MODE)
+			ss_info->type.CENTER_MODE = true;
+		break;
 	default:
 		result = BP_RESULT_UNSUPPORTED;
 	}
@@ -67,6 +67,7 @@ bool dal_bios_parser_init_cmd_tbl_helper2(
 		return true;
 #endif
 	case DCE_VERSION_12_0:
+	case DCE_VERSION_12_1:
 		*h = dal_cmd_tbl_helper_dce112_get_table2();
 		return true;
 
@@ -151,10 +151,6 @@ static bool create_links(
 		return false;
 	}
 
-	if (connectors_num == 0 && num_virtual_links == 0) {
-		dm_error("DC: Number of connectors is zero!\n");
-	}
-
 	dm_output_to_console(
 		"DC: %s: connectors_num: physical:%d, virtual:%d\n",
 		__func__,
@@ -1471,7 +1467,8 @@ static void commit_planes_do_stream_update(struct dc *dc,
 
 		if ((stream_update->hdr_static_metadata && !stream->use_dynamic_meta) ||
 				stream_update->vrr_infopacket ||
-				stream_update->vsc_infopacket) {
+				stream_update->vsc_infopacket ||
+				stream_update->vsp_infopacket) {
 			resource_build_info_frame(pipe_ctx);
 			dc->hwss.update_info_frame(pipe_ctx);
 		}
@@ -1573,9 +1570,6 @@ static void commit_planes_for_stream(struct dc *dc,
 		}
 	}
 
-	if (update_type == UPDATE_TYPE_FULL)
-		context_timing_trace(dc, &context->res_ctx);
-
 	// Update Type FAST, Surface updates
 	if (update_type == UPDATE_TYPE_FAST) {
 		/* Lock the top pipe while updating plane addrs, since freesync requires
@@ -215,6 +215,9 @@ bool dc_link_detect_sink(struct dc_link *link, enum dc_connection_type *type)
 		return true;
 	}
 
+	if (link->connector_signal == SIGNAL_TYPE_EDP)
+		link->dc->hwss.edp_wait_for_hpd_ready(link, true);
+
 	/* todo: may need to lock gpio access */
 	hpd_pin = get_hpd_gpio(link->ctx->dc_bios, link->link_id, link->ctx->gpio_service);
 	if (hpd_pin == NULL)
@@ -339,7 +342,7 @@ bool dc_link_is_dp_sink_present(struct dc_link *link)
 {
 	enum gpio_result gpio_result;
 	uint32_t clock_pin = 0;
-
+	uint8_t retry = 0;
 	struct ddc *ddc;
 
 	enum connector_id connector_id =
@@ -368,11 +371,22 @@ bool dc_link_is_dp_sink_present(struct dc_link *link)
 		return present;
 	}
 
-	/* Read GPIO: DP sink is present if both clock and data pins are zero */
-	/* [anaumov] in DAL2, there was no check for GPIO failure */
-
-	gpio_result = dal_gpio_get_value(ddc->pin_clock, &clock_pin);
-	ASSERT(gpio_result == GPIO_RESULT_OK);
+	/*
+	 * Read GPIO: DP sink is present if both clock and data pins are zero
+	 *
+	 * [W/A] plug-unplug DP cable, sometimes customer board has
+	 * one short pulse on clk_pin(1V, < 1ms). DP will be config to HDMI/DVI
+	 * then monitor can't br light up. Add retry 3 times
+	 * But in real passive dongle, it need additional 3ms to detect
+	 */
+	do {
+		gpio_result = dal_gpio_get_value(ddc->pin_clock, &clock_pin);
+		ASSERT(gpio_result == GPIO_RESULT_OK);
+		if (clock_pin)
+			udelay(1000);
+		else
+			break;
+	} while (retry++ < 3);
 
 	present = (gpio_result == GPIO_RESULT_OK) && !clock_pin;
 
@@ -703,12 +717,26 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
 		if (memcmp(&link->dpcd_caps, &prev_dpcd_caps, sizeof(struct dpcd_caps)))
 			same_dpcd = false;
 	}
-	/* Active dongle downstream unplug */
+	/* Active dongle plug in without display or downstream unplug*/
 	if (link->type == dc_connection_active_dongle
 			&& link->dpcd_caps.sink_count.
 			bits.SINK_COUNT == 0) {
-		if (prev_sink != NULL)
+		if (prev_sink != NULL) {
+			/* Downstream unplug */
 			dc_sink_release(prev_sink);
+		} else {
+			/* Empty dongle plug in */
+			for (i = 0; i < LINK_TRAINING_MAX_VERIFY_RETRY; i++) {
+				int fail_count = 0;
+
+				dp_verify_link_cap(link,
+						   &link->reported_link_cap,
+						   &fail_count);
+
+				if (fail_count == 0)
+					break;
+			}
+		}
 		return true;
 	}
 
@@ -2622,11 +2650,11 @@ void core_link_disable_stream(struct pipe_ctx *pipe_ctx, int option)
 {
 	struct dc *core_dc = pipe_ctx->stream->ctx->dc;
 
-	core_dc->hwss.blank_stream(pipe_ctx);
-
 	if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
 		deallocate_mst_payload(pipe_ctx);
 
+	core_dc->hwss.blank_stream(pipe_ctx);
+
 	core_dc->hwss.disable_stream(pipe_ctx, option);
 
 	disable_link(pipe_ctx->stream->sink->link, pipe_ctx->stream->signal);
@@ -1089,6 +1089,121 @@ static struct dc_link_settings get_max_link_cap(struct dc_link *link)
 	return max_link_cap;
 }
 
+static enum dc_status read_hpd_rx_irq_data(
+	struct dc_link *link,
+	union hpd_irq_data *irq_data)
+{
+	static enum dc_status retval;
+
+	/* The HW reads 16 bytes from 200h on HPD,
+	 * but if we get an AUX_DEFER, the HW cannot retry
+	 * and this causes the CTS tests 4.3.2.1 - 3.2.4 to
+	 * fail, so we now explicitly read 6 bytes which is
+	 * the req from the above mentioned test cases.
+	 *
+	 * For DP 1.4 we need to read those from 2002h range.
+	 */
+	if (link->dpcd_caps.dpcd_rev.raw < DPCD_REV_14)
+		retval = core_link_read_dpcd(
+			link,
+			DP_SINK_COUNT,
+			irq_data->raw,
+			sizeof(union hpd_irq_data));
+	else {
+		/* Read 14 bytes in a single read and then copy only the required fields.
+		 * This is more efficient than doing it in two separate AUX reads. */
+
+		uint8_t tmp[DP_SINK_STATUS_ESI - DP_SINK_COUNT_ESI + 1];
+
+		retval = core_link_read_dpcd(
+			link,
+			DP_SINK_COUNT_ESI,
+			tmp,
+			sizeof(tmp));
+
+		if (retval != DC_OK)
+			return retval;
+
+		irq_data->bytes.sink_cnt.raw = tmp[DP_SINK_COUNT_ESI - DP_SINK_COUNT_ESI];
+		irq_data->bytes.device_service_irq.raw = tmp[DP_DEVICE_SERVICE_IRQ_VECTOR_ESI0 - DP_SINK_COUNT_ESI];
+		irq_data->bytes.lane01_status.raw = tmp[DP_LANE0_1_STATUS_ESI - DP_SINK_COUNT_ESI];
+		irq_data->bytes.lane23_status.raw = tmp[DP_LANE2_3_STATUS_ESI - DP_SINK_COUNT_ESI];
+		irq_data->bytes.lane_status_updated.raw = tmp[DP_LANE_ALIGN_STATUS_UPDATED_ESI - DP_SINK_COUNT_ESI];
+		irq_data->bytes.sink_status.raw = tmp[DP_SINK_STATUS_ESI - DP_SINK_COUNT_ESI];
+	}
+
+	return retval;
+}
+
+static bool hpd_rx_irq_check_link_loss_status(
+	struct dc_link *link,
+	union hpd_irq_data *hpd_irq_dpcd_data)
+{
+	uint8_t irq_reg_rx_power_state = 0;
+	enum dc_status dpcd_result = DC_ERROR_UNEXPECTED;
+	union lane_status lane_status;
+	uint32_t lane;
+	bool sink_status_changed;
+	bool return_code;
+
+	sink_status_changed = false;
+	return_code = false;
+
+	if (link->cur_link_settings.lane_count == 0)
+		return return_code;
+
+	/*1. Check that Link Status changed, before re-training.*/
+
+	/*parse lane status*/
+	for (lane = 0; lane < link->cur_link_settings.lane_count; lane++) {
+		/* check status of lanes 0,1
+		 * changed DpcdAddress_Lane01Status (0x202)
+		 */
+		lane_status.raw = get_nibble_at_index(
+				&hpd_irq_dpcd_data->bytes.lane01_status.raw,
+				lane);
+
+		if (!lane_status.bits.CHANNEL_EQ_DONE_0 ||
+			!lane_status.bits.CR_DONE_0 ||
+			!lane_status.bits.SYMBOL_LOCKED_0) {
+			/* if one of the channel equalization, clock
+			 * recovery or symbol lock is dropped
+			 * consider it as (link has been
+			 * dropped) dp sink status has changed
+			 */
+			sink_status_changed = true;
+			break;
+		}
+	}
+
+	/* Check interlane align.*/
+	if (sink_status_changed ||
+		!hpd_irq_dpcd_data->bytes.lane_status_updated.bits.INTERLANE_ALIGN_DONE) {
+
+		DC_LOG_HW_HPD_IRQ("%s: Link Status changed.\n", __func__);
+
+		return_code = true;
+
+		/*2. Check that we can handle interrupt: Not in FS DOS,
+		 * Not in "Display Timeout" state, Link is trained.
+		 */
+		dpcd_result = core_link_read_dpcd(link,
+			DP_SET_POWER,
+			&irq_reg_rx_power_state,
+			sizeof(irq_reg_rx_power_state));
+
+		if (dpcd_result != DC_OK) {
+			DC_LOG_HW_HPD_IRQ("%s: DPCD read failed to obtain power state.\n",
+				__func__);
+		} else {
+			if (irq_reg_rx_power_state != DP_SET_POWER_D0)
+				return_code = false;
+		}
+	}
+
+	return return_code;
+}
+
 bool dp_verify_link_cap(
 	struct dc_link *link,
 	struct dc_link_settings *known_limit_link_setting,
@@ -1104,12 +1219,14 @@ bool dp_verify_link_cap(
 	struct clock_source *dp_cs;
 	enum clock_source_id dp_cs_id = CLOCK_SOURCE_ID_EXTERNAL;
 	enum link_training_result status;
+	union hpd_irq_data irq_data;
 
 	if (link->dc->debug.skip_detection_link_training) {
 		link->verified_link_cap = *known_limit_link_setting;
 		return true;
 	}
 
+	memset(&irq_data, 0, sizeof(irq_data));
 	success = false;
 	skip_link_training = false;
 
@@ -1168,9 +1285,15 @@ bool dp_verify_link_cap(
 			(*fail_count)++;
 		}
 
-		if (success)
+		if (success) {
 			link->verified_link_cap = *cur;
+			udelay(1000);
+			if (read_hpd_rx_irq_data(link, &irq_data) == DC_OK)
+				if (hpd_rx_irq_check_link_loss_status(
+						link,
+						&irq_data))
+					(*fail_count)++;
+		}
 		/* always disable the link before trying another
 		 * setting or before returning we'll enable it later
 		 * based on the actual mode we're driving
@@ -1572,122 +1695,6 @@ void decide_link_settings(struct dc_stream_state *stream,
 }
 
 /*************************Short Pulse IRQ***************************/
 
-static bool hpd_rx_irq_check_link_loss_status(
-	struct dc_link *link,
-	union hpd_irq_data *hpd_irq_dpcd_data)
-{
-	uint8_t irq_reg_rx_power_state = 0;
-	enum dc_status dpcd_result = DC_ERROR_UNEXPECTED;
-	union lane_status lane_status;
-	uint32_t lane;
-	bool sink_status_changed;
-	bool return_code;
-
-	sink_status_changed = false;
-	return_code = false;
-
-	if (link->cur_link_settings.lane_count == 0)
-		return return_code;
-
-	/*1. Check that Link Status changed, before re-training.*/
-
-	/*parse lane status*/
-	for (lane = 0; lane < link->cur_link_settings.lane_count; lane++) {
-		/* check status of lanes 0,1
-		 * changed DpcdAddress_Lane01Status (0x202)
-		 */
-		lane_status.raw = get_nibble_at_index(
-				&hpd_irq_dpcd_data->bytes.lane01_status.raw,
-				lane);
-
-		if (!lane_status.bits.CHANNEL_EQ_DONE_0 ||
-			!lane_status.bits.CR_DONE_0 ||
-			!lane_status.bits.SYMBOL_LOCKED_0) {
-			/* if one of the channel equalization, clock
-			 * recovery or symbol lock is dropped
-			 * consider it as (link has been
-			 * dropped) dp sink status has changed
-			 */
-			sink_status_changed = true;
-			break;
-		}
-	}
-
-	/* Check interlane align.*/
-	if (sink_status_changed ||
-		!hpd_irq_dpcd_data->bytes.lane_status_updated.bits.INTERLANE_ALIGN_DONE) {
-
-		DC_LOG_HW_HPD_IRQ("%s: Link Status changed.\n", __func__);
-
-		return_code = true;
-
-		/*2. Check that we can handle interrupt: Not in FS DOS,
-		 * Not in "Display Timeout" state, Link is trained.
-		 */
-		dpcd_result = core_link_read_dpcd(link,
-			DP_SET_POWER,
-			&irq_reg_rx_power_state,
-			sizeof(irq_reg_rx_power_state));
-
-		if (dpcd_result != DC_OK) {
-			DC_LOG_HW_HPD_IRQ("%s: DPCD read failed to obtain power state.\n",
-				__func__);
-		} else {
-			if (irq_reg_rx_power_state != DP_SET_POWER_D0)
-				return_code = false;
-		}
-	}
-
-	return return_code;
-}
-
-static enum dc_status read_hpd_rx_irq_data(
-	struct dc_link *link,
-	union hpd_irq_data *irq_data)
-{
-	static enum dc_status retval;
-
-	/* The HW reads 16 bytes from 200h on HPD,
-	 * but if we get an AUX_DEFER, the HW cannot retry
-	 * and this causes the CTS tests 4.3.2.1 - 3.2.4 to
-	 * fail, so we now explicitly read 6 bytes which is
-	 * the req from the above mentioned test cases.
-	 *
-	 * For DP 1.4 we need to read those from 2002h range.
-	 */
-	if (link->dpcd_caps.dpcd_rev.raw < DPCD_REV_14)
-		retval = core_link_read_dpcd(
-			link,
-			DP_SINK_COUNT,
-			irq_data->raw,
-			sizeof(union hpd_irq_data));
-	else {
-		/* Read 14 bytes in a single read and then copy only the required fields.
-		 * This is more efficient than doing it in two separate AUX reads. */
-
-		uint8_t tmp[DP_SINK_STATUS_ESI - DP_SINK_COUNT_ESI + 1];
-
-		retval = core_link_read_dpcd(
-			link,
-			DP_SINK_COUNT_ESI,
-			tmp,
-			sizeof(tmp));
-
-		if (retval != DC_OK)
-			return retval;
-
-		irq_data->bytes.sink_cnt.raw = tmp[DP_SINK_COUNT_ESI - DP_SINK_COUNT_ESI];
-		irq_data->bytes.device_service_irq.raw = tmp[DP_DEVICE_SERVICE_IRQ_VECTOR_ESI0 - DP_SINK_COUNT_ESI];
-		irq_data->bytes.lane01_status.raw = tmp[DP_LANE0_1_STATUS_ESI - DP_SINK_COUNT_ESI];
-		irq_data->bytes.lane23_status.raw = tmp[DP_LANE2_3_STATUS_ESI - DP_SINK_COUNT_ESI];
-		irq_data->bytes.lane_status_updated.raw = tmp[DP_LANE_ALIGN_STATUS_UPDATED_ESI - DP_SINK_COUNT_ESI];
-		irq_data->bytes.sink_status.raw = tmp[DP_SINK_STATUS_ESI - DP_SINK_COUNT_ESI];
-	}
-
-	return retval;
-}
-
 static bool allow_hpd_rx_irq(const struct dc_link *link)
 {
 	/*
@@ -2240,7 +2247,8 @@ static void get_active_converter_info(
 				translate_dpcd_max_bpc(
 					hdmi_color_caps.bits.MAX_BITS_PER_COLOR_COMPONENT);
 
-			link->dpcd_caps.dongle_caps.extendedCapValid = true;
+			if (link->dpcd_caps.dongle_caps.dp_hdmi_max_pixel_clk != 0)
+				link->dpcd_caps.dongle_caps.extendedCapValid = true;
 		}
 
 		break;
@@ -96,6 +96,7 @@ void dp_enable_link_phy(
 			link_settings,
 			clock_source);
 	}
+	link->cur_link_settings = *link_settings;
 
 	dp_receiver_power_ctrl(link, true);
 }
@@ -307,6 +308,7 @@ void dp_retrain_link_dp_test(struct dc_link *link,
 			link->link_enc,
 			link_setting,
 			pipes[i].clock_source->id);
+		link->cur_link_settings = *link_setting;
 
 		dp_receiver_power_ctrl(link, true);
 
@@ -316,7 +318,6 @@ void dp_retrain_link_dp_test(struct dc_link *link,
 			skip_video_pattern,
 			LINK_TRAINING_ATTEMPTS);
 
-		link->cur_link_settings = *link_setting;
 
 		link->dc->hwss.enable_stream(&pipes[i]);
 
@@ -83,7 +83,10 @@ enum dce_version resource_parse_asic_id(struct hw_asic_id asic_id)
 		dc_version = DCE_VERSION_11_22;
 		break;
 	case FAMILY_AI:
-		dc_version = DCE_VERSION_12_0;
+		if (ASICREV_IS_VEGA20_P(asic_id.hw_internal_rev))
+			dc_version = DCE_VERSION_12_1;
+		else
+			dc_version = DCE_VERSION_12_0;
 		break;
 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
 	case FAMILY_RV:
@@ -136,6 +139,7 @@ struct resource_pool *dc_create_resource_pool(
 				num_virtual_links, dc);
 		break;
 	case DCE_VERSION_12_0:
+	case DCE_VERSION_12_1:
 		res_pool = dce120_create_resource_pool(
 				num_virtual_links, dc);
 		break;
@@ -234,14 +234,14 @@ uint32_t generic_reg_wait(const struct dc_context *ctx,
 		if (field_value == condition_value) {
 			if (i * delay_between_poll_us > 1000 &&
 					!IS_FPGA_MAXIMUS_DC(ctx->dce_environment))
-				dm_output_to_console("REG_WAIT taking a while: %dms in %s line:%d\n",
+				DC_LOG_DC("REG_WAIT taking a while: %dms in %s line:%d\n",
 						delay_between_poll_us * i / 1000,
 						func_name, line);
 			return reg_val;
 		}
 	}
 
-	dm_error("REG_WAIT timeout %dus * %d tries - %s line:%d\n",
+	DC_LOG_WARNING("REG_WAIT timeout %dus * %d tries - %s line:%d\n",
 			delay_between_poll_us, time_out_num_tries,
 			func_name, line);
 
|
|||
/*swaped & float*/
|
||||
SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F,
|
||||
/*grow graphics here if necessary */
|
||||
SURFACE_PIXEL_FORMAT_VIDEO_AYCrCb8888,
|
||||
SURFACE_PIXEL_FORMAT_VIDEO_BEGIN,
|
||||
SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr =
|
||||
SURFACE_PIXEL_FORMAT_VIDEO_BEGIN,
|
||||
|
@ -200,6 +199,7 @@ enum surface_pixel_format {
|
|||
SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr,
|
||||
SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb,
|
||||
SURFACE_PIXEL_FORMAT_SUBSAMPLE_END,
|
||||
SURFACE_PIXEL_FORMAT_VIDEO_AYCrCb8888,
|
||||
SURFACE_PIXEL_FORMAT_INVALID
|
||||
|
||||
/*grow 444 video here if necessary */
|
||||
|
|
|
@@ -676,6 +676,11 @@ static void dce112_update_clocks(struct clk_mgr *clk_mgr,
 {
 	struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
 	struct dm_pp_power_level_change_request level_change_req;
+	int unpatched_disp_clk = context->bw.dce.dispclk_khz;
+
+	/*TODO: W/A for dal3 linux, investigate why this works */
+	if (!clk_mgr_dce->dfs_bypass_active)
+		context->bw.dce.dispclk_khz = context->bw.dce.dispclk_khz * 115 / 100;

 	level_change_req.power_level = dce_get_required_clocks_state(clk_mgr, context);
 	/* get max clock state from PPLIB */

@@ -690,6 +695,8 @@ static void dce112_update_clocks(struct clk_mgr *clk_mgr,
 		clk_mgr->clks.dispclk_khz = context->bw.dce.dispclk_khz;
 	}
 	dce11_pplib_apply_display_requirements(clk_mgr->ctx->dc, context);
+
+	context->bw.dce.dispclk_khz = unpatched_disp_clk;
 }

 static void dce12_update_clocks(struct clk_mgr *clk_mgr,
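The two hunks above form a save/patch/restore pair: the requested display clock is bumped 15% for the one consumer that needs the workaround, then put back so later readers of the context see the unpatched value. A sketch of that idiom, with bw_context and apply_display_requirements() as illustrative stand-ins:

	#include <stdio.h>

	struct bw_context {
		int dispclk_khz;
	};

	/* Stand-in for dce11_pplib_apply_display_requirements(). */
	static void apply_display_requirements(const struct bw_context *ctx)
	{
		printf("requesting dispclk %d kHz\n", ctx->dispclk_khz);
	}

	/* Patch the requested clock for one consumer, then restore the
	 * caller's value so later readers see the unpatched number. */
	static void update_clocks(struct bw_context *ctx, int dfs_bypass_active)
	{
		int unpatched_disp_clk = ctx->dispclk_khz;	/* save */

		if (!dfs_bypass_active)
			ctx->dispclk_khz = ctx->dispclk_khz * 115 / 100; /* +15% */

		apply_display_requirements(ctx);	/* sees the patched value */

		ctx->dispclk_khz = unpatched_disp_clk;	/* restore */
	}

	int main(void)
	{
		struct bw_context ctx = { .dispclk_khz = 600000 };

		update_clocks(&ctx, 0);
		printf("context still holds %d kHz\n", ctx.dispclk_khz);
		return 0;
	}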
@@ -1267,10 +1267,19 @@ static void program_scaler(const struct dc *dc,
 			pipe_ctx->plane_res.scl_data.lb_params.depth,
 			&pipe_ctx->stream->bit_depth_params);

-	if (pipe_ctx->stream_res.tg->funcs->set_overscan_blank_color)
+	if (pipe_ctx->stream_res.tg->funcs->set_overscan_blank_color) {
+		/*
+		 * The way 420 is packed, 2 channels carry Y component, 1 channel
+		 * alternate between Cb and Cr, so both channels need the pixel
+		 * value for Y
+		 */
+		if (pipe_ctx->stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
+			color.color_r_cr = color.color_g_y;
+
 		pipe_ctx->stream_res.tg->funcs->set_overscan_blank_color(
 			pipe_ctx->stream_res.tg,
 			&color);
+	}

 	pipe_ctx->plane_res.xfm->funcs->transform_set_scaler(pipe_ctx->plane_res.xfm,
 			&pipe_ctx->plane_res.scl_data);
@@ -2159,6 +2159,15 @@ static void dcn10_blank_pixel_data(
 		color_space = stream->output_color_space;
 		color_space_to_black_color(dc, color_space, &black_color);

+	/*
+	 * The way 420 is packed, 2 channels carry Y component, 1 channel
+	 * alternate between Cb and Cr, so both channels need the pixel
+	 * value for Y
+	 */
+	if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
+		black_color.color_r_cr = black_color.color_g_y;
+

 	if (stream_res->tg->funcs->set_blank_color)
 		stream_res->tg->funcs->set_blank_color(
 			stream_res->tg,
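Both 4:2:0 hunks encode the same hardware quirk: in the 4:2:0 packing, the channel nominally carrying Cr alternates between Cb and Cr and also carries Y samples, so it must be programmed with the Y value of the blank color. A self-contained sketch, assuming representative 10-bit limited-range video black values (the struct and enum are simplified stand-ins):

	#include <stdint.h>
	#include <stdio.h>

	enum pixel_encoding { ENC_RGB, ENC_YCBCR444, ENC_YCBCR420 };

	struct tg_color {
		uint32_t color_g_y;	/* carries G or Y */
		uint32_t color_b_cb;	/* carries B or Cb */
		uint32_t color_r_cr;	/* carries R or Cr -- or Y again for 4:2:0 */
	};

	/* For 4:2:0 the "Cr" channel carries Y samples as well, so it must
	 * be programmed with the Y value of the black color. */
	static void fixup_black_for_420(struct tg_color *c, enum pixel_encoding enc)
	{
		if (enc == ENC_YCBCR420)
			c->color_r_cr = c->color_g_y;
	}

	int main(void)
	{
		/* 10-bit limited-range video black: Y=0x040, Cb=Cr=0x200 */
		struct tg_color black = { .color_g_y = 0x040,
					  .color_b_cb = 0x200,
					  .color_r_cr = 0x200 };

		fixup_black_for_420(&black, ENC_YCBCR420);
		printf("cr channel now 0x%03x\n", black.color_r_cr); /* 0x040 */
		return 0;
	}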
@@ -2348,7 +2357,8 @@ static void dcn10_apply_ctx_for_surface(
 		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

 		/* Skip inactive pipes and ones already updated */
-		if (!pipe_ctx->stream || pipe_ctx->stream == stream)
+		if (!pipe_ctx->stream || pipe_ctx->stream == stream
+				|| !pipe_ctx->plane_state)
 			continue;

 		pipe_ctx->stream_res.tg->funcs->lock(pipe_ctx->stream_res.tg);

@@ -2362,7 +2372,8 @@ static void dcn10_apply_ctx_for_surface(
 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
 		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

-		if (!pipe_ctx->stream || pipe_ctx->stream == stream)
+		if (!pipe_ctx->stream || pipe_ctx->stream == stream
+				|| !pipe_ctx->plane_state)
 			continue;

 		dcn10_pipe_control_lock(dc, pipe_ctx, false);
@@ -79,6 +79,7 @@ bool dal_hw_factory_init(
 		dal_hw_factory_dce110_init(factory);
 		return true;
 	case DCE_VERSION_12_0:
+	case DCE_VERSION_12_1:
 		dal_hw_factory_dce120_init(factory);
 		return true;
 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)

@@ -76,6 +76,7 @@ bool dal_hw_translate_init(
 		dal_hw_translate_dce110_init(translate);
 		return true;
 	case DCE_VERSION_12_0:
+	case DCE_VERSION_12_1:
 		dal_hw_translate_dce120_init(translate);
 		return true;
 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)

@@ -90,6 +90,7 @@ struct i2caux *dal_i2caux_create(
 	case DCE_VERSION_10_0:
 		return dal_i2caux_dce100_create(ctx);
 	case DCE_VERSION_12_0:
+	case DCE_VERSION_12_1:
 		return dal_i2caux_dce120_create(ctx);
 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
 	case DCN_VERSION_1_0:

@@ -41,6 +41,7 @@ enum as_signal_type {
 	AS_SIGNAL_TYPE_LVDS,
 	AS_SIGNAL_TYPE_DISPLAY_PORT,
 	AS_SIGNAL_TYPE_GPU_PLL,
+	AS_SIGNAL_TYPE_XGMI,
 	AS_SIGNAL_TYPE_UNKNOWN
 };

@@ -42,6 +42,7 @@ enum dce_version {
 	DCE_VERSION_11_2,
 	DCE_VERSION_11_22,
 	DCE_VERSION_12_0,
+	DCE_VERSION_12_1,
 	DCE_VERSION_MAX,
 	DCN_VERSION_1_0,
 #if defined(CONFIG_DRM_AMD_DC_DCN1_01)
@@ -49,6 +49,10 @@
 #include "soc15_common.h"
 #include "smuio/smuio_9_0_offset.h"
 #include "smuio/smuio_9_0_sh_mask.h"
+#include "nbio/nbio_7_4_sh_mask.h"
+
+#define smnPCIE_LC_SPEED_CNTL			0x11140290
+#define smnPCIE_LC_LINK_WIDTH_CNTL		0x11140288

 static void vega20_set_default_registry_data(struct pp_hwmgr *hwmgr)
 {

@@ -2282,6 +2286,18 @@ static int vega20_force_clock_level(struct pp_hwmgr *hwmgr,
 		break;

+	case PP_PCIE:
+		soft_min_level = mask ? (ffs(mask) - 1) : 0;
+		soft_max_level = mask ? (fls(mask) - 1) : 0;
+		if (soft_min_level >= NUM_LINK_LEVELS ||
+		    soft_max_level >= NUM_LINK_LEVELS)
+			return -EINVAL;
+
+		ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+			PPSMC_MSG_SetMinLinkDpmByIndex, soft_min_level);
+		PP_ASSERT_WITH_CODE(!ret,
+			"Failed to set min link dpm level!",
+			return ret);
+
+		break;
+
 	default:
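The PP_PCIE case converts a user-supplied level bitmask into min/max DPM indices with ffs()/fls(): bit i set means "allow level i". A runnable sketch of that conversion, with a portable stand-in for the kernel's fls():

	#include <stdio.h>
	#include <strings.h>	/* ffs() */

	#define NUM_LINK_LEVELS 3

	/* Portable stand-in for the kernel's fls(): highest set bit, 1-based. */
	static int fls_u32(unsigned int x)
	{
		int n = 0;

		while (x) {
			n++;
			x >>= 1;
		}
		return n;
	}

	/* Turn a "bit i allows level i" mask into [min, max] DPM indices
	 * and reject bits beyond the table, as the PP_PCIE case does. */
	static int mask_to_levels(unsigned int mask, int *min_lvl, int *max_lvl)
	{
		*min_lvl = mask ? ffs(mask) - 1 : 0;		/* lowest set bit */
		*max_lvl = mask ? fls_u32(mask) - 1 : 0;	/* highest set bit */

		if (*min_lvl >= NUM_LINK_LEVELS || *max_lvl >= NUM_LINK_LEVELS)
			return -1;	/* -EINVAL in the real code */
		return 0;
	}

	int main(void)
	{
		int lo, hi;

		if (mask_to_levels(0x6, &lo, &hi) == 0)		/* levels 1 and 2 */
			printf("min=%d max=%d\n", lo, hi);	/* min=1 max=2 */
		return 0;
	}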
@@ -2758,9 +2774,14 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
 			data->od8_settings.od8_settings_array;
 	OverDriveTable_t *od_table =
 			&(data->smc_state_table.overdrive_table);
+	struct phm_ppt_v3_information *pptable_information =
+		(struct phm_ppt_v3_information *)hwmgr->pptable;
+	PPTable_t *pptable = (PPTable_t *)pptable_information->smc_pptable;
+	struct amdgpu_device *adev = hwmgr->adev;
 	struct pp_clock_levels_with_latency clocks;
 	int i, now, size = 0;
 	int ret = 0;
+	uint32_t gen_speed, lane_width;

 	switch (type) {
 	case PP_SCLK:

@@ -2798,6 +2819,28 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
 		break;

+	case PP_PCIE:
+		gen_speed = (RREG32_PCIE(smnPCIE_LC_SPEED_CNTL) &
+			     PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK)
+			    >> PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;
+		lane_width = (RREG32_PCIE(smnPCIE_LC_LINK_WIDTH_CNTL) &
+			      PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK)
+			     >> PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT;
+		for (i = 0; i < NUM_LINK_LEVELS; i++)
+			size += sprintf(buf + size, "%d: %s %s %dMhz %s\n", i,
+					(pptable->PcieGenSpeed[i] == 0) ? "2.5GT/s," :
+					(pptable->PcieGenSpeed[i] == 1) ? "5.0GT/s," :
+					(pptable->PcieGenSpeed[i] == 2) ? "8.0GT/s," :
+					(pptable->PcieGenSpeed[i] == 3) ? "16.0GT/s," : "",
+					(pptable->PcieLaneCount[i] == 1) ? "x1" :
+					(pptable->PcieLaneCount[i] == 2) ? "x2" :
+					(pptable->PcieLaneCount[i] == 3) ? "x4" :
+					(pptable->PcieLaneCount[i] == 4) ? "x8" :
+					(pptable->PcieLaneCount[i] == 5) ? "x12" :
+					(pptable->PcieLaneCount[i] == 6) ? "x16" : "",
+					pptable->LclkFreq[i],
+					(gen_speed == pptable->PcieGenSpeed[i]) &&
+					(lane_width == pptable->PcieLaneCount[i]) ?
+					"*" : "");
+		break;

 	case OD_SCLK:
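The current link state is recovered here by masking a status register and shifting the field down. The mask/shift values below are illustrative only (the real LC_CURRENT_DATA_RATE and LC_LINK_WIDTH_RD definitions live in the asic headers), but the extraction pattern is the same:

	#include <stdint.h>
	#include <stdio.h>

	/* Illustrative field definitions, not the real PCIE registers. */
	#define SPEED_MASK	0x0000000cu
	#define SPEED_SHIFT	2u
	#define WIDTH_MASK	0x00000070u
	#define WIDTH_SHIFT	4u

	/* Extract a register field the way the hunk does: mask, then shift. */
	static uint32_t reg_field(uint32_t reg, uint32_t mask, uint32_t shift)
	{
		return (reg & mask) >> shift;
	}

	int main(void)
	{
		uint32_t reg = 0x4c;	/* pretend status register value */

		printf("gen code %u, width code %u\n",
		       reg_field(reg, SPEED_MASK, SPEED_SHIFT),
		       reg_field(reg, WIDTH_MASK, WIDTH_SHIFT));
		return 0;
	}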
@@ -241,6 +241,7 @@ void __drm_atomic_helper_plane_duplicate_state(struct drm_plane *plane,

 	state->fence = NULL;
 	state->commit = NULL;
+	state->fb_damage_clips = NULL;
 }
 EXPORT_SYMBOL(__drm_atomic_helper_plane_duplicate_state);

@@ -285,6 +286,8 @@ void __drm_atomic_helper_plane_destroy_state(struct drm_plane_state *state)

 	if (state->commit)
 		drm_crtc_commit_put(state->commit);
+
+	drm_property_blob_put(state->fb_damage_clips);
 }
 EXPORT_SYMBOL(__drm_atomic_helper_plane_destroy_state);
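Together these two hunks restore a reference-counting invariant: a duplicated plane state must not inherit a blob reference it never took, and destroy must drop exactly the references the state owns. A miniature model of that lifecycle (types and helpers are simplified stand-ins, not the drm API):

	#include <stdio.h>
	#include <stdlib.h>

	struct blob {
		int refcount;
	};

	struct plane_state {
		struct blob *fb_damage_clips;
	};

	static void blob_put(struct blob *b)
	{
		if (b && --b->refcount == 0) {
			printf("blob freed\n");
			free(b);
		}
	}

	static void duplicate_state(const struct plane_state *old,
				    struct plane_state *new)
	{
		*new = *old;
		new->fb_damage_clips = NULL;	/* no reference was taken */
	}

	static void destroy_state(struct plane_state *s)
	{
		blob_put(s->fb_damage_clips);	/* balances the ref it owns */
	}

	int main(void)
	{
		struct blob *b = calloc(1, sizeof(*b));
		struct plane_state a = { .fb_damage_clips = b }, copy;

		b->refcount = 1;	/* state 'a' owns one reference */
		duplicate_state(&a, &copy);
		destroy_state(&copy);	/* no-op: pointer was cleared */
		destroy_state(&a);	/* drops the last reference */
		return 0;
	}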
@@ -178,7 +178,7 @@ int drm_atomic_helper_dirtyfb(struct drm_framebuffer *fb,
 	state = drm_atomic_state_alloc(fb->dev);
 	if (!state) {
 		ret = -ENOMEM;
-		goto out;
+		goto out_drop_locks;
 	}
 	state->acquire_ctx = &ctx;

@@ -238,6 +238,7 @@ int drm_atomic_helper_dirtyfb(struct drm_framebuffer *fb,
 	kfree(rects);
 	drm_atomic_state_put(state);

+out_drop_locks:
 	drm_modeset_drop_locks(&ctx);
 	drm_modeset_acquire_fini(&ctx);
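This is the classic staged-cleanup goto: allocation failure must jump past the state put (there is no state yet) but still through the lock teardown, so -ENOMEM no longer dereferences a NULL state. A sketch of the fixed shape, with stubs standing in for the drm helpers:

	#include <stdio.h>
	#include <stdlib.h>

	static void acquire_locks(void) { /* drm_modeset_acquire_init + locking */ }
	static void drop_locks(void)    { /* drm_modeset_drop_locks + acquire_fini */ }

	/* Allocation failure jumps *past* the state put (nothing allocated)
	 * but still *through* the lock teardown, so no lock is leaked. */
	static int dirtyfb(void)
	{
		void *state;
		int ret = 0;

		acquire_locks();

		state = malloc(64);		/* drm_atomic_state_alloc() stand-in */
		if (!state) {
			ret = -1;		/* -ENOMEM in the real code */
			goto out_drop_locks;	/* not "out": no state to put yet */
		}

		/* ... build and commit the atomic state ... */

		free(state);			/* drm_atomic_state_put() stand-in */
	out_drop_locks:
		drop_locks();
		return ret;
	}

	int main(void)
	{
		return dirtyfb() ? EXIT_FAILURE : EXIT_SUCCESS;
	}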
@@ -1900,11 +1900,11 @@ static struct cmd_info cmd_info[] = {

 	{"MI_URB_CLEAR", OP_MI_URB_CLEAR, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},

-	{"ME_SEMAPHORE_SIGNAL", OP_MI_SEMAPHORE_SIGNAL, F_LEN_VAR, R_ALL,
+	{"MI_SEMAPHORE_SIGNAL", OP_MI_SEMAPHORE_SIGNAL, F_LEN_VAR, R_ALL,
 		D_BDW_PLUS, 0, 8, NULL},

-	{"ME_SEMAPHORE_WAIT", OP_MI_SEMAPHORE_WAIT, F_LEN_VAR, R_ALL, D_BDW_PLUS,
-		ADDR_FIX_1(2), 8, cmd_handler_mi_semaphore_wait},
+	{"MI_SEMAPHORE_WAIT", OP_MI_SEMAPHORE_WAIT, F_LEN_VAR, R_ALL,
+		D_BDW_PLUS, ADDR_FIX_1(2), 8, cmd_handler_mi_semaphore_wait},

 	{"MI_STORE_DATA_IMM", OP_MI_STORE_DATA_IMM, F_LEN_VAR, R_ALL, D_BDW_PLUS,
 		ADDR_FIX_1(1), 10, cmd_handler_mi_store_data_imm},
@@ -437,7 +437,7 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)

 	ret = intel_gvt_debugfs_init(gvt);
 	if (ret)
-		gvt_err("debugfs registeration failed, go on.\n");
+		gvt_err("debugfs registration failed, go on.\n");

 	gvt_dbg_core("gvt device initialization is done\n");
 	dev_priv->gvt = gvt;
@@ -159,6 +159,10 @@ struct intel_vgpu_submission {
 	struct kmem_cache *workloads;
 	atomic_t running_workload_num;
 	struct i915_gem_context *shadow_ctx;
+	union {
+		u64 i915_context_pml4;
+		u64 i915_context_pdps[GEN8_3LVL_PDPES];
+	};
 	DECLARE_BITMAP(shadow_ctx_desc_updated, I915_NUM_ENGINES);
 	DECLARE_BITMAP(tlb_handle_pending, I915_NUM_ENGINES);
 	void *ring_scan_buffer[I915_NUM_ENGINES];
@@ -475,6 +475,7 @@ static i915_reg_t force_nonpriv_white_list[] = {
 	_MMIO(0x7704),
 	_MMIO(0x7708),
 	_MMIO(0x770c),
+	_MMIO(0x83a8),
 	_MMIO(0xb110),
 	GEN8_L3SQCREG4,//_MMIO(0xb118)
 	_MMIO(0xe100),

@@ -126,7 +126,7 @@ static const char * const irq_name[INTEL_GVT_EVENT_MAX] = {
 	[FDI_RX_INTERRUPTS_TRANSCODER_C] = "FDI RX Interrupts Combined C",
 	[AUDIO_CP_CHANGE_TRANSCODER_C] = "Audio CP Change Transcoder C",
 	[AUDIO_CP_REQUEST_TRANSCODER_C] = "Audio CP Request Transcoder C",
-	[ERR_AND_DBG] = "South Error and Debug Interupts Combined",
+	[ERR_AND_DBG] = "South Error and Debug Interrupts Combined",
 	[GMBUS] = "Gmbus",
 	[SDVO_B_HOTPLUG] = "SDVO B hotplug",
 	[CRT_HOTPLUG] = "CRT Hotplug",
@@ -1079,6 +1079,21 @@ int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt)
 	return ret;
 }

+static void
+i915_context_ppgtt_root_restore(struct intel_vgpu_submission *s)
+{
+	struct i915_hw_ppgtt *i915_ppgtt = s->shadow_ctx->ppgtt;
+	int i;
+
+	if (i915_vm_is_48bit(&i915_ppgtt->vm))
+		px_dma(&i915_ppgtt->pml4) = s->i915_context_pml4;
+	else {
+		for (i = 0; i < GEN8_3LVL_PDPES; i++)
+			px_dma(i915_ppgtt->pdp.page_directory[i]) =
+				s->i915_context_pdps[i];
+	}
+}
+
 /**
  * intel_vgpu_clean_submission - free submission-related resource for vGPU
  * @vgpu: a vGPU

@@ -1091,6 +1106,7 @@ void intel_vgpu_clean_submission(struct intel_vgpu *vgpu)
 	struct intel_vgpu_submission *s = &vgpu->submission;

 	intel_vgpu_select_submission_ops(vgpu, ALL_ENGINES, 0);
+	i915_context_ppgtt_root_restore(s);
 	i915_gem_context_put(s->shadow_ctx);
 	kmem_cache_destroy(s->workloads);
 }

@@ -1116,6 +1132,21 @@ void intel_vgpu_reset_submission(struct intel_vgpu *vgpu,
 	s->ops->reset(vgpu, engine_mask);
 }

+static void
+i915_context_ppgtt_root_save(struct intel_vgpu_submission *s)
+{
+	struct i915_hw_ppgtt *i915_ppgtt = s->shadow_ctx->ppgtt;
+	int i;
+
+	if (i915_vm_is_48bit(&i915_ppgtt->vm))
+		s->i915_context_pml4 = px_dma(&i915_ppgtt->pml4);
+	else {
+		for (i = 0; i < GEN8_3LVL_PDPES; i++)
+			s->i915_context_pdps[i] =
+				px_dma(i915_ppgtt->pdp.page_directory[i]);
+	}
+}
+
 /**
  * intel_vgpu_setup_submission - setup submission-related resource for vGPU
  * @vgpu: a vGPU

@@ -1138,6 +1169,8 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
 	if (IS_ERR(s->shadow_ctx))
 		return PTR_ERR(s->shadow_ctx);

+	i915_context_ppgtt_root_save(s);
+
 	bitmap_zero(s->shadow_ctx_desc_updated, I915_NUM_ENGINES);

 	s->workloads = kmem_cache_create_usercopy("gvt-g_vgpu_workload",
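The union added to intel_vgpu_submission holds either the single 48-bit root (pml4) or the four legacy page-directory roots, and the save/restore pair is called symmetrically at setup and teardown. A simplified, compilable model of that pattern (the structures are stand-ins, not the gvt types; GEN8_3LVL_PDPES is 4 in the real code):

	#include <stdint.h>
	#include <stdio.h>

	#define NLEVELS 4

	struct submission {
		int use_48bit;
		union {
			uint64_t context_pml4;
			uint64_t context_pdps[NLEVELS];
		};
	};

	static void root_save(struct submission *s, const uint64_t *hw_roots)
	{
		int i;

		if (s->use_48bit)
			s->context_pml4 = hw_roots[0];
		else
			for (i = 0; i < NLEVELS; i++)
				s->context_pdps[i] = hw_roots[i];
	}

	static void root_restore(const struct submission *s, uint64_t *hw_roots)
	{
		int i;

		if (s->use_48bit)
			hw_roots[0] = s->context_pml4;
		else
			for (i = 0; i < NLEVELS; i++)
				hw_roots[i] = s->context_pdps[i];
	}

	int main(void)
	{
		struct submission s = { .use_48bit = 0 };
		uint64_t roots[NLEVELS] = { 1, 2, 3, 4 };

		root_save(&s, roots);		/* at setup time */
		roots[2] = 99;			/* vGPU scribbles on the roots */
		root_restore(&s, roots);	/* teardown: back to 1,2,3,4 */
		printf("roots[2] = %llu\n", (unsigned long long)roots[2]);
		return 0;
	}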
@@ -77,38 +77,39 @@ static inline int ttm_mem_type_from_place(const struct ttm_place *place,
 	return 0;
 }

-static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
+static void ttm_mem_type_debug(struct ttm_bo_device *bdev, struct drm_printer *p,
+			       int mem_type)
 {
 	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
-	struct drm_printer p = drm_debug_printer(TTM_PFX);

-	pr_err("    has_type: %d\n", man->has_type);
-	pr_err("    use_type: %d\n", man->use_type);
-	pr_err("    flags: 0x%08X\n", man->flags);
-	pr_err("    gpu_offset: 0x%08llX\n", man->gpu_offset);
-	pr_err("    size: %llu\n", man->size);
-	pr_err("    available_caching: 0x%08X\n", man->available_caching);
-	pr_err("    default_caching: 0x%08X\n", man->default_caching);
+	drm_printf(p, "    has_type: %d\n", man->has_type);
+	drm_printf(p, "    use_type: %d\n", man->use_type);
+	drm_printf(p, "    flags: 0x%08X\n", man->flags);
+	drm_printf(p, "    gpu_offset: 0x%08llX\n", man->gpu_offset);
+	drm_printf(p, "    size: %llu\n", man->size);
+	drm_printf(p, "    available_caching: 0x%08X\n", man->available_caching);
+	drm_printf(p, "    default_caching: 0x%08X\n", man->default_caching);
 	if (mem_type != TTM_PL_SYSTEM)
-		(*man->func->debug)(man, &p);
+		(*man->func->debug)(man, p);
 }

 static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
 					struct ttm_placement *placement)
 {
+	struct drm_printer p = drm_debug_printer(TTM_PFX);
 	int i, ret, mem_type;

-	pr_err("No space for %p (%lu pages, %luK, %luM)\n",
-	       bo, bo->mem.num_pages, bo->mem.size >> 10,
-	       bo->mem.size >> 20);
+	drm_printf(&p, "No space for %p (%lu pages, %luK, %luM)\n",
+		   bo, bo->mem.num_pages, bo->mem.size >> 10,
+		   bo->mem.size >> 20);
 	for (i = 0; i < placement->num_placement; i++) {
 		ret = ttm_mem_type_from_place(&placement->placement[i],
 						&mem_type);
 		if (ret)
 			return;
-		pr_err("  placement[%d]=0x%08X (%d)\n",
-		       i, placement->placement[i].flags, mem_type);
-		ttm_mem_type_debug(bo->bdev, mem_type);
+		drm_printf(&p, "  placement[%d]=0x%08X (%d)\n",
+			   i, placement->placement[i].flags, mem_type);
+		ttm_mem_type_debug(bo->bdev, &p, mem_type);
 	}
 }
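The ttm change threads a drm_printer through the debug helpers instead of hardcoding pr_err(), so the entry point chooses the sink (and log level) once. A tiny userspace model of that printer-object idea, assuming simplified stand-ins for the drm types:

	#include <stdarg.h>
	#include <stdio.h>

	/* Tiny model of struct drm_printer: helpers print through an object
	 * the caller constructed, so the sink is chosen once at the entry
	 * point instead of in every helper. */
	struct printer {
		void (*emit)(const char *fmt, va_list ap);
	};

	static void printer_printf(struct printer *p, const char *fmt, ...)
	{
		va_list ap;

		va_start(ap, fmt);
		p->emit(fmt, ap);
		va_end(ap);
	}

	static void emit_stderr(const char *fmt, va_list ap)
	{
		vfprintf(stderr, fmt, ap);
	}

	/* Debug helper takes the printer instead of picking its own sink. */
	static void mem_type_debug(struct printer *p, int has_type, int use_type)
	{
		printer_printf(p, "  has_type: %d\n", has_type);
		printer_printf(p, "  use_type: %d\n", use_type);
	}

	int main(void)
	{
		struct printer p = { .emit = emit_stderr };

		mem_type_debug(&p, 1, 0);	/* caller chose the sink once */
		return 0;
	}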