Merge branch 'drm-next-4.18' of git://people.freedesktop.org/~agd5f/linux into drm-next

Last feature request for 4.18. Mostly Vega20 support:
- Vega20 support
- clock and powergating for VCN
- misc bug fixes

Signed-off-by: Dave Airlie <airlied@redhat.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180524152427.32713-1-alexander.deucher@amd.com
Committed by Dave Airlie on 2018-05-25 10:28:14 +10:00 (commit dd41fb8547).
139 changed files with 6902 additions and 2766 deletions.

File: drivers/gpu/drm/amd/amdgpu/Makefile

@ -62,11 +62,13 @@ amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o kv_smc.o kv_dpm.o \
amdgpu-$(CONFIG_DRM_AMDGPU_SI)+= si.o gmc_v6_0.o gfx_v6_0.o si_ih.o si_dma.o dce_v6_0.o si_dpm.o si_smc.o
amdgpu-y += \
vi.o mxgpu_vi.o nbio_v6_1.o soc15.o emu_soc.o mxgpu_ai.o nbio_v7_0.o vega10_reg_init.o
vi.o mxgpu_vi.o nbio_v6_1.o soc15.o emu_soc.o mxgpu_ai.o nbio_v7_0.o vega10_reg_init.o \
vega20_reg_init.o
# add DF block
amdgpu-y += \
df_v1_7.o
df_v1_7.o \
df_v3_6.o
# add GMC block
amdgpu-y += \

File: drivers/gpu/drm/amd/amdgpu/amdgpu.h

@ -1401,6 +1401,8 @@ struct amdgpu_df_funcs {
bool enable);
void (*get_clockgating_state)(struct amdgpu_device *adev,
u32 *flags);
void (*enable_ecc_force_par_wr_rmw)(struct amdgpu_device *adev,
bool enable);
};
/* Define the HW IP blocks that will be used in the driver; add more if necessary */
enum amd_hw_ip_block_type {
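
The hunk above adds a new callback to the DF (Data Fabric) function table. A minimal sketch of how a caller might drive it, assuming the driver's usual adev->df_funcs pointer and NULL-guarding convention; only the callback name and signature come from this diff:

	/* Hypothetical call site: force parity write read-modify-write for
	 * ECC-capable DF hardware. The guards and the function itself are
	 * assumptions for illustration, not part of this commit. */
	static void example_df_force_par_wr_rmw(struct amdgpu_device *adev)
	{
		if (adev->df_funcs && adev->df_funcs->enable_ecc_force_par_wr_rmw)
			adev->df_funcs->enable_ecc_force_par_wr_rmw(adev, true);
	}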

File: drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c

@ -322,3 +322,47 @@ int amdgpu_atomfirmware_get_clock_info(struct amdgpu_device *adev)
return ret;
}
union gfx_info {
struct atom_gfx_info_v2_4 v24;
};
int amdgpu_atomfirmware_get_gfx_info(struct amdgpu_device *adev)
{
struct amdgpu_mode_info *mode_info = &adev->mode_info;
int index;
uint8_t frev, crev;
uint16_t data_offset;
index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
gfx_info);
if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
&frev, &crev, &data_offset)) {
union gfx_info *gfx_info = (union gfx_info *)
(mode_info->atom_context->bios + data_offset);
switch (crev) {
case 4:
adev->gfx.config.max_shader_engines = gfx_info->v24.gc_num_se;
adev->gfx.config.max_cu_per_sh = gfx_info->v24.gc_num_cu_per_sh;
adev->gfx.config.max_sh_per_se = gfx_info->v24.gc_num_sh_per_se;
adev->gfx.config.max_backends_per_se = gfx_info->v24.gc_num_rb_per_se;
adev->gfx.config.max_texture_channel_caches = gfx_info->v24.gc_num_tccs;
adev->gfx.config.max_gprs = le16_to_cpu(gfx_info->v24.gc_num_gprs);
adev->gfx.config.max_gs_threads = gfx_info->v24.gc_num_max_gs_thds;
adev->gfx.config.gs_vgt_table_depth = gfx_info->v24.gc_gs_table_depth;
adev->gfx.config.gs_prim_buffer_depth =
le16_to_cpu(gfx_info->v24.gc_gsprim_buff_depth);
adev->gfx.config.double_offchip_lds_buf =
gfx_info->v24.gc_double_offchip_lds_buffer;
adev->gfx.cu_info.wave_front_size = le16_to_cpu(gfx_info->v24.gc_wave_size);
adev->gfx.cu_info.max_waves_per_simd = le16_to_cpu(gfx_info->v24.gc_max_waves_per_simd);
adev->gfx.cu_info.max_scratch_slots_per_cu = gfx_info->v24.gc_max_scratch_slots_per_cu;
adev->gfx.cu_info.lds_size = le16_to_cpu(gfx_info->v24.gc_lds_size);
return 0;
default:
return -EINVAL;
}
}
return -EINVAL;
}
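
A hedged sketch of a consumer of the new parser, assuming it runs during early GPU init; the wrapper function and log message are illustrative only, while the field names come from the function above:

	/* Hypothetical caller: populate the gfx config from the gfx_info
	 * table and bail out on -EINVAL (missing table or unknown revision),
	 * letting the caller fall back to per-ASIC defaults. */
	static int example_read_gfx_config(struct amdgpu_device *adev)
	{
		int r = amdgpu_atomfirmware_get_gfx_info(adev);

		if (r)
			return r;

		DRM_INFO("SEs %u, CUs/SH %u, wavefront size %u\n",
			 adev->gfx.config.max_shader_engines,
			 adev->gfx.config.max_cu_per_sh,
			 adev->gfx.cu_info.wave_front_size);
		return 0;
	}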

File: drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h

@ -30,5 +30,6 @@ int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev);
int amdgpu_atomfirmware_get_vram_width(struct amdgpu_device *adev);
int amdgpu_atomfirmware_get_vram_type(struct amdgpu_device *adev);
int amdgpu_atomfirmware_get_clock_info(struct amdgpu_device *adev);
int amdgpu_atomfirmware_get_gfx_info(struct amdgpu_device *adev);
#endif

File: drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c

@ -400,6 +400,9 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
case CHIP_VEGA12:
strcpy(fw_name, "amdgpu/vega12_smc.bin");
break;
case CHIP_VEGA20:
strcpy(fw_name, "amdgpu/vega20_smc.bin");
break;
default:
DRM_ERROR("SMC firmware not supported\n");
return -EINVAL;

File: drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c

@ -173,9 +173,14 @@ static void amdgpu_ctx_do_release(struct kref *ref)
ctx = container_of(ref, struct amdgpu_ctx, refcount);
for (i = 0; i < ctx->adev->num_rings; i++)
for (i = 0; i < ctx->adev->num_rings; i++) {
if (ctx->adev->rings[i] == &ctx->adev->gfx.kiq.ring)
continue;
drm_sched_entity_fini(&ctx->adev->rings[i]->sched,
&ctx->rings[i].entity);
}
amdgpu_ctx_fini(ref);
}
@ -452,12 +457,17 @@ void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
if (!ctx->adev)
return;
for (i = 0; i < ctx->adev->num_rings; i++)
for (i = 0; i < ctx->adev->num_rings; i++) {
if (ctx->adev->rings[i] == &ctx->adev->gfx.kiq.ring)
continue;
if (kref_read(&ctx->refcount) == 1)
drm_sched_entity_do_release(&ctx->adev->rings[i]->sched,
&ctx->rings[i].entity);
else
DRM_ERROR("ctx %p is still alive\n", ctx);
}
}
}
@ -474,12 +484,17 @@ void amdgpu_ctx_mgr_entity_cleanup(struct amdgpu_ctx_mgr *mgr)
if (!ctx->adev)
return;
for (i = 0; i < ctx->adev->num_rings; i++)
for (i = 0; i < ctx->adev->num_rings; i++) {
if (ctx->adev->rings[i] == &ctx->adev->gfx.kiq.ring)
continue;
if (kref_read(&ctx->refcount) == 1)
drm_sched_entity_cleanup(&ctx->adev->rings[i]->sched,
&ctx->rings[i].entity);
else
DRM_ERROR("ctx %p is still alive\n", ctx);
}
}
}

File: drivers/gpu/drm/amd/amdgpu/amdgpu_device.c

@ -86,6 +86,7 @@ static const char *amdgpu_asic_name[] = {
"VEGAM",
"VEGA10",
"VEGA12",
"VEGA20",
"RAVEN",
"LAST",
};
@ -1387,6 +1388,7 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
case CHIP_KABINI:
case CHIP_MULLINS:
#endif
case CHIP_VEGA20:
default:
return 0;
case CHIP_VEGA10:
@ -1521,6 +1523,7 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
#endif
case CHIP_VEGA10:
case CHIP_VEGA12:
case CHIP_VEGA20:
case CHIP_RAVEN:
if (adev->asic_type == CHIP_RAVEN)
adev->family = AMDGPU_FAMILY_RV;
@ -1715,6 +1718,7 @@ static int amdgpu_device_ip_late_set_cg_state(struct amdgpu_device *adev)
/* skip CG for VCE/UVD, it's handled specially */
if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
adev->ip_blocks[i].version->funcs->set_clockgating_state) {
/* enable clockgating to save power */
r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
@ -1814,6 +1818,7 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
adev->ip_blocks[i].version->funcs->set_clockgating_state) {
/* ungate blocks before hw fini so that we can shut down the blocks safely */
r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
@ -2155,6 +2160,7 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
case CHIP_FIJI:
case CHIP_VEGA10:
case CHIP_VEGA12:
case CHIP_VEGA20:
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
case CHIP_RAVEN:
#endif
@ -3172,7 +3178,6 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
struct amdgpu_job *job, bool force)
{
struct drm_atomic_state *state = NULL;
int i, r, resched;
if (!force && !amdgpu_device_ip_check_soft_reset(adev)) {
@ -3195,10 +3200,6 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
/* block TTM */
resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
/* store modesetting */
if (amdgpu_device_has_dc_support(adev))
state = drm_atomic_helper_suspend(adev->ddev);
/* block all schedulers and reset given job's ring */
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = adev->rings[i];
@ -3238,10 +3239,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
kthread_unpark(ring->sched.thread);
}
if (amdgpu_device_has_dc_support(adev)) {
if (drm_atomic_helper_resume(adev->ddev, state))
dev_info(adev->dev, "drm resume failed:%d\n", r);
} else {
if (!amdgpu_device_has_dc_support(adev)) {
drm_helper_resume_force_mode(adev->ddev);
}

File: drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c

@ -560,6 +560,13 @@ static const struct pci_device_id pciidlist[] = {
{0x1002, 0x69A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA12},
{0x1002, 0x69A3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA12},
{0x1002, 0x69AF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA12},
/* Vega 20 */
{0x1002, 0x66A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20|AMD_EXP_HW_SUPPORT},
{0x1002, 0x66A1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20|AMD_EXP_HW_SUPPORT},
{0x1002, 0x66A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20|AMD_EXP_HW_SUPPORT},
{0x1002, 0x66A3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20|AMD_EXP_HW_SUPPORT},
{0x1002, 0x66A7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20|AMD_EXP_HW_SUPPORT},
{0x1002, 0x66AF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20|AMD_EXP_HW_SUPPORT},
/* Raven */
{0x1002, 0x15dd, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RAVEN|AMD_IS_APU},
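
All six new Vega20 IDs carry AMD_EXP_HW_SUPPORT. A sketch of the probe-time gating such a flag implies; the check below and the amdgpu_exp_hw_support module parameter are assumptions about the driver's existing behavior, not part of this diff:

	/* Hypothetical probe check: refuse experimental hardware unless the
	 * user opted in via the (assumed) exp_hw_support module parameter. */
	static int example_check_exp_hw(const struct pci_device_id *ent)
	{
		if ((ent->driver_data & AMD_EXP_HW_SUPPORT) &&
		    !amdgpu_exp_hw_support) {
			DRM_INFO("Device requires exp_hw_support=1 to bind\n");
			return -ENODEV;
		}
		return 0;
	}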

File: drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c

@ -376,14 +376,14 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
struct amdgpu_device *adev = ring->adev;
uint64_t index;
if (ring != &adev->uvd.ring) {
if (ring != &adev->uvd.inst[ring->me].ring) {
ring->fence_drv.cpu_addr = &adev->wb.wb[ring->fence_offs];
ring->fence_drv.gpu_addr = adev->wb.gpu_addr + (ring->fence_offs * 4);
} else {
/* put fence directly behind firmware */
index = ALIGN(adev->uvd.fw->size, 8);
ring->fence_drv.cpu_addr = adev->uvd.cpu_addr + index;
ring->fence_drv.gpu_addr = adev->uvd.gpu_addr + index;
ring->fence_drv.cpu_addr = adev->uvd.inst[ring->me].cpu_addr + index;
ring->fence_drv.gpu_addr = adev->uvd.inst[ring->me].gpu_addr + index;
}
amdgpu_fence_write(ring, atomic_read(&ring->fence_drv.last_seq));
amdgpu_irq_get(adev, irq_src, irq_type);

File: drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c

@ -286,7 +286,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
struct drm_crtc *crtc;
uint32_t ui32 = 0;
uint64_t ui64 = 0;
int i, found;
int i, j, found;
int ui32_size = sizeof(ui32);
if (!info->return_size || !info->return_pointer)
@ -348,7 +348,8 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
break;
case AMDGPU_HW_IP_UVD:
type = AMD_IP_BLOCK_TYPE_UVD;
ring_mask = adev->uvd.ring.ready ? 1 : 0;
for (i = 0; i < adev->uvd.num_uvd_inst; i++)
ring_mask |= ((adev->uvd.inst[i].ring.ready ? 1 : 0) << i);
ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
ib_size_alignment = 16;
break;
@ -361,8 +362,11 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
break;
case AMDGPU_HW_IP_UVD_ENC:
type = AMD_IP_BLOCK_TYPE_UVD;
for (i = 0; i < adev->uvd.num_enc_rings; i++)
ring_mask |= ((adev->uvd.ring_enc[i].ready ? 1 : 0) << i);
for (i = 0; i < adev->uvd.num_uvd_inst; i++)
for (j = 0; j < adev->uvd.num_enc_rings; j++)
ring_mask |=
((adev->uvd.inst[i].ring_enc[j].ready ? 1 : 0) <<
(j + i * adev->uvd.num_enc_rings));
ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
ib_size_alignment = 1;
break;
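
A worked example of the resulting bit layout, assuming two UVD instances with two encode rings each (the Vega20 configuration this series enables):

	/*
	 * ring_mask bit layout for num_uvd_inst = 2, num_enc_rings = 2:
	 *   bit 0: inst[0].ring_enc[0]    bit 1: inst[0].ring_enc[1]
	 *   bit 2: inst[1].ring_enc[0]    bit 3: inst[1].ring_enc[1]
	 * With every ring ready the reported mask is 0xF; a dead second
	 * instance would clear bits 2-3.
	 */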

File: drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c

@ -52,6 +52,7 @@ static int psp_sw_init(void *handle)
switch (adev->asic_type) {
case CHIP_VEGA10:
case CHIP_VEGA12:
case CHIP_VEGA20:
psp_v3_1_set_psp_funcs(psp);
break;
case CHIP_RAVEN:

File: drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c

@ -66,6 +66,8 @@ static int amdgpu_identity_map(struct amdgpu_device *adev,
u32 ring,
struct amdgpu_ring **out_ring)
{
u32 instance;
switch (mapper->hw_ip) {
case AMDGPU_HW_IP_GFX:
*out_ring = &adev->gfx.gfx_ring[ring];
@ -77,13 +79,16 @@ static int amdgpu_identity_map(struct amdgpu_device *adev,
*out_ring = &adev->sdma.instance[ring].ring;
break;
case AMDGPU_HW_IP_UVD:
*out_ring = &adev->uvd.ring;
instance = ring;
*out_ring = &adev->uvd.inst[instance].ring;
break;
case AMDGPU_HW_IP_VCE:
*out_ring = &adev->vce.ring[ring];
break;
case AMDGPU_HW_IP_UVD_ENC:
*out_ring = &adev->uvd.ring_enc[ring];
instance = ring / adev->uvd.num_enc_rings;
*out_ring =
&adev->uvd.inst[instance].ring_enc[ring%adev->uvd.num_enc_rings];
break;
case AMDGPU_HW_IP_VCN_DEC:
*out_ring = &adev->vcn.ring_dec;
@ -240,13 +245,14 @@ int amdgpu_queue_mgr_map(struct amdgpu_device *adev,
ip_num_rings = adev->sdma.num_instances;
break;
case AMDGPU_HW_IP_UVD:
ip_num_rings = 1;
ip_num_rings = adev->uvd.num_uvd_inst;
break;
case AMDGPU_HW_IP_VCE:
ip_num_rings = adev->vce.num_rings;
break;
case AMDGPU_HW_IP_UVD_ENC:
ip_num_rings = adev->uvd.num_enc_rings;
ip_num_rings =
adev->uvd.num_enc_rings * adev->uvd.num_uvd_inst;
break;
case AMDGPU_HW_IP_VCN_DEC:
ip_num_rings = 1;
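
The two UVD_ENC changes are consistent: the total ring count becomes num_enc_rings * num_uvd_inst, and the identity map earlier in this file divides the flat user index back into (instance, ring). A worked example, assuming num_enc_rings == 2:

	/*
	 * Flat user ring -> (instance, enc ring) for num_enc_rings == 2:
	 *   ring 0 -> inst[0].ring_enc[0]    ring 1 -> inst[0].ring_enc[1]
	 *   ring 2 -> inst[1].ring_enc[0]    ring 3 -> inst[1].ring_enc[1]
	 * instance = ring / num_enc_rings; index = ring % num_enc_rings.
	 */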

File: drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c

@ -362,6 +362,7 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring)
dma_fence_put(ring->vmid_wait);
ring->vmid_wait = NULL;
ring->me = 0;
ring->adev->rings[ring->idx] = NULL;
}

File: drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h

@ -29,7 +29,7 @@
#include <drm/drm_print.h>
/* max number of rings */
#define AMDGPU_MAX_RINGS 18
#define AMDGPU_MAX_RINGS 21
#define AMDGPU_MAX_GFX_RINGS 1
#define AMDGPU_MAX_COMPUTE_RINGS 8
#define AMDGPU_MAX_VCE_RINGS 3

File: drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c

@ -63,16 +63,44 @@ static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev);
/*
* Global memory.
*/
/**
* amdgpu_ttm_mem_global_init - Initialize and acquire reference to
* memory object
*
* @ref: Object for initialization.
*
* This is called by drm_global_item_ref() when an object is being
* initialized.
*/
static int amdgpu_ttm_mem_global_init(struct drm_global_reference *ref)
{
return ttm_mem_global_init(ref->object);
}
/**
* amdgpu_ttm_mem_global_release - Drop reference to a memory object
*
* @ref: Object being removed
*
* This is called by drm_global_item_unref() when an object is being
* released.
*/
static void amdgpu_ttm_mem_global_release(struct drm_global_reference *ref)
{
ttm_mem_global_release(ref->object);
}
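
For orientation, a sketch of how these two helpers are wired into the drm_global machinery; this paraphrases the registration done in amdgpu_ttm_global_init() below, and the .init/.release assignments are inferred from that code, not new behavior:

	/* drm_global_item_ref() calls .init on the first reference;
	 * drm_global_item_unref() calls .release when the last one drops. */
	static int example_ref_ttm_mem_global(struct amdgpu_device *adev)
	{
		struct drm_global_reference *global_ref = &adev->mman.mem_global_ref;

		global_ref->global_type = DRM_GLOBAL_TTM_MEM;
		global_ref->size = sizeof(struct ttm_mem_global);
		global_ref->init = &amdgpu_ttm_mem_global_init;
		global_ref->release = &amdgpu_ttm_mem_global_release;
		return drm_global_item_ref(global_ref);
	}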
/**
* amdgpu_ttm_global_init - Initialize global TTM memory reference
* structures.
*
* @adev: AMDGPU device for which the global structures need to be
* registered.
*
* This is called as part of the AMDGPU ttm init from amdgpu_ttm_init()
* during bring up.
*/
static int amdgpu_ttm_global_init(struct amdgpu_device *adev)
{
struct drm_global_reference *global_ref;
@ -80,7 +108,9 @@ static int amdgpu_ttm_global_init(struct amdgpu_device *adev)
struct drm_sched_rq *rq;
int r;
/* ensure reference is false in case init fails */
adev->mman.mem_global_referenced = false;
global_ref = &adev->mman.mem_global_ref;
global_ref->global_type = DRM_GLOBAL_TTM_MEM;
global_ref->size = sizeof(struct ttm_mem_global);
@ -146,6 +176,18 @@ static int amdgpu_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
return 0;
}
/**
* amdgpu_init_mem_type - Initialize a memory manager for a specific
* type of memory request.
*
* @bdev: The TTM BO device object (contains a reference to
* amdgpu_device)
* @type: The type of memory requested
* @man: The TTM memory type manager to set up
*
* This is called by ttm_bo_init_mm() when a buffer object is being
* initialized.
*/
static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
struct ttm_mem_type_manager *man)
{
@ -161,6 +203,7 @@ static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
man->default_caching = TTM_PL_FLAG_CACHED;
break;
case TTM_PL_TT:
/* GTT memory */
man->func = &amdgpu_gtt_mgr_func;
man->gpu_offset = adev->gmc.gart_start;
man->available_caching = TTM_PL_MASK_CACHING;
@ -193,6 +236,14 @@ static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
return 0;
}
/**
* amdgpu_evict_flags - Compute placement flags
*
* @bo: The buffer object to evict
* @placement: Possible destination(s) for evicted BO
*
* Fill in placement data when ttm_bo_evict() is called
*/
static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
struct ttm_placement *placement)
{
@ -204,12 +255,14 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM
};
/* Don't handle scatter gather BOs */
if (bo->type == ttm_bo_type_sg) {
placement->num_placement = 0;
placement->num_busy_placement = 0;
return;
}
/* Object isn't an AMDGPU object so ignore */
if (!amdgpu_ttm_bo_is_amdgpu_bo(bo)) {
placement->placement = &placements;
placement->busy_placement = &placements;
@ -217,10 +270,12 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
placement->num_busy_placement = 1;
return;
}
abo = ttm_to_amdgpu_bo(bo);
switch (bo->mem.mem_type) {
case TTM_PL_VRAM:
if (!adev->mman.buffer_funcs_enabled) {
/* Move to system memory */
amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
} else if (adev->gmc.visible_vram_size < adev->gmc.real_vram_size &&
!(abo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) &&
@ -238,6 +293,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
abo->placement.busy_placement = &abo->placements[1];
abo->placement.num_busy_placement = 1;
} else {
/* Move to GTT memory */
amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT);
}
break;
@ -248,6 +304,15 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
*placement = abo->placement;
}
/**
* amdgpu_verify_access - Verify access for a mmap call
*
* @bo: The buffer object to map
* @filp: The file pointer from the process performing the mmap
*
* This is called by ttm_bo_mmap() to verify whether a process
* has the right to mmap a BO to their process space.
*/
static int amdgpu_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
@ -265,6 +330,15 @@ static int amdgpu_verify_access(struct ttm_buffer_object *bo, struct file *filp)
filp->private_data);
}
/**
* amdgpu_move_null - Register memory for a buffer object
*
* @bo: The bo to assign the memory to
* @new_mem: The memory to be assigned.
*
* Assign the memory from new_mem to the memory of the buffer object
* bo.
*/
static void amdgpu_move_null(struct ttm_buffer_object *bo,
struct ttm_mem_reg *new_mem)
{
@ -275,6 +349,10 @@ static void amdgpu_move_null(struct ttm_buffer_object *bo,
new_mem->mm_node = NULL;
}
/**
* amdgpu_mm_node_addr - Compute the GPU relative offset of a GTT
* buffer.
*/
static uint64_t amdgpu_mm_node_addr(struct ttm_buffer_object *bo,
struct drm_mm_node *mm_node,
struct ttm_mem_reg *mem)
@ -289,9 +367,10 @@ static uint64_t amdgpu_mm_node_addr(struct ttm_buffer_object *bo,
}
/**
* amdgpu_find_mm_node - Helper function finds the drm_mm_node
* corresponding to @offset. It also modifies the offset to be
* within the drm_mm_node returned
* amdgpu_find_mm_node - Helper function finds the drm_mm_node
* corresponding to @offset. It also modifies
* the offset to be within the drm_mm_node
* returned
*/
static struct drm_mm_node *amdgpu_find_mm_node(struct ttm_mem_reg *mem,
unsigned long *offset)
@ -430,7 +509,12 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
return r;
}
/**
* amdgpu_move_blit - Copy an entire buffer to another buffer
*
* This is a helper called by amdgpu_bo_move() and
* amdgpu_move_vram_ram() to help move buffers to and from VRAM.
*/
static int amdgpu_move_blit(struct ttm_buffer_object *bo,
bool evict, bool no_wait_gpu,
struct ttm_mem_reg *new_mem,
@ -465,6 +549,11 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
return r;
}
/**
* amdgpu_move_vram_ram - Copy VRAM buffer to RAM buffer
*
* Called by amdgpu_bo_move().
*/
static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo, bool evict,
struct ttm_operation_ctx *ctx,
struct ttm_mem_reg *new_mem)
@ -477,6 +566,8 @@ static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo, bool evict,
int r;
adev = amdgpu_ttm_adev(bo->bdev);
/* create space/pages for new_mem in GTT space */
tmp_mem = *new_mem;
tmp_mem.mm_node = NULL;
placement.num_placement = 1;
@ -491,25 +582,36 @@ static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo, bool evict,
return r;
}
/* set caching flags */
r = ttm_tt_set_placement_caching(bo->ttm, tmp_mem.placement);
if (unlikely(r)) {
goto out_cleanup;
}
/* Bind the memory to the GTT space */
r = ttm_tt_bind(bo->ttm, &tmp_mem, ctx);
if (unlikely(r)) {
goto out_cleanup;
}
/* blit VRAM to GTT */
r = amdgpu_move_blit(bo, true, ctx->no_wait_gpu, &tmp_mem, old_mem);
if (unlikely(r)) {
goto out_cleanup;
}
/* move BO (in tmp_mem) to new_mem */
r = ttm_bo_move_ttm(bo, ctx, new_mem);
out_cleanup:
ttm_bo_mem_put(bo, &tmp_mem);
return r;
}
/**
* amdgpu_move_ram_vram - Copy buffer from RAM to VRAM
*
* Called by amdgpu_bo_move().
*/
static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo, bool evict,
struct ttm_operation_ctx *ctx,
struct ttm_mem_reg *new_mem)
@ -522,6 +624,8 @@ static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo, bool evict,
int r;
adev = amdgpu_ttm_adev(bo->bdev);
/* make space in GTT for old_mem buffer */
tmp_mem = *new_mem;
tmp_mem.mm_node = NULL;
placement.num_placement = 1;
@ -535,10 +639,14 @@ static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo, bool evict,
if (unlikely(r)) {
return r;
}
/* move/bind old memory to GTT space */
r = ttm_bo_move_ttm(bo, ctx, &tmp_mem);
if (unlikely(r)) {
goto out_cleanup;
}
/* copy to VRAM */
r = amdgpu_move_blit(bo, true, ctx->no_wait_gpu, new_mem, old_mem);
if (unlikely(r)) {
goto out_cleanup;
@ -548,6 +656,11 @@ static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo, bool evict,
return r;
}
/**
* amdgpu_bo_move - Move a buffer object to a new memory location
*
* Called by ttm_bo_handle_move_mem()
*/
static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
struct ttm_operation_ctx *ctx,
struct ttm_mem_reg *new_mem)
@ -613,6 +726,11 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
return 0;
}
/**
* amdgpu_ttm_io_mem_reserve - Reserve a block of memory during a fault
*
* Called by ttm_mem_io_reserve() ultimately via ttm_bo_vm_fault()
*/
static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
@ -690,6 +808,14 @@ struct amdgpu_ttm_tt {
uint32_t last_set_pages;
};
/**
* amdgpu_ttm_tt_get_user_pages - Pin pages of memory pointed to
* by a USERPTR pointer to memory
*
* Called by amdgpu_gem_userptr_ioctl() and amdgpu_cs_parser_bos().
* This provides a wrapper around the get_user_pages() call to provide
* device accessible pages that back user memory.
*/
int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
{
struct amdgpu_ttm_tt *gtt = (void *)ttm;
@ -719,6 +845,7 @@ int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
}
}
/* loop enough times using contiguous pages of memory */
do {
unsigned num_pages = ttm->num_pages - pinned;
uint64_t userptr = gtt->userptr + pinned * PAGE_SIZE;
@ -757,6 +884,14 @@ int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
return r;
}
/**
* amdgpu_ttm_tt_set_user_pages - Copy pages in, putting old pages
* as necessary.
*
* Called by amdgpu_cs_list_validate(). This creates the page list
* that backs user memory and will ultimately be mapped into the device
* address space.
*/
void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages)
{
struct amdgpu_ttm_tt *gtt = (void *)ttm;
@ -771,6 +906,11 @@ void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages)
}
}
/**
* amdgpu_ttm_tt_mark_user_page - Mark pages as dirty
*
* Called while unpinning userptr pages
*/
void amdgpu_ttm_tt_mark_user_pages(struct ttm_tt *ttm)
{
struct amdgpu_ttm_tt *gtt = (void *)ttm;
@ -789,7 +929,12 @@ void amdgpu_ttm_tt_mark_user_pages(struct ttm_tt *ttm)
}
}
/* prepare the sg table with the user pages */
/**
* amdgpu_ttm_tt_pin_userptr - prepare the sg table with the
* user pages
*
* Called by amdgpu_ttm_backend_bind()
**/
static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
@ -801,17 +946,20 @@ static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm)
enum dma_data_direction direction = write ?
DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
/* Allocate an SG array and squash pages into it */
r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0,
ttm->num_pages << PAGE_SHIFT,
GFP_KERNEL);
if (r)
goto release_sg;
/* Map SG to device */
r = -ENOMEM;
nents = dma_map_sg(adev->dev, ttm->sg->sgl, ttm->sg->nents, direction);
if (nents != ttm->sg->nents)
goto release_sg;
/* convert SG to linear array of pages and dma addresses */
drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
gtt->ttm.dma_address, ttm->num_pages);
@ -822,6 +970,9 @@ static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm)
return r;
}
/**
* amdgpu_ttm_tt_unpin_userptr - Unpin and unmap userptr pages
*/
static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
@ -835,9 +986,10 @@ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
if (!ttm->sg->sgl)
return;
/* free the sg table and pages again */
/* unmap the pages mapped to the device */
dma_unmap_sg(adev->dev, ttm->sg->sgl, ttm->sg->nents, direction);
/* mark the pages as dirty */
amdgpu_ttm_tt_mark_user_pages(ttm);
sg_free_table(ttm->sg);
@ -882,6 +1034,12 @@ int amdgpu_ttm_gart_bind(struct amdgpu_device *adev,
return r;
}
/**
* amdgpu_ttm_backend_bind - Bind GTT memory
*
* Called by ttm_tt_bind() on behalf of ttm_bo_handle_move_mem().
* This handles binding GTT memory to the device address space.
*/
static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm,
struct ttm_mem_reg *bo_mem)
{
@ -912,7 +1070,10 @@ static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm,
return 0;
}
/* compute PTE flags relevant to this BO memory */
flags = amdgpu_ttm_tt_pte_flags(adev, ttm, bo_mem);
/* bind pages into GART page tables */
gtt->offset = (u64)bo_mem->start << PAGE_SHIFT;
r = amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
ttm->pages, gtt->ttm.dma_address, flags);
@ -923,6 +1084,9 @@ static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm,
return r;
}
/**
* amdgpu_ttm_alloc_gart - Allocate GART memory for buffer object
*/
int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
@ -938,6 +1102,7 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
amdgpu_gtt_mgr_has_gart_addr(&bo->mem))
return 0;
/* allocate GTT space */
tmp = bo->mem;
tmp.mm_node = NULL;
placement.num_placement = 1;
@ -953,7 +1118,10 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
if (unlikely(r))
return r;
/* compute PTE flags for this buffer object */
flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, &tmp);
/* Bind pages */
gtt->offset = (u64)tmp.start << PAGE_SHIFT;
r = amdgpu_ttm_gart_bind(adev, bo, flags);
if (unlikely(r)) {
@ -969,6 +1137,12 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
return 0;
}
/**
* amdgpu_ttm_recover_gart - Rebind GTT pages
*
* Called by amdgpu_gtt_mgr_recover() from amdgpu_device_reset() to
* rebind GTT pages during a GPU reset.
*/
int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
@ -984,12 +1158,19 @@ int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo)
return r;
}
/**
* amdgpu_ttm_backend_unbind - Unbind GTT mapped pages
*
* Called by ttm_tt_unbind() on behalf of ttm_bo_move_ttm() and
* ttm_tt_destroy().
*/
static int amdgpu_ttm_backend_unbind(struct ttm_tt *ttm)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
struct amdgpu_ttm_tt *gtt = (void *)ttm;
int r;
/* if the pages have userptr pinning then clear that first */
if (gtt->userptr)
amdgpu_ttm_tt_unpin_userptr(ttm);
@ -1021,6 +1202,13 @@ static struct ttm_backend_func amdgpu_backend_func = {
.destroy = &amdgpu_ttm_backend_destroy,
};
/**
* amdgpu_ttm_tt_create - Create a ttm_tt object for a given BO
*
* @bo: The buffer object to create a GTT ttm_tt object around
*
* Called by ttm_tt_create().
*/
static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo,
uint32_t page_flags)
{
@ -1034,6 +1222,8 @@ static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo,
return NULL;
}
gtt->ttm.ttm.func = &amdgpu_backend_func;
/* allocate space for the uninitialized page entries */
if (ttm_sg_tt_init(&gtt->ttm, bo, page_flags)) {
kfree(gtt);
return NULL;
@ -1041,6 +1231,12 @@ static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo,
return &gtt->ttm.ttm;
}
/**
* amdgpu_ttm_tt_populate - Map GTT pages visible to the device
*
* Map the pages of a ttm_tt object to an address space visible
* to the underlying device.
*/
static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm,
struct ttm_operation_ctx *ctx)
{
@ -1048,6 +1244,7 @@ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm,
struct amdgpu_ttm_tt *gtt = (void *)ttm;
bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
/* user pages are bound by amdgpu_ttm_tt_pin_userptr() */
if (gtt && gtt->userptr) {
ttm->sg = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
if (!ttm->sg)
@ -1072,9 +1269,17 @@ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm,
}
#endif
/* fall back to generic helper to populate the page array
* and map them to the device */
return ttm_populate_and_map_pages(adev->dev, &gtt->ttm, ctx);
}
/**
* amdgpu_ttm_tt_unpopulate - unmap GTT pages and unpopulate page arrays
*
* Unmaps pages of a ttm_tt object from the device address space and
* unpopulates the page array backing it.
*/
static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
struct amdgpu_device *adev;
@ -1100,9 +1305,21 @@ static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
}
#endif
/* fall back to generic helper to unmap and unpopulate array */
ttm_unmap_and_unpopulate_pages(adev->dev, &gtt->ttm);
}
/**
* amdgpu_ttm_tt_set_userptr - Initialize userptr GTT ttm_tt
* for the current task
*
* @ttm: The ttm_tt object to bind this userptr object to
* @addr: The address in the current tasks VM space to use
* @flags: Requirements of userptr object.
*
* Called by amdgpu_gem_userptr_ioctl() to bind userptr pages
* to current task
*/
int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
uint32_t flags)
{
@ -1127,6 +1344,9 @@ int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
return 0;
}
/**
* amdgpu_ttm_tt_get_usermm - Return memory manager for ttm_tt object
*/
struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm)
{
struct amdgpu_ttm_tt *gtt = (void *)ttm;
@ -1140,6 +1360,12 @@ struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm)
return gtt->usertask->mm;
}
/**
* amdgpu_ttm_tt_affect_userptr - Determine if a ttm_tt object lays
* inside an address range for the
* current task.
*
*/
bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
unsigned long end)
{
@ -1150,10 +1376,16 @@ bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
if (gtt == NULL || !gtt->userptr)
return false;
/* Return false if no part of the ttm_tt object lies within
* the range
*/
size = (unsigned long)gtt->ttm.ttm.num_pages * PAGE_SIZE;
if (gtt->userptr > end || gtt->userptr + size <= start)
return false;
/* Search the lists of tasks that hold this mapping and see
* if current is one of them. If it is return false.
*/
spin_lock(&gtt->guptasklock);
list_for_each_entry(entry, &gtt->guptasks, list) {
if (entry->task == current) {
@ -1168,6 +1400,10 @@ bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
return true;
}
/**
* amdgpu_ttm_tt_userptr_invalidated - Has the ttm_tt object been
* invalidated?
*/
bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm,
int *last_invalidated)
{
@ -1178,6 +1414,12 @@ bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm,
return prev_invalidated != *last_invalidated;
}
/**
* amdgpu_ttm_tt_userptr_needs_pages - Have the pages backing this
* ttm_tt object been invalidated
* since the last time they've
* been set?
*/
bool amdgpu_ttm_tt_userptr_needs_pages(struct ttm_tt *ttm)
{
struct amdgpu_ttm_tt *gtt = (void *)ttm;
@ -1188,6 +1430,9 @@ bool amdgpu_ttm_tt_userptr_needs_pages(struct ttm_tt *ttm)
return atomic_read(&gtt->mmu_invalidations) != gtt->last_set_pages;
}
/**
* amdgpu_ttm_tt_is_readonly - Is the ttm_tt object read only?
*/
bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm)
{
struct amdgpu_ttm_tt *gtt = (void *)ttm;
@ -1198,6 +1443,12 @@ bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm)
return !!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
}
/**
* amdgpu_ttm_tt_pte_flags - Compute PTE flags for ttm_tt object
*
* @ttm: The ttm_tt object to compute the flags for
* @mem: The memory registry backing this ttm_tt object
*/
uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
struct ttm_mem_reg *mem)
{
@ -1222,6 +1473,16 @@ uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
return flags;
}
/**
* amdgpu_ttm_bo_eviction_valuable - Check to see if we can evict
* a buffer object.
*
* Return true if eviction is sensible. Called by
* ttm_mem_evict_first() on behalf of ttm_bo_mem_force_space()
* which tries to evict buffer objects until it can find space
* for a new object and by ttm_bo_force_list_clean() which is
* used to clean out a memory space.
*/
static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
const struct ttm_place *place)
{
@ -1268,6 +1529,19 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
return ttm_bo_eviction_valuable(bo, place);
}
/**
* amdgpu_ttm_access_memory - Read or Write memory that backs a
* buffer object.
*
* @bo: The buffer object to read/write
* @offset: Offset into buffer object
* @buf: Secondary buffer to write/read from
* @len: Length in bytes of access
* @write: true if writing
*
* This is used to access VRAM that backs a buffer object via MMIO
* access for debugging purposes.
*/
static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
unsigned long offset,
void *buf, int len, int write)
@ -1444,13 +1718,22 @@ static int amdgpu_ttm_fw_reserve_vram_init(struct amdgpu_device *adev)
adev->fw_vram_usage.reserved_bo = NULL;
return r;
}
/**
* amdgpu_ttm_init - Init the memory management (ttm) as well as
* various gtt/vram related fields.
*
* This initializes all of the memory space pools that the TTM layer
* will need such as the GTT space (system memory mapped to the device),
* VRAM (on-board memory), and on-chip memories (GDS, GWS, OA) which
* can be mapped per VMID.
*/
int amdgpu_ttm_init(struct amdgpu_device *adev)
{
uint64_t gtt_size;
int r;
u64 vis_vram_limit;
/* initialize global references for vram/gtt */
r = amdgpu_ttm_global_init(adev);
if (r) {
return r;
@ -1471,6 +1754,7 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
/* We opt to avoid OOM on system page allocations */
adev->mman.bdev.no_retry = true;
/* Initialize VRAM pool with all of VRAM divided into pages */
r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_VRAM,
adev->gmc.real_vram_size >> PAGE_SHIFT);
if (r) {
@ -1500,6 +1784,10 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
return r;
}
/* allocate memory as required for VGA
* This is used for VGA emulation and pre-OS scanout buffers to
* avoid display artifacts while transitioning between pre-OS
* and driver. */
if (adev->gmc.stolen_size) {
r = amdgpu_bo_create_kernel(adev, adev->gmc.stolen_size, PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM,
@ -1511,6 +1799,8 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
DRM_INFO("amdgpu: %uM of VRAM memory ready\n",
(unsigned) (adev->gmc.real_vram_size / (1024 * 1024)));
/* Compute GTT size, either based on 3/4 of the RAM size
* or whatever the user passed on module init */
if (amdgpu_gtt_size == -1) {
struct sysinfo si;
@ -1521,6 +1811,8 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
}
else
gtt_size = (uint64_t)amdgpu_gtt_size << 20;
/* Initialize GTT memory pool */
r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_TT, gtt_size >> PAGE_SHIFT);
if (r) {
DRM_ERROR("Failed initializing GTT heap.\n");
@ -1529,6 +1821,7 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
DRM_INFO("amdgpu: %uM of GTT memory ready.\n",
(unsigned)(gtt_size / (1024 * 1024)));
/* Initialize various on-chip memory pools */
adev->gds.mem.total_size = adev->gds.mem.total_size << AMDGPU_GDS_SHIFT;
adev->gds.mem.gfx_partition_size = adev->gds.mem.gfx_partition_size << AMDGPU_GDS_SHIFT;
adev->gds.mem.cs_partition_size = adev->gds.mem.cs_partition_size << AMDGPU_GDS_SHIFT;
@ -1568,6 +1861,7 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
}
}
/* Register debugfs entries for amdgpu_ttm */
r = amdgpu_ttm_debugfs_init(adev);
if (r) {
DRM_ERROR("Failed to init debugfs\n");
@ -1576,11 +1870,19 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
return 0;
}
/**
* amdgpu_ttm_late_init - Handle any late initialization for
* amdgpu_ttm
*/
void amdgpu_ttm_late_init(struct amdgpu_device *adev)
{
/* return the VGA stolen memory (if any) back to VRAM */
amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, NULL);
}
/**
* amdgpu_ttm_fini - De-initialize the TTM memory pools
*/
void amdgpu_ttm_fini(struct amdgpu_device *adev)
{
if (!adev->mman.initialized)
@ -1908,6 +2210,11 @@ static const struct drm_info_list amdgpu_ttm_debugfs_list[] = {
#endif
};
/**
* amdgpu_ttm_vram_read - Linear read access to VRAM
*
* Accesses VRAM via MMIO for debugging purposes.
*/
static ssize_t amdgpu_ttm_vram_read(struct file *f, char __user *buf,
size_t size, loff_t *pos)
{
@ -1947,6 +2254,11 @@ static ssize_t amdgpu_ttm_vram_read(struct file *f, char __user *buf,
return result;
}
/**
* amdgpu_ttm_vram_write - Linear write access to VRAM
*
* Accesses VRAM via MMIO for debugging purposes.
*/
static ssize_t amdgpu_ttm_vram_write(struct file *f, const char __user *buf,
size_t size, loff_t *pos)
{
@ -1995,6 +2307,9 @@ static const struct file_operations amdgpu_ttm_vram_fops = {
#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
/**
* amdgpu_ttm_gtt_read - Linear read access to GTT memory
*/
static ssize_t amdgpu_ttm_gtt_read(struct file *f, char __user *buf,
size_t size, loff_t *pos)
{
@ -2042,6 +2357,13 @@ static const struct file_operations amdgpu_ttm_gtt_fops = {
#endif
/**
* amdgpu_iomem_read - Virtual read access to GPU mapped memory
*
* This function is used to read memory that has been mapped to the
* GPU and the known addresses are not physical addresses but instead
* bus addresses (e.g., what you'd put in an IB or ring buffer).
*/
static ssize_t amdgpu_iomem_read(struct file *f, char __user *buf,
size_t size, loff_t *pos)
{
@ -2050,6 +2372,7 @@ static ssize_t amdgpu_iomem_read(struct file *f, char __user *buf,
ssize_t result = 0;
int r;
/* retrieve the IOMMU domain if any for this device */
dom = iommu_get_domain_for_dev(adev->dev);
while (size) {
@ -2062,6 +2385,10 @@ static ssize_t amdgpu_iomem_read(struct file *f, char __user *buf,
bytes = bytes < size ? bytes : size;
/* Translate the bus address to a physical address. If
* the domain is NULL it means there is no IOMMU active
* and the address translation is the identity
*/
addr = dom ? iommu_iova_to_phys(dom, addr) : addr;
pfn = addr >> PAGE_SHIFT;
@ -2086,6 +2413,13 @@ static ssize_t amdgpu_iomem_read(struct file *f, char __user *buf,
return result;
}
/**
* amdgpu_iomem_write - Virtual write access to GPU mapped memory
*
* This function is used to write memory that has been mapped to the
* GPU and the known addresses are not physical addresses but instead
* bus addresses (e.g., what you'd put in an IB or ring buffer).
*/
static ssize_t amdgpu_iomem_write(struct file *f, const char __user *buf,
size_t size, loff_t *pos)
{

File: drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c

@ -307,6 +307,8 @@ amdgpu_ucode_get_load_type(struct amdgpu_device *adev, int load_type)
return AMDGPU_FW_LOAD_DIRECT;
else
return AMDGPU_FW_LOAD_PSP;
case CHIP_VEGA20:
return AMDGPU_FW_LOAD_DIRECT;
default:
DRM_ERROR("Unknown firmware load type\n");
}

File: drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c

@ -70,12 +70,14 @@
#define FIRMWARE_VEGA10 "amdgpu/vega10_uvd.bin"
#define FIRMWARE_VEGA12 "amdgpu/vega12_uvd.bin"
#define FIRMWARE_VEGA20 "amdgpu/vega20_uvd.bin"
#define mmUVD_GPCOM_VCPU_DATA0_VEGA10 (0x03c4 + 0x7e00)
#define mmUVD_GPCOM_VCPU_DATA1_VEGA10 (0x03c5 + 0x7e00)
#define mmUVD_GPCOM_VCPU_CMD_VEGA10 (0x03c3 + 0x7e00)
#define mmUVD_NO_OP_VEGA10 (0x03ff + 0x7e00)
#define mmUVD_ENGINE_CNTL_VEGA10 (0x03c6 + 0x7e00)
/* These are common relative offsets for all ASICs, from uvd_7_0_offset.h */
#define UVD_GPCOM_VCPU_CMD 0x03c3
#define UVD_GPCOM_VCPU_DATA0 0x03c4
#define UVD_GPCOM_VCPU_DATA1 0x03c5
#define UVD_NO_OP 0x03ff
#define UVD_BASE_SI 0x3800
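
A quick arithmetic check of the rework, using only the constants visible above: the removed Vega10 macros were the same relative offsets plus a fixed 0x7e00 base.

	/*
	 * mmUVD_GPCOM_VCPU_DATA0_VEGA10 = 0x03c4 + 0x7e00
	 *                               = UVD_GPCOM_VCPU_DATA0 + <vega10 base>
	 * so a looked-up per-instance base plus the common relative offsets
	 * encodes the same registers the per-ASIC macros did.
	 */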
/**
* amdgpu_uvd_cs_ctx - Command submission parser context
@ -114,6 +116,7 @@ MODULE_FIRMWARE(FIRMWARE_VEGAM);
MODULE_FIRMWARE(FIRMWARE_VEGA10);
MODULE_FIRMWARE(FIRMWARE_VEGA12);
MODULE_FIRMWARE(FIRMWARE_VEGA20);
static void amdgpu_uvd_idle_work_handler(struct work_struct *work);
@ -125,9 +128,9 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
const char *fw_name;
const struct common_firmware_header *hdr;
unsigned version_major, version_minor, family_id;
int i, r;
int i, j, r;
INIT_DELAYED_WORK(&adev->uvd.idle_work, amdgpu_uvd_idle_work_handler);
INIT_DELAYED_WORK(&adev->uvd.inst->idle_work, amdgpu_uvd_idle_work_handler);
switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_CIK
@ -177,6 +180,9 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
case CHIP_VEGAM:
fw_name = FIRMWARE_VEGAM;
break;
case CHIP_VEGA20:
fw_name = FIRMWARE_VEGA20;
break;
default:
return -EINVAL;
}
@ -231,28 +237,30 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM, &adev->uvd.vcpu_bo,
&adev->uvd.gpu_addr, &adev->uvd.cpu_addr);
if (r) {
dev_err(adev->dev, "(%d) failed to allocate UVD bo\n", r);
return r;
}
for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
ring = &adev->uvd.ring;
rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
r = drm_sched_entity_init(&ring->sched, &adev->uvd.entity,
rq, NULL);
if (r != 0) {
DRM_ERROR("Failed setting up UVD run queue.\n");
return r;
}
r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM, &adev->uvd.inst[j].vcpu_bo,
&adev->uvd.inst[j].gpu_addr, &adev->uvd.inst[j].cpu_addr);
if (r) {
dev_err(adev->dev, "(%d) failed to allocate UVD bo\n", r);
return r;
}
for (i = 0; i < adev->uvd.max_handles; ++i) {
atomic_set(&adev->uvd.handles[i], 0);
adev->uvd.filp[i] = NULL;
}
ring = &adev->uvd.inst[j].ring;
rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
r = drm_sched_entity_init(&ring->sched, &adev->uvd.inst[j].entity,
rq, NULL);
if (r != 0) {
DRM_ERROR("Failed setting up UVD(%d) run queue.\n", j);
return r;
}
for (i = 0; i < adev->uvd.max_handles; ++i) {
atomic_set(&adev->uvd.inst[j].handles[i], 0);
adev->uvd.inst[j].filp[i] = NULL;
}
}
/* from uvd v5.0 HW addressing capacity increased to 64 bits */
if (!amdgpu_device_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_UVD, 5, 0))
adev->uvd.address_64_bit = true;
@ -279,20 +287,22 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
{
int i;
kfree(adev->uvd.saved_bo);
int i, j;
drm_sched_entity_fini(&adev->uvd.ring.sched, &adev->uvd.entity);
for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
kfree(adev->uvd.inst[j].saved_bo);
amdgpu_bo_free_kernel(&adev->uvd.vcpu_bo,
&adev->uvd.gpu_addr,
(void **)&adev->uvd.cpu_addr);
drm_sched_entity_fini(&adev->uvd.inst[j].ring.sched, &adev->uvd.inst[j].entity);
amdgpu_ring_fini(&adev->uvd.ring);
amdgpu_bo_free_kernel(&adev->uvd.inst[j].vcpu_bo,
&adev->uvd.inst[j].gpu_addr,
(void **)&adev->uvd.inst[j].cpu_addr);
for (i = 0; i < AMDGPU_MAX_UVD_ENC_RINGS; ++i)
amdgpu_ring_fini(&adev->uvd.ring_enc[i]);
amdgpu_ring_fini(&adev->uvd.inst[j].ring);
for (i = 0; i < AMDGPU_MAX_UVD_ENC_RINGS; ++i)
amdgpu_ring_fini(&adev->uvd.inst[j].ring_enc[i]);
}
release_firmware(adev->uvd.fw);
return 0;
@ -302,32 +312,33 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev)
{
unsigned size;
void *ptr;
int i;
int i, j;
if (adev->uvd.vcpu_bo == NULL)
return 0;
for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
if (adev->uvd.inst[j].vcpu_bo == NULL)
continue;
cancel_delayed_work_sync(&adev->uvd.idle_work);
cancel_delayed_work_sync(&adev->uvd.inst[j].idle_work);
/* only valid for physical mode */
if (adev->asic_type < CHIP_POLARIS10) {
for (i = 0; i < adev->uvd.max_handles; ++i)
if (atomic_read(&adev->uvd.handles[i]))
break;
/* only valid for physical mode */
if (adev->asic_type < CHIP_POLARIS10) {
for (i = 0; i < adev->uvd.max_handles; ++i)
if (atomic_read(&adev->uvd.inst[j].handles[i]))
break;
if (i == adev->uvd.max_handles)
return 0;
if (i == adev->uvd.max_handles)
continue;
}
size = amdgpu_bo_size(adev->uvd.inst[j].vcpu_bo);
ptr = adev->uvd.inst[j].cpu_addr;
adev->uvd.inst[j].saved_bo = kmalloc(size, GFP_KERNEL);
if (!adev->uvd.inst[j].saved_bo)
return -ENOMEM;
memcpy_fromio(adev->uvd.inst[j].saved_bo, ptr, size);
}
size = amdgpu_bo_size(adev->uvd.vcpu_bo);
ptr = adev->uvd.cpu_addr;
adev->uvd.saved_bo = kmalloc(size, GFP_KERNEL);
if (!adev->uvd.saved_bo)
return -ENOMEM;
memcpy_fromio(adev->uvd.saved_bo, ptr, size);
return 0;
}
@ -335,59 +346,65 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev)
{
unsigned size;
void *ptr;
int i;
if (adev->uvd.vcpu_bo == NULL)
return -EINVAL;
for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
if (adev->uvd.inst[i].vcpu_bo == NULL)
return -EINVAL;
size = amdgpu_bo_size(adev->uvd.vcpu_bo);
ptr = adev->uvd.cpu_addr;
size = amdgpu_bo_size(adev->uvd.inst[i].vcpu_bo);
ptr = adev->uvd.inst[i].cpu_addr;
if (adev->uvd.saved_bo != NULL) {
memcpy_toio(ptr, adev->uvd.saved_bo, size);
kfree(adev->uvd.saved_bo);
adev->uvd.saved_bo = NULL;
} else {
const struct common_firmware_header *hdr;
unsigned offset;
if (adev->uvd.inst[i].saved_bo != NULL) {
memcpy_toio(ptr, adev->uvd.inst[i].saved_bo, size);
kfree(adev->uvd.inst[i].saved_bo);
adev->uvd.inst[i].saved_bo = NULL;
} else {
const struct common_firmware_header *hdr;
unsigned offset;
hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
memcpy_toio(adev->uvd.cpu_addr, adev->uvd.fw->data + offset,
le32_to_cpu(hdr->ucode_size_bytes));
size -= le32_to_cpu(hdr->ucode_size_bytes);
ptr += le32_to_cpu(hdr->ucode_size_bytes);
hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
memcpy_toio(adev->uvd.inst[i].cpu_addr, adev->uvd.fw->data + offset,
le32_to_cpu(hdr->ucode_size_bytes));
size -= le32_to_cpu(hdr->ucode_size_bytes);
ptr += le32_to_cpu(hdr->ucode_size_bytes);
}
memset_io(ptr, 0, size);
/* to restore uvd fence seq */
amdgpu_fence_driver_force_completion(&adev->uvd.inst[i].ring);
}
memset_io(ptr, 0, size);
/* to restore uvd fence seq */
amdgpu_fence_driver_force_completion(&adev->uvd.ring);
}
return 0;
}
void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
{
struct amdgpu_ring *ring = &adev->uvd.ring;
int i, r;
struct amdgpu_ring *ring;
int i, j, r;
for (i = 0; i < adev->uvd.max_handles; ++i) {
uint32_t handle = atomic_read(&adev->uvd.handles[i]);
if (handle != 0 && adev->uvd.filp[i] == filp) {
struct dma_fence *fence;
for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
ring = &adev->uvd.inst[j].ring;
r = amdgpu_uvd_get_destroy_msg(ring, handle,
false, &fence);
if (r) {
DRM_ERROR("Error destroying UVD (%d)!\n", r);
continue;
for (i = 0; i < adev->uvd.max_handles; ++i) {
uint32_t handle = atomic_read(&adev->uvd.inst[j].handles[i]);
if (handle != 0 && adev->uvd.inst[j].filp[i] == filp) {
struct dma_fence *fence;
r = amdgpu_uvd_get_destroy_msg(ring, handle,
false, &fence);
if (r) {
DRM_ERROR("Error destroying UVD(%d) %d!\n", j, r);
continue;
}
dma_fence_wait(fence, false);
dma_fence_put(fence);
adev->uvd.inst[j].filp[i] = NULL;
atomic_set(&adev->uvd.inst[j].handles[i], 0);
}
dma_fence_wait(fence, false);
dma_fence_put(fence);
adev->uvd.filp[i] = NULL;
atomic_set(&adev->uvd.handles[i], 0);
}
}
}
@ -662,15 +679,16 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
void *ptr;
long r;
int i;
uint32_t ip_instance = ctx->parser->job->ring->me;
if (offset & 0x3F) {
DRM_ERROR("UVD messages must be 64 byte aligned!\n");
DRM_ERROR("UVD(%d) messages must be 64 byte aligned!\n", ip_instance);
return -EINVAL;
}
r = amdgpu_bo_kmap(bo, &ptr);
if (r) {
DRM_ERROR("Failed mapping the UVD message (%ld)!\n", r);
DRM_ERROR("Failed mapping the UVD(%d) message (%ld)!\n", ip_instance, r);
return r;
}
@ -680,7 +698,7 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
handle = msg[2];
if (handle == 0) {
DRM_ERROR("Invalid UVD handle!\n");
DRM_ERROR("Invalid UVD(%d) handle!\n", ip_instance);
return -EINVAL;
}
@ -691,18 +709,18 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
/* try to alloc a new handle */
for (i = 0; i < adev->uvd.max_handles; ++i) {
if (atomic_read(&adev->uvd.handles[i]) == handle) {
DRM_ERROR("Handle 0x%x already in use!\n", handle);
if (atomic_read(&adev->uvd.inst[ip_instance].handles[i]) == handle) {
DRM_ERROR("(%d)Handle 0x%x already in use!\n", ip_instance, handle);
return -EINVAL;
}
if (!atomic_cmpxchg(&adev->uvd.handles[i], 0, handle)) {
adev->uvd.filp[i] = ctx->parser->filp;
if (!atomic_cmpxchg(&adev->uvd.inst[ip_instance].handles[i], 0, handle)) {
adev->uvd.inst[ip_instance].filp[i] = ctx->parser->filp;
return 0;
}
}
DRM_ERROR("No more free UVD handles!\n");
DRM_ERROR("No more free UVD(%d) handles!\n", ip_instance);
return -ENOSPC;
case 1:
@ -714,27 +732,27 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
/* validate the handle */
for (i = 0; i < adev->uvd.max_handles; ++i) {
if (atomic_read(&adev->uvd.handles[i]) == handle) {
if (adev->uvd.filp[i] != ctx->parser->filp) {
DRM_ERROR("UVD handle collision detected!\n");
if (atomic_read(&adev->uvd.inst[ip_instance].handles[i]) == handle) {
if (adev->uvd.inst[ip_instance].filp[i] != ctx->parser->filp) {
DRM_ERROR("UVD(%d) handle collision detected!\n", ip_instance);
return -EINVAL;
}
return 0;
}
}
DRM_ERROR("Invalid UVD handle 0x%x!\n", handle);
DRM_ERROR("Invalid UVD(%d) handle 0x%x!\n", ip_instance, handle);
return -ENOENT;
case 2:
/* it's a destroy msg, free the handle */
for (i = 0; i < adev->uvd.max_handles; ++i)
atomic_cmpxchg(&adev->uvd.handles[i], handle, 0);
atomic_cmpxchg(&adev->uvd.inst[ip_instance].handles[i], handle, 0);
amdgpu_bo_kunmap(bo);
return 0;
default:
DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type);
DRM_ERROR("Illegal UVD(%d) message type (%d)!\n", ip_instance, msg_type);
return -EINVAL;
}
BUG();
@ -805,7 +823,7 @@ static int amdgpu_uvd_cs_pass2(struct amdgpu_uvd_cs_ctx *ctx)
}
if ((cmd == 0 || cmd == 0x3) &&
(start >> 28) != (ctx->parser->adev->uvd.gpu_addr >> 28)) {
(start >> 28) != (ctx->parser->adev->uvd.inst->gpu_addr >> 28)) {
DRM_ERROR("msg/fb buffer %LX-%LX out of 256MB segment!\n",
start, end);
return -EINVAL;
@ -973,6 +991,8 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
uint64_t addr;
long r;
int i;
unsigned offset_idx = 0;
unsigned offset[3] = { UVD_BASE_SI, 0, 0 };
amdgpu_bo_kunmap(bo);
amdgpu_bo_unpin(bo);
@ -992,17 +1012,16 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
goto err;
if (adev->asic_type >= CHIP_VEGA10) {
data[0] = PACKET0(mmUVD_GPCOM_VCPU_DATA0_VEGA10, 0);
data[1] = PACKET0(mmUVD_GPCOM_VCPU_DATA1_VEGA10, 0);
data[2] = PACKET0(mmUVD_GPCOM_VCPU_CMD_VEGA10, 0);
data[3] = PACKET0(mmUVD_NO_OP_VEGA10, 0);
} else {
data[0] = PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0);
data[1] = PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0);
data[2] = PACKET0(mmUVD_GPCOM_VCPU_CMD, 0);
data[3] = PACKET0(mmUVD_NO_OP, 0);
offset_idx = 1 + ring->me;
offset[1] = adev->reg_offset[UVD_HWIP][0][1];
offset[2] = adev->reg_offset[UVD_HWIP][1][1];
}
data[0] = PACKET0(offset[offset_idx] + UVD_GPCOM_VCPU_DATA0, 0);
data[1] = PACKET0(offset[offset_idx] + UVD_GPCOM_VCPU_DATA1, 0);
data[2] = PACKET0(offset[offset_idx] + UVD_GPCOM_VCPU_CMD, 0);
data[3] = PACKET0(offset[offset_idx] + UVD_NO_OP, 0);
ib = &job->ibs[0];
addr = amdgpu_bo_gpu_offset(bo);
ib->ptr[0] = data[0];
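
A worked reading of the offset selection above, with the instance indices assumed for illustration:

	/*
	 * Pre-Vega10: offset_idx stays 0, so
	 *   data[0] = PACKET0(UVD_BASE_SI + UVD_GPCOM_VCPU_DATA0, 0)
	 * reproduces the old mmUVD_GPCOM_VCPU_DATA0 encoding.
	 * Vega10 and later: offset_idx = 1 + ring->me, so a ring on the
	 * second UVD instance (ring->me == 1) selects offset[2] =
	 * adev->reg_offset[UVD_HWIP][1][1], that instance's register base.
	 */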
@ -1038,7 +1057,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
if (r)
goto err_free;
r = amdgpu_job_submit(job, ring, &adev->uvd.entity,
r = amdgpu_job_submit(job, ring, &adev->uvd.inst[ring->me].entity,
AMDGPU_FENCE_OWNER_UNDEFINED, &f);
if (r)
goto err_free;
@ -1126,8 +1145,15 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
{
struct amdgpu_device *adev =
container_of(work, struct amdgpu_device, uvd.idle_work.work);
unsigned fences = amdgpu_fence_count_emitted(&adev->uvd.ring);
container_of(work, struct amdgpu_device, uvd.inst->idle_work.work);
unsigned fences = 0, i, j;
for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
fences += amdgpu_fence_count_emitted(&adev->uvd.inst[i].ring);
for (j = 0; j < adev->uvd.num_enc_rings; ++j) {
fences += amdgpu_fence_count_emitted(&adev->uvd.inst[i].ring_enc[j]);
}
}
if (fences == 0) {
if (adev->pm.dpm_enabled) {
@ -1141,7 +1167,7 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
AMD_CG_STATE_GATE);
}
} else {
schedule_delayed_work(&adev->uvd.idle_work, UVD_IDLE_TIMEOUT);
schedule_delayed_work(&adev->uvd.inst->idle_work, UVD_IDLE_TIMEOUT);
}
}
@ -1153,7 +1179,7 @@ void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring)
if (amdgpu_sriov_vf(adev))
return;
set_clocks = !cancel_delayed_work_sync(&adev->uvd.idle_work);
set_clocks = !cancel_delayed_work_sync(&adev->uvd.inst->idle_work);
if (set_clocks) {
if (adev->pm.dpm_enabled) {
amdgpu_dpm_enable_uvd(adev, true);
@ -1170,7 +1196,7 @@ void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring)
void amdgpu_uvd_ring_end_use(struct amdgpu_ring *ring)
{
if (!amdgpu_sriov_vf(ring->adev))
schedule_delayed_work(&ring->adev->uvd.idle_work, UVD_IDLE_TIMEOUT);
schedule_delayed_work(&ring->adev->uvd.inst->idle_work, UVD_IDLE_TIMEOUT);
}
/**
@ -1184,27 +1210,28 @@ int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
struct dma_fence *fence;
long r;
uint32_t ip_instance = ring->me;
r = amdgpu_uvd_get_create_msg(ring, 1, NULL);
if (r) {
DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r);
DRM_ERROR("amdgpu: (%d)failed to get create msg (%ld).\n", ip_instance, r);
goto error;
}
r = amdgpu_uvd_get_destroy_msg(ring, 1, true, &fence);
if (r) {
DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r);
DRM_ERROR("amdgpu: (%d)failed to get destroy ib (%ld).\n", ip_instance, r);
goto error;
}
r = dma_fence_wait_timeout(fence, false, timeout);
if (r == 0) {
DRM_ERROR("amdgpu: IB test timed out.\n");
DRM_ERROR("amdgpu: (%d)IB test timed out.\n", ip_instance);
r = -ETIMEDOUT;
} else if (r < 0) {
DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
DRM_ERROR("amdgpu: (%d)fence wait failed (%ld).\n", ip_instance, r);
} else {
DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
DRM_DEBUG("ib test on (%d)ring %d succeeded\n", ip_instance, ring->idx);
r = 0;
}
@ -1232,7 +1259,7 @@ uint32_t amdgpu_uvd_used_handles(struct amdgpu_device *adev)
* necessarily linear. So we need to count
* all non-zero handles.
*/
if (atomic_read(&adev->uvd.handles[i]))
if (atomic_read(&adev->uvd.inst->handles[i]))
used_handles++;
}

File: drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h

@ -31,30 +31,37 @@
#define AMDGPU_UVD_SESSION_SIZE (50*1024)
#define AMDGPU_UVD_FIRMWARE_OFFSET 256
#define AMDGPU_MAX_UVD_INSTANCES 2
#define AMDGPU_UVD_FIRMWARE_SIZE(adev) \
(AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(((const struct common_firmware_header *)(adev)->uvd.fw->data)->ucode_size_bytes) + \
8) - AMDGPU_UVD_FIRMWARE_OFFSET)
struct amdgpu_uvd {
struct amdgpu_uvd_inst {
struct amdgpu_bo *vcpu_bo;
void *cpu_addr;
uint64_t gpu_addr;
unsigned fw_version;
void *saved_bo;
unsigned max_handles;
atomic_t handles[AMDGPU_MAX_UVD_HANDLES];
struct drm_file *filp[AMDGPU_MAX_UVD_HANDLES];
struct delayed_work idle_work;
const struct firmware *fw; /* UVD firmware */
struct amdgpu_ring ring;
struct amdgpu_ring ring_enc[AMDGPU_MAX_UVD_ENC_RINGS];
struct amdgpu_irq_src irq;
bool address_64_bit;
bool use_ctx_buf;
struct drm_sched_entity entity;
struct drm_sched_entity entity_enc;
uint32_t srbm_soft_reset;
};
struct amdgpu_uvd {
const struct firmware *fw; /* UVD firmware */
unsigned fw_version;
unsigned max_handles;
unsigned num_enc_rings;
uint8_t num_uvd_inst;
bool address_64_bit;
bool use_ctx_buf;
struct amdgpu_uvd_inst inst[AMDGPU_MAX_UVD_INSTANCES];
};
int amdgpu_uvd_sw_init(struct amdgpu_device *adev);
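
A minimal sketch of walking the split structure, matching the loops used throughout amdgpu_uvd.c in this diff; the wrapper function and debug prints are illustrative only:

	/* Shared state stays on adev->uvd; per-instance state (BO, rings,
	 * handles) now lives under adev->uvd.inst[]. */
	static void example_walk_uvd_rings(struct amdgpu_device *adev)
	{
		unsigned i, j;

		for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
			DRM_DEBUG("UVD%u dec ready: %d\n", i,
				  adev->uvd.inst[i].ring.ready);
			for (j = 0; j < adev->uvd.num_enc_rings; ++j)
				DRM_DEBUG("UVD%u enc%u ready: %d\n", i, j,
					  adev->uvd.inst[i].ring_enc[j].ready);
		}
	}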

File: drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c

@ -57,6 +57,7 @@
#define FIRMWARE_VEGA10 "amdgpu/vega10_vce.bin"
#define FIRMWARE_VEGA12 "amdgpu/vega12_vce.bin"
#define FIRMWARE_VEGA20 "amdgpu/vega20_vce.bin"
#ifdef CONFIG_DRM_AMDGPU_CIK
MODULE_FIRMWARE(FIRMWARE_BONAIRE);
@ -76,6 +77,7 @@ MODULE_FIRMWARE(FIRMWARE_VEGAM);
MODULE_FIRMWARE(FIRMWARE_VEGA10);
MODULE_FIRMWARE(FIRMWARE_VEGA12);
MODULE_FIRMWARE(FIRMWARE_VEGA20);
static void amdgpu_vce_idle_work_handler(struct work_struct *work);
@ -143,6 +145,9 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
case CHIP_VEGA12:
fw_name = FIRMWARE_VEGA12;
break;
case CHIP_VEGA20:
fw_name = FIRMWARE_VEGA20;
break;
default:
return -EINVAL;

File: drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c

@ -205,13 +205,18 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
struct amdgpu_device *adev =
container_of(work, struct amdgpu_device, vcn.idle_work.work);
unsigned fences = amdgpu_fence_count_emitted(&adev->vcn.ring_dec);
unsigned i;
for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
fences += amdgpu_fence_count_emitted(&adev->vcn.ring_enc[i]);
}
if (fences == 0) {
if (adev->pm.dpm_enabled) {
/* might be used when with pg/cg
if (adev->pm.dpm_enabled)
amdgpu_dpm_enable_uvd(adev, false);
*/
}
else
amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
AMD_PG_STATE_GATE);
} else {
schedule_delayed_work(&adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
}
@ -223,9 +228,11 @@ void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
bool set_clocks = !cancel_delayed_work_sync(&adev->vcn.idle_work);
if (set_clocks && adev->pm.dpm_enabled) {
/* might be used when with pg/cg
amdgpu_dpm_enable_uvd(adev, true);
*/
if (adev->pm.dpm_enabled)
amdgpu_dpm_enable_uvd(adev, true);
else
amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
AMD_PG_STATE_UNGATE);
}
}

View File

@ -45,6 +45,17 @@
#define VCN_ENC_CMD_REG_WRITE 0x0000000b
#define VCN_ENC_CMD_REG_WAIT 0x0000000c
enum engine_status_constants {
UVD_PGFSM_STATUS__UVDM_UVDU_PWR_ON = 0x2AAAA0,
UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON = 0x00000002,
UVD_STATUS__UVD_BUSY = 0x00000004,
GB_ADDR_CONFIG_DEFAULT = 0x26010011,
UVD_STATUS__IDLE = 0x2,
UVD_STATUS__BUSY = 0x5,
UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF = 0x1,
UVD_STATUS__RBC_BUSY = 0x1,
};
struct amdgpu_vcn {
struct amdgpu_bo *vcpu_bo;
void *cpu_addr;

View File

@ -119,9 +119,7 @@ static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
* is currently evicted. add the bo to the evicted list to make sure it
* is validated on next vm use to avoid fault.
*/
spin_lock(&vm->status_lock);
list_move_tail(&base->vm_status, &vm->evicted);
spin_unlock(&vm->status_lock);
}
/**
@ -226,24 +224,16 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
void *param)
{
struct ttm_bo_global *glob = adev->mman.bdev.glob;
int r;
struct amdgpu_vm_bo_base *bo_base, *tmp;
int r = 0;
spin_lock(&vm->status_lock);
while (!list_empty(&vm->evicted)) {
struct amdgpu_vm_bo_base *bo_base;
struct amdgpu_bo *bo;
list_for_each_entry_safe(bo_base, tmp, &vm->evicted, vm_status) {
struct amdgpu_bo *bo = bo_base->bo;
bo_base = list_first_entry(&vm->evicted,
struct amdgpu_vm_bo_base,
vm_status);
spin_unlock(&vm->status_lock);
bo = bo_base->bo;
BUG_ON(!bo);
if (bo->parent) {
r = validate(param, bo);
if (r)
return r;
break;
spin_lock(&glob->lru_lock);
ttm_bo_move_to_lru_tail(&bo->tbo);
@ -252,22 +242,29 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
spin_unlock(&glob->lru_lock);
}
if (bo->tbo.type == ttm_bo_type_kernel &&
vm->use_cpu_for_update) {
r = amdgpu_bo_kmap(bo, NULL);
if (r)
return r;
}
spin_lock(&vm->status_lock);
if (bo->tbo.type != ttm_bo_type_kernel)
if (bo->tbo.type != ttm_bo_type_kernel) {
spin_lock(&vm->moved_lock);
list_move(&bo_base->vm_status, &vm->moved);
else
spin_unlock(&vm->moved_lock);
} else {
list_move(&bo_base->vm_status, &vm->relocated);
}
}
spin_unlock(&vm->status_lock);
return 0;
spin_lock(&glob->lru_lock);
list_for_each_entry(bo_base, &vm->idle, vm_status) {
struct amdgpu_bo *bo = bo_base->bo;
if (!bo->parent)
continue;
ttm_bo_move_to_lru_tail(&bo->tbo);
if (bo->shadow)
ttm_bo_move_to_lru_tail(&bo->shadow->tbo);
}
spin_unlock(&glob->lru_lock);
return r;
}
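Taken together, the list moves above and below implement a simpler BO state machine; a sketch of it as read from this patch (not from the commit message):
	/* evicted   --validate-->  relocated (page-table BOs)
	 *                          moved     (everything else)
	 * relocated --amdgpu_vm_update_directories-->  idle
	 * moved     --amdgpu_vm_bo_update-->  idle, or back to evicted
	 *           when the BO sits outside its preferred domain
	 * status_lock is gone; moved gets its own moved_lock, the other
	 * lists are only touched with the VM reservation held.
	 */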
/**
@ -279,13 +276,7 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
*/
bool amdgpu_vm_ready(struct amdgpu_vm *vm)
{
bool ready;
spin_lock(&vm->status_lock);
ready = list_empty(&vm->evicted);
spin_unlock(&vm->status_lock);
return ready;
return list_empty(&vm->evicted);
}
/**
@ -477,9 +468,7 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
pt->parent = amdgpu_bo_ref(parent->base.bo);
amdgpu_vm_bo_base_init(&entry->base, vm, pt);
spin_lock(&vm->status_lock);
list_move(&entry->base.vm_status, &vm->relocated);
spin_unlock(&vm->status_lock);
}
if (level < AMDGPU_VM_PTB) {
@ -926,10 +915,8 @@ static void amdgpu_vm_invalidate_level(struct amdgpu_device *adev,
if (!entry->base.bo)
continue;
spin_lock(&vm->status_lock);
if (list_empty(&entry->base.vm_status))
list_add(&entry->base.vm_status, &vm->relocated);
spin_unlock(&vm->status_lock);
if (!entry->base.moved)
list_move(&entry->base.vm_status, &vm->relocated);
amdgpu_vm_invalidate_level(adev, vm, entry, level + 1);
}
}
@ -959,6 +946,14 @@ int amdgpu_vm_update_directories(struct amdgpu_device *adev,
params.adev = adev;
if (vm->use_cpu_for_update) {
struct amdgpu_vm_bo_base *bo_base;
list_for_each_entry(bo_base, &vm->relocated, vm_status) {
r = amdgpu_bo_kmap(bo_base->bo, NULL);
if (unlikely(r))
return r;
}
r = amdgpu_vm_wait_pd(adev, vm, AMDGPU_FENCE_OWNER_VM);
if (unlikely(r))
return r;
@ -974,7 +969,6 @@ int amdgpu_vm_update_directories(struct amdgpu_device *adev,
params.func = amdgpu_vm_do_set_ptes;
}
spin_lock(&vm->status_lock);
while (!list_empty(&vm->relocated)) {
struct amdgpu_vm_bo_base *bo_base, *parent;
struct amdgpu_vm_pt *pt, *entry;
@ -983,14 +977,12 @@ int amdgpu_vm_update_directories(struct amdgpu_device *adev,
bo_base = list_first_entry(&vm->relocated,
struct amdgpu_vm_bo_base,
vm_status);
list_del_init(&bo_base->vm_status);
spin_unlock(&vm->status_lock);
bo_base->moved = false;
list_move(&bo_base->vm_status, &vm->idle);
bo = bo_base->bo->parent;
if (!bo) {
spin_lock(&vm->status_lock);
if (!bo)
continue;
}
parent = list_first_entry(&bo->va, struct amdgpu_vm_bo_base,
bo_list);
@ -999,12 +991,10 @@ int amdgpu_vm_update_directories(struct amdgpu_device *adev,
amdgpu_vm_update_pde(&params, vm, pt, entry);
spin_lock(&vm->status_lock);
if (!vm->use_cpu_for_update &&
(ndw - params.ib->length_dw) < 32)
break;
}
spin_unlock(&vm->status_lock);
if (vm->use_cpu_for_update) {
/* Flush HDP */
@ -1107,9 +1097,7 @@ static void amdgpu_vm_handle_huge_pages(struct amdgpu_pte_update_params *p,
if (entry->huge) {
/* Add the entry to the relocated list to update it. */
entry->huge = false;
spin_lock(&p->vm->status_lock);
list_move(&entry->base.vm_status, &p->vm->relocated);
spin_unlock(&p->vm->status_lock);
}
return;
}
@ -1588,18 +1576,22 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
amdgpu_asic_flush_hdp(adev, NULL);
}
spin_lock(&vm->status_lock);
spin_lock(&vm->moved_lock);
list_del_init(&bo_va->base.vm_status);
spin_unlock(&vm->moved_lock);
/* If the BO is not in its preferred location add it back to
* the evicted list so that it gets validated again on the
* next command submission.
*/
if (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv &&
!(bo->preferred_domains &
amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type)))
list_add_tail(&bo_va->base.vm_status, &vm->evicted);
spin_unlock(&vm->status_lock);
if (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv) {
uint32_t mem_type = bo->tbo.mem.mem_type;
if (!(bo->preferred_domains & amdgpu_mem_type_to_domain(mem_type)))
list_add_tail(&bo_va->base.vm_status, &vm->evicted);
else
list_add(&bo_va->base.vm_status, &vm->idle);
}
list_splice_init(&bo_va->invalids, &bo_va->valids);
bo_va->cleared = clear;
@ -1808,19 +1800,18 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
struct amdgpu_vm *vm)
{
struct amdgpu_bo_va *bo_va, *tmp;
struct list_head moved;
bool clear;
int r = 0;
int r;
spin_lock(&vm->status_lock);
while (!list_empty(&vm->moved)) {
struct amdgpu_bo_va *bo_va;
struct reservation_object *resv;
INIT_LIST_HEAD(&moved);
spin_lock(&vm->moved_lock);
list_splice_init(&vm->moved, &moved);
spin_unlock(&vm->moved_lock);
bo_va = list_first_entry(&vm->moved,
struct amdgpu_bo_va, base.vm_status);
spin_unlock(&vm->status_lock);
resv = bo_va->base.bo->tbo.resv;
list_for_each_entry_safe(bo_va, tmp, &moved, base.vm_status) {
struct reservation_object *resv = bo_va->base.bo->tbo.resv;
/* Per VM BOs never need to be cleared in the page tables */
if (resv == vm->root.base.bo->tbo.resv)
@ -1833,17 +1824,19 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
clear = true;
r = amdgpu_vm_bo_update(adev, bo_va, clear);
if (r)
if (r) {
spin_lock(&vm->moved_lock);
list_splice(&moved, &vm->moved);
spin_unlock(&vm->moved_lock);
return r;
}
if (!clear && resv != vm->root.base.bo->tbo.resv)
reservation_object_unlock(resv);
spin_lock(&vm->status_lock);
}
spin_unlock(&vm->status_lock);
return r;
return 0;
}
/**
@ -1902,11 +1895,11 @@ static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev,
if (mapping->flags & AMDGPU_PTE_PRT)
amdgpu_vm_prt_get(adev);
if (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv) {
spin_lock(&vm->status_lock);
if (list_empty(&bo_va->base.vm_status))
list_add(&bo_va->base.vm_status, &vm->moved);
spin_unlock(&vm->status_lock);
if (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv &&
!bo_va->base.moved) {
spin_lock(&vm->moved_lock);
list_move(&bo_va->base.vm_status, &vm->moved);
spin_unlock(&vm->moved_lock);
}
trace_amdgpu_vm_bo_map(bo_va, mapping);
}
@ -2216,9 +2209,9 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
list_del(&bo_va->base.bo_list);
spin_lock(&vm->status_lock);
spin_lock(&vm->moved_lock);
list_del(&bo_va->base.vm_status);
spin_unlock(&vm->status_lock);
spin_unlock(&vm->moved_lock);
list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
list_del(&mapping->list);
@ -2258,31 +2251,28 @@ void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
list_for_each_entry(bo_base, &bo->va, bo_list) {
struct amdgpu_vm *vm = bo_base->vm;
bool was_moved = bo_base->moved;
bo_base->moved = true;
if (evicted && bo->tbo.resv == vm->root.base.bo->tbo.resv) {
spin_lock(&bo_base->vm->status_lock);
if (bo->tbo.type == ttm_bo_type_kernel)
list_move(&bo_base->vm_status, &vm->evicted);
else
list_move_tail(&bo_base->vm_status,
&vm->evicted);
spin_unlock(&bo_base->vm->status_lock);
continue;
}
if (was_moved)
continue;
if (bo->tbo.type == ttm_bo_type_kernel) {
spin_lock(&bo_base->vm->status_lock);
if (list_empty(&bo_base->vm_status))
list_add(&bo_base->vm_status, &vm->relocated);
spin_unlock(&bo_base->vm->status_lock);
continue;
list_move(&bo_base->vm_status, &vm->relocated);
} else {
spin_lock(&bo_base->vm->moved_lock);
list_move(&bo_base->vm_status, &vm->moved);
spin_unlock(&bo_base->vm->moved_lock);
}
spin_lock(&bo_base->vm->status_lock);
if (list_empty(&bo_base->vm_status))
list_add(&bo_base->vm_status, &vm->moved);
spin_unlock(&bo_base->vm->status_lock);
}
}
@ -2391,10 +2381,11 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
vm->va = RB_ROOT_CACHED;
for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
vm->reserved_vmid[i] = NULL;
spin_lock_init(&vm->status_lock);
INIT_LIST_HEAD(&vm->evicted);
INIT_LIST_HEAD(&vm->relocated);
spin_lock_init(&vm->moved_lock);
INIT_LIST_HEAD(&vm->moved);
INIT_LIST_HEAD(&vm->idle);
INIT_LIST_HEAD(&vm->freed);
/* create scheduler entity for page table updates */

View File

@ -168,9 +168,6 @@ struct amdgpu_vm {
/* tree of virtual addresses mapped */
struct rb_root_cached va;
/* protecting invalidated */
spinlock_t status_lock;
/* BOs who needs a validation */
struct list_head evicted;
@ -179,6 +176,10 @@ struct amdgpu_vm {
/* BOs moved, but not yet updated in the PT */
struct list_head moved;
spinlock_t moved_lock;
/* All BOs of this VM not currently in the state machine */
struct list_head idle;
/* BO mappings freed, but not yet updated in the PT */
struct list_head freed;
@ -187,9 +188,6 @@ struct amdgpu_vm {
struct amdgpu_vm_pt root;
struct dma_fence *last_update;
/* protecting freed */
spinlock_t freed_lock;
/* Scheduler entity for page table updates */
struct drm_sched_entity entity;

View File

@ -473,6 +473,7 @@ static int dce_virtual_hw_init(void *handle)
break;
case CHIP_VEGA10:
case CHIP_VEGA12:
case CHIP_VEGA20:
break;
default:
DRM_ERROR("Virtual display unsupported ASIC type: 0x%X\n", adev->asic_type);

View File

@ -102,6 +102,13 @@ static void df_v1_7_get_clockgating_state(struct amdgpu_device *adev,
*flags |= AMD_CG_SUPPORT_DF_MGCG;
}
static void df_v1_7_enable_ecc_force_par_wr_rmw(struct amdgpu_device *adev,
bool enable)
{
WREG32_FIELD15(DF, 0, DF_CS_AON0_CoherentSlaveModeCtrlA0,
ForceParWrRMW, enable);
}
const struct amdgpu_df_funcs df_v1_7_funcs = {
.init = df_v1_7_init,
.enable_broadcast_mode = df_v1_7_enable_broadcast_mode,
@ -109,4 +116,5 @@ const struct amdgpu_df_funcs df_v1_7_funcs = {
.get_hbm_channel_number = df_v1_7_get_hbm_channel_number,
.update_medium_grain_clock_gating = df_v1_7_update_medium_grain_clock_gating,
.get_clockgating_state = df_v1_7_get_clockgating_state,
.enable_ecc_force_par_wr_rmw = df_v1_7_enable_ecc_force_par_wr_rmw,
};

View File

@ -0,0 +1,116 @@
/*
* Copyright 2018 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "amdgpu.h"
#include "df_v3_6.h"
#include "df/df_3_6_default.h"
#include "df/df_3_6_offset.h"
#include "df/df_3_6_sh_mask.h"
static u32 df_v3_6_channel_number[] = {1, 2, 0, 4, 0, 8, 0,
16, 32, 0, 0, 0, 2, 4, 8};
static void df_v3_6_init(struct amdgpu_device *adev)
{
}
static void df_v3_6_enable_broadcast_mode(struct amdgpu_device *adev,
bool enable)
{
u32 tmp;
if (enable) {
tmp = RREG32_SOC15(DF, 0, mmFabricConfigAccessControl);
tmp &= ~FabricConfigAccessControl__CfgRegInstAccEn_MASK;
WREG32_SOC15(DF, 0, mmFabricConfigAccessControl, tmp);
} else
WREG32_SOC15(DF, 0, mmFabricConfigAccessControl,
mmFabricConfigAccessControl_DEFAULT);
}
static u32 df_v3_6_get_fb_channel_number(struct amdgpu_device *adev)
{
u32 tmp;
tmp = RREG32_SOC15(DF, 0, mmDF_CS_UMC_AON0_DramBaseAddress0);
tmp &= DF_CS_UMC_AON0_DramBaseAddress0__IntLvNumChan_MASK;
tmp >>= DF_CS_UMC_AON0_DramBaseAddress0__IntLvNumChan__SHIFT;
return tmp;
}
static u32 df_v3_6_get_hbm_channel_number(struct amdgpu_device *adev)
{
int fb_channel_number;
fb_channel_number = adev->df_funcs->get_fb_channel_number(adev);
if (fb_channel_number >= ARRAY_SIZE(df_v3_6_channel_number))
fb_channel_number = 0;
return df_v3_6_channel_number[fb_channel_number];
}
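A worked example of the lookup above, assuming the IntLvNumChan field encoding matches the table (illustrative, not taken from hardware docs):
	/* IntLvNumChan field = 5  ->  df_v3_6_channel_number[5] == 8 channels
	 * IntLvNumChan field = 6  ->  table holds 0 (reserved encoding)
	 * Out-of-range field values fall back to index 0, i.e. 1 channel.
	 */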
static void df_v3_6_update_medium_grain_clock_gating(struct amdgpu_device *adev,
bool enable)
{
u32 tmp;
/* Put DF on broadcast mode */
adev->df_funcs->enable_broadcast_mode(adev, true);
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DF_MGCG)) {
tmp = RREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater);
tmp &= ~DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK;
tmp |= DF_V3_6_MGCG_ENABLE_15_CYCLE_DELAY;
WREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater, tmp);
} else {
tmp = RREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater);
tmp &= ~DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK;
tmp |= DF_V3_6_MGCG_DISABLE;
WREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater, tmp);
}
/* Exit broadcast mode */
adev->df_funcs->enable_broadcast_mode(adev, false);
}
static void df_v3_6_get_clockgating_state(struct amdgpu_device *adev,
u32 *flags)
{
u32 tmp;
/* AMD_CG_SUPPORT_DF_MGCG */
tmp = RREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater);
if (tmp & DF_V3_6_MGCG_ENABLE_15_CYCLE_DELAY)
*flags |= AMD_CG_SUPPORT_DF_MGCG;
}
const struct amdgpu_df_funcs df_v3_6_funcs = {
.init = df_v3_6_init,
.enable_broadcast_mode = df_v3_6_enable_broadcast_mode,
.get_fb_channel_number = df_v3_6_get_fb_channel_number,
.get_hbm_channel_number = df_v3_6_get_hbm_channel_number,
.update_medium_grain_clock_gating =
df_v3_6_update_medium_grain_clock_gating,
.get_clockgating_state = df_v3_6_get_clockgating_state,
};

View File

@ -0,0 +1,40 @@
/*
* Copyright 2018 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#ifndef __DF_V3_6_H__
#define __DF_V3_6_H__
#include "soc15_common.h"
enum DF_V3_6_MGCG {
DF_V3_6_MGCG_DISABLE = 0,
DF_V3_6_MGCG_ENABLE_00_CYCLE_DELAY = 1,
DF_V3_6_MGCG_ENABLE_01_CYCLE_DELAY = 2,
DF_V3_6_MGCG_ENABLE_15_CYCLE_DELAY = 13,
DF_V3_6_MGCG_ENABLE_31_CYCLE_DELAY = 14,
DF_V3_6_MGCG_ENABLE_63_CYCLE_DELAY = 15
};
extern const struct amdgpu_df_funcs df_v3_6_funcs;
#endif

View File

@ -27,6 +27,7 @@
#include "amdgpu_gfx.h"
#include "soc15.h"
#include "soc15d.h"
#include "amdgpu_atomfirmware.h"
#include "gc/gc_9_0_offset.h"
#include "gc/gc_9_0_sh_mask.h"
@ -63,6 +64,13 @@ MODULE_FIRMWARE("amdgpu/vega12_mec.bin");
MODULE_FIRMWARE("amdgpu/vega12_mec2.bin");
MODULE_FIRMWARE("amdgpu/vega12_rlc.bin");
MODULE_FIRMWARE("amdgpu/vega20_ce.bin");
MODULE_FIRMWARE("amdgpu/vega20_pfp.bin");
MODULE_FIRMWARE("amdgpu/vega20_me.bin");
MODULE_FIRMWARE("amdgpu/vega20_mec.bin");
MODULE_FIRMWARE("amdgpu/vega20_mec2.bin");
MODULE_FIRMWARE("amdgpu/vega20_rlc.bin");
MODULE_FIRMWARE("amdgpu/raven_ce.bin");
MODULE_FIRMWARE("amdgpu/raven_pfp.bin");
MODULE_FIRMWARE("amdgpu/raven_me.bin");
@ -72,29 +80,22 @@ MODULE_FIRMWARE("amdgpu/raven_rlc.bin");
static const struct soc15_reg_golden golden_settings_gc_9_0[] =
{
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPC_UTCL1_CNTL, 0x08000000, 0x08000080),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_UTCL1_CNTL, 0x08000000, 0x08000080),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPG_UTCL1_CNTL, 0x08000000, 0x08000080),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000420),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_GPU_ID, 0x0000000f, 0x00000000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmIA_UTCL1_CNTL, 0x08000000, 0x08000080),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3, 0x00000003, 0x82400024),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_0, 0x08000000, 0x08000080),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_1, 0x08000000, 0x08000080),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_2, 0x08000000, 0x08000080),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL, 0x08000000, 0x08000080),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_UTCL1_CNTL, 0x08000000, 0x08000080),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSH_MEM_CONFIG, 0x00001000, 0x00001000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0x0000000f, 0x01000107),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_0, 0x0007ffff, 0x00000800),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_1, 0x0007ffff, 0x00000800),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_0, 0x01ffffff, 0x0000ff87),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_1, 0x01ffffff, 0x0000ff8f),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQC_CONFIG, 0x03000000, 0x020a2000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x4a2c0e68),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0xb5d3f197),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000003ff),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080)
SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000003ff)
};
static const struct soc15_reg_golden golden_settings_gc_9_0_vg10[] =
@ -108,6 +109,20 @@ static const struct soc15_reg_golden golden_settings_gc_9_0_vg10[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x00001800, 0x00000800)
};
static const struct soc15_reg_golden golden_settings_gc_9_0_vg20[] =
{
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_2, 0x0f000000, 0x0a000000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_3, 0x30000000, 0x10000000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xf3e777ff, 0x22014042),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xf3e777ff, 0x22014042),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0x00003e00, 0x00000400),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xff840000, 0x04040000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00030000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0xffff010f, 0x01000107),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0x000b0000, 0x000b0000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01000000, 0x01000000)
};
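For readers new to these tables: soc15_program_register_sequence() applies each SOC15_REG_GOLDEN_VALUE(ip, inst, reg, and_mask, or_mask) entry as a read-modify-write, roughly this sketch (simplified; register offset lookup omitted):
	u32 tmp = RREG32(reg);
	tmp &= ~and_mask;	/* clear the bits this golden value owns */
	tmp |= or_mask;		/* then set the golden bits */
	WREG32(reg, tmp);
The mmGB_ADDR_CONFIG entry above, for instance, clears 0xf3e777ff and sets 0x22014042, the same masking gfx_v9_0_gpu_early_init() repeats by hand for Vega20 further down.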
static const struct soc15_reg_golden golden_settings_gc_9_1[] =
{
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0xfffdf3cf, 0x00014104),
@ -241,6 +256,14 @@ static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev)
golden_settings_gc_9_2_1_vg12,
ARRAY_SIZE(golden_settings_gc_9_2_1_vg12));
break;
case CHIP_VEGA20:
soc15_program_register_sequence(adev,
golden_settings_gc_9_0,
ARRAY_SIZE(golden_settings_gc_9_0));
soc15_program_register_sequence(adev,
golden_settings_gc_9_0_vg20,
ARRAY_SIZE(golden_settings_gc_9_0_vg20));
break;
case CHIP_RAVEN:
soc15_program_register_sequence(adev,
golden_settings_gc_9_1,
@ -468,6 +491,9 @@ static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
case CHIP_VEGA12:
chip_name = "vega12";
break;
case CHIP_VEGA20:
chip_name = "vega20";
break;
case CHIP_RAVEN:
chip_name = "raven";
break;
@ -1088,9 +1114,10 @@ static const struct amdgpu_gfx_funcs gfx_v9_0_gfx_funcs = {
.select_me_pipe_q = &gfx_v9_0_select_me_pipe_q
};
static void gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
{
u32 gb_addr_config;
int err;
adev->gfx.funcs = &gfx_v9_0_gfx_funcs;
@ -1112,6 +1139,20 @@ static void gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
gb_addr_config = VEGA12_GB_ADDR_CONFIG_GOLDEN;
DRM_INFO("fix gfx.config for vega12\n");
break;
case CHIP_VEGA20:
adev->gfx.config.max_hw_contexts = 8;
adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
gb_addr_config = RREG32_SOC15(GC, 0, mmGB_ADDR_CONFIG);
gb_addr_config &= ~0xf3e777ff;
gb_addr_config |= 0x22014042;
/* check vbios table if gpu info is not available */
err = amdgpu_atomfirmware_get_gfx_info(adev);
if (err)
return err;
break;
case CHIP_RAVEN:
adev->gfx.config.max_hw_contexts = 8;
adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
@ -1161,6 +1202,8 @@ static void gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
adev->gfx.config.gb_addr_config,
GB_ADDR_CONFIG,
PIPE_INTERLEAVE_SIZE));
return 0;
}
static int gfx_v9_0_ngg_create_buf(struct amdgpu_device *adev,
@ -1394,6 +1437,7 @@ static int gfx_v9_0_sw_init(void *handle)
switch (adev->asic_type) {
case CHIP_VEGA10:
case CHIP_VEGA12:
case CHIP_VEGA20:
case CHIP_RAVEN:
adev->gfx.mec.num_mec = 2;
break;
@ -1521,7 +1565,9 @@ static int gfx_v9_0_sw_init(void *handle)
adev->gfx.ce_ram_size = 0x8000;
gfx_v9_0_gpu_early_init(adev);
r = gfx_v9_0_gpu_early_init(adev);
if (r)
return r;
r = gfx_v9_0_ngg_init(adev);
if (r)
@ -3688,6 +3734,7 @@ static int gfx_v9_0_set_clockgating_state(void *handle,
switch (adev->asic_type) {
case CHIP_VEGA10:
case CHIP_VEGA12:
case CHIP_VEGA20:
case CHIP_RAVEN:
gfx_v9_0_update_gfx_clock_gating(adev,
state == AMD_CG_STATE_GATE ? true : false);
@ -4680,6 +4727,7 @@ static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev)
switch (adev->asic_type) {
case CHIP_VEGA10:
case CHIP_VEGA12:
case CHIP_VEGA20:
case CHIP_RAVEN:
adev->gfx.rlc.funcs = &gfx_v9_0_rlc_funcs;
break;

View File

@ -675,6 +675,7 @@ static int gmc_v9_0_late_init(void *handle)
DRM_INFO("ECC is active.\n");
} else if (r == 0) {
DRM_INFO("ECC is not present.\n");
adev->df_funcs->enable_ecc_force_par_wr_rmw(adev, false);
} else {
DRM_ERROR("gmc_v9_0_ecc_available() failed. r: %d\n", r);
return r;
@ -693,10 +694,7 @@ static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
amdgpu_device_vram_location(adev, &adev->gmc, base);
amdgpu_device_gart_location(adev, mc);
/* base offset of vram pages */
if (adev->flags & AMD_IS_APU)
adev->vm_manager.vram_base_offset = gfxhub_v1_0_get_mc_fb_offset(adev);
else
adev->vm_manager.vram_base_offset = 0;
adev->vm_manager.vram_base_offset = gfxhub_v1_0_get_mc_fb_offset(adev);
}
/**
@ -755,6 +753,7 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
switch (adev->asic_type) {
case CHIP_VEGA10: /* all engines support GPUVM */
case CHIP_VEGA12: /* all engines support GPUVM */
case CHIP_VEGA20:
default:
adev->gmc.gart_size = 512ULL << 20;
break;
@ -860,6 +859,7 @@ static int gmc_v9_0_sw_init(void *handle)
break;
case CHIP_VEGA10:
case CHIP_VEGA12:
case CHIP_VEGA20:
/*
* To fulfill 4-level page support,
* vm size is 256TB (48bit), maximum size of Vega10,
@ -977,6 +977,7 @@ static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
switch (adev->asic_type) {
case CHIP_VEGA10:
case CHIP_VEGA20:
soc15_program_register_sequence(adev,
golden_settings_mmhub_1_0_0,
ARRAY_SIZE(golden_settings_mmhub_1_0_0));

View File

@ -734,6 +734,7 @@ int mmhub_v1_0_set_clockgating(struct amdgpu_device *adev,
switch (adev->asic_type) {
case CHIP_VEGA10:
case CHIP_VEGA12:
case CHIP_VEGA20:
case CHIP_RAVEN:
mmhub_v1_0_update_medium_grain_clock_gating(adev,
state == AMD_CG_STATE_GATE ? true : false);

View File

@ -34,10 +34,19 @@
#define smnCPM_CONTROL 0x11180460
#define smnPCIE_CNTL2 0x11180070
/* vega20 */
#define mmRCC_DEV0_EPF0_STRAP0_VG20 0x0011
#define mmRCC_DEV0_EPF0_STRAP0_VG20_BASE_IDX 2
static u32 nbio_v7_0_get_rev_id(struct amdgpu_device *adev)
{
u32 tmp = RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_STRAP0);
if (adev->asic_type == CHIP_VEGA20)
tmp = RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_STRAP0_VG20);
else
tmp = RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_STRAP0);
tmp &= RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0_MASK;
tmp >>= RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0__SHIFT;
@ -75,10 +84,14 @@ static void nbio_v7_0_sdma_doorbell_range(struct amdgpu_device *adev, int instan
SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA1_DOORBELL_RANGE);
u32 doorbell_range = RREG32(reg);
u32 range = 2;
if (adev->asic_type == CHIP_VEGA20)
range = 8;
if (use_doorbell) {
doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, OFFSET, doorbell_index);
doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, SIZE, 2);
doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, SIZE, range);
} else
doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, SIZE, 0);
@ -133,6 +146,9 @@ static void nbio_v7_0_update_medium_grain_clock_gating(struct amdgpu_device *ade
{
uint32_t def, data;
if (adev->asic_type == CHIP_VEGA20)
return;
/* NBIF_MGCG_CTRL_LCLK */
def = data = RREG32_PCIE(smnNBIF_MGCG_CTRL_LCLK);

View File

@ -41,6 +41,9 @@ MODULE_FIRMWARE("amdgpu/vega10_sos.bin");
MODULE_FIRMWARE("amdgpu/vega10_asd.bin");
MODULE_FIRMWARE("amdgpu/vega12_sos.bin");
MODULE_FIRMWARE("amdgpu/vega12_asd.bin");
MODULE_FIRMWARE("amdgpu/vega20_sos.bin");
MODULE_FIRMWARE("amdgpu/vega20_asd.bin");
#define smnMP1_FIRMWARE_FLAGS 0x3010028

View File

@ -42,6 +42,8 @@ MODULE_FIRMWARE("amdgpu/vega10_sdma.bin");
MODULE_FIRMWARE("amdgpu/vega10_sdma1.bin");
MODULE_FIRMWARE("amdgpu/vega12_sdma.bin");
MODULE_FIRMWARE("amdgpu/vega12_sdma1.bin");
MODULE_FIRMWARE("amdgpu/vega20_sdma.bin");
MODULE_FIRMWARE("amdgpu/vega20_sdma1.bin");
MODULE_FIRMWARE("amdgpu/raven_sdma.bin");
#define SDMA0_POWER_CNTL__ON_OFF_CONDITION_HOLD_TIME_MASK 0x000000F8L
@ -107,6 +109,28 @@ static const struct soc15_reg_golden golden_settings_sdma_4_1[] =
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0)
};
static const struct soc15_reg_golden golden_settings_sdma_4_2[] =
{
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CHICKEN_BITS, 0xfe931f07, 0x02831d07),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CLK_CTRL, 0xffffffff, 0x3f000100),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GFX_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_PAGE_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff0, 0x00403000),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831d07),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CLK_CTRL, 0xffffffff, 0x3f000100),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GFX_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_PAGE_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_PAGE, 0x000003ff, 0x000003c0)
};
static const struct soc15_reg_golden golden_settings_sdma_rv1[] =
{
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0018773f, 0x00000002),
@ -139,6 +163,11 @@ static void sdma_v4_0_init_golden_registers(struct amdgpu_device *adev)
golden_settings_sdma_vg12,
ARRAY_SIZE(golden_settings_sdma_vg12));
break;
case CHIP_VEGA20:
soc15_program_register_sequence(adev,
golden_settings_sdma_4_2,
ARRAY_SIZE(golden_settings_sdma_4_2));
break;
case CHIP_RAVEN:
soc15_program_register_sequence(adev,
golden_settings_sdma_4_1,
@ -182,6 +211,9 @@ static int sdma_v4_0_init_microcode(struct amdgpu_device *adev)
case CHIP_VEGA12:
chip_name = "vega12";
break;
case CHIP_VEGA20:
chip_name = "vega20";
break;
case CHIP_RAVEN:
chip_name = "raven";
break;
@ -1516,6 +1548,7 @@ static int sdma_v4_0_set_clockgating_state(void *handle,
switch (adev->asic_type) {
case CHIP_VEGA10:
case CHIP_VEGA12:
case CHIP_VEGA20:
case CHIP_RAVEN:
sdma_v4_0_update_medium_grain_clock_gating(adev,
state == AMD_CG_STATE_GATE ? true : false);

View File

@ -41,8 +41,6 @@
#include "sdma1/sdma1_4_0_offset.h"
#include "hdp/hdp_4_0_offset.h"
#include "hdp/hdp_4_0_sh_mask.h"
#include "mp/mp_9_0_offset.h"
#include "mp/mp_9_0_sh_mask.h"
#include "smuio/smuio_9_0_offset.h"
#include "smuio/smuio_9_0_sh_mask.h"
@ -53,6 +51,7 @@
#include "gfxhub_v1_0.h"
#include "mmhub_v1_0.h"
#include "df_v1_7.h"
#include "df_v3_6.h"
#include "vega10_ih.h"
#include "sdma_v4_0.h"
#include "uvd_v7_0.h"
@ -489,16 +488,24 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
case CHIP_RAVEN:
vega10_reg_base_init(adev);
break;
case CHIP_VEGA20:
vega20_reg_base_init(adev);
break;
default:
return -EINVAL;
}
if (adev->flags & AMD_IS_APU)
adev->nbio_funcs = &nbio_v7_0_funcs;
else if (adev->asic_type == CHIP_VEGA20)
adev->nbio_funcs = &nbio_v7_0_funcs;
else
adev->nbio_funcs = &nbio_v6_1_funcs;
adev->df_funcs = &df_v1_7_funcs;
if (adev->asic_type == CHIP_VEGA20)
adev->df_funcs = &df_v3_6_funcs;
else
adev->df_funcs = &df_v1_7_funcs;
adev->nbio_funcs->detect_hw_virt(adev);
if (amdgpu_sriov_vf(adev))
@ -507,12 +514,15 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
switch (adev->asic_type) {
case CHIP_VEGA10:
case CHIP_VEGA12:
case CHIP_VEGA20:
amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
if (!amdgpu_sriov_vf(adev))
amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
if (adev->asic_type != CHIP_VEGA20) {
amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
if (!amdgpu_sriov_vf(adev))
amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
}
if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
@ -660,6 +670,27 @@ static int soc15_common_early_init(void *handle)
adev->pg_flags = 0;
adev->external_rev_id = adev->rev_id + 0x14;
break;
case CHIP_VEGA20:
adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
AMD_CG_SUPPORT_GFX_MGLS |
AMD_CG_SUPPORT_GFX_CGCG |
AMD_CG_SUPPORT_GFX_CGLS |
AMD_CG_SUPPORT_GFX_3D_CGCG |
AMD_CG_SUPPORT_GFX_3D_CGLS |
AMD_CG_SUPPORT_GFX_CP_LS |
AMD_CG_SUPPORT_MC_LS |
AMD_CG_SUPPORT_MC_MGCG |
AMD_CG_SUPPORT_SDMA_MGCG |
AMD_CG_SUPPORT_SDMA_LS |
AMD_CG_SUPPORT_BIF_MGCG |
AMD_CG_SUPPORT_BIF_LS |
AMD_CG_SUPPORT_HDP_MGCG |
AMD_CG_SUPPORT_ROM_MGCG |
AMD_CG_SUPPORT_VCE_MGCG |
AMD_CG_SUPPORT_UVD_MGCG;
adev->pg_flags = 0;
adev->external_rev_id = adev->rev_id + 0x28;
break;
case CHIP_RAVEN:
adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
AMD_CG_SUPPORT_GFX_MGLS |
@ -679,8 +710,10 @@ static int soc15_common_early_init(void *handle)
AMD_CG_SUPPORT_MC_MGCG |
AMD_CG_SUPPORT_MC_LS |
AMD_CG_SUPPORT_SDMA_MGCG |
AMD_CG_SUPPORT_SDMA_LS;
adev->pg_flags = AMD_PG_SUPPORT_SDMA;
AMD_CG_SUPPORT_SDMA_LS |
AMD_CG_SUPPORT_VCN_MGCG;
adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN;
if (adev->powerplay.pp_feature & PP_GFXOFF_MASK)
adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
@ -872,6 +905,7 @@ static int soc15_common_set_clockgating_state(void *handle,
switch (adev->asic_type) {
case CHIP_VEGA10:
case CHIP_VEGA12:
case CHIP_VEGA20:
adev->nbio_funcs->update_medium_grain_clock_gating(adev,
state == AMD_CG_STATE_GATE ? true : false);
adev->nbio_funcs->update_medium_grain_light_sleep(adev,

View File

@ -55,5 +55,6 @@ void soc15_program_register_sequence(struct amdgpu_device *adev,
const u32 array_size);
int vega10_reg_base_init(struct amdgpu_device *adev);
int vega20_reg_base_init(struct amdgpu_device *adev);
#endif

View File

@ -47,6 +47,21 @@
#define WREG32_SOC15_OFFSET(ip, inst, reg, offset, value) \
WREG32((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) + offset, value)
#define SOC15_WAIT_ON_RREG(ip, inst, reg, expected_value, mask, ret) \
do { \
uint32_t tmp_ = RREG32(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg); \
uint32_t loop = adev->usec_timeout; \
while ((tmp_ & (mask)) != (expected_value)) { \
udelay(2); \
tmp_ = RREG32(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg); \
loop--; \
if (!loop) { \
ret = -ETIMEDOUT; \
break; \
} \
} \
} while (0)
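A usage sketch for the new polling helper, mirroring how the VCN code below calls it; note the macro assigns ret on timeout, so the caller must declare it:
	int ret = 0;
	/* Spin until the masked status matches, or fail with -ETIMEDOUT. */
	SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS,
			   UVD_PGFSM_STATUS__UVDM_UVDU_PWR_ON, 0xFFFFFF, ret);
	if (ret == -ETIMEDOUT)
		DRM_ERROR("UVD power-on handshake timed out\n");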
#endif

View File

@ -93,6 +93,7 @@ static void uvd_v4_2_ring_set_wptr(struct amdgpu_ring *ring)
static int uvd_v4_2_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
adev->uvd.num_uvd_inst = 1;
uvd_v4_2_set_ring_funcs(adev);
uvd_v4_2_set_irq_funcs(adev);
@ -107,7 +108,7 @@ static int uvd_v4_2_sw_init(void *handle)
int r;
/* UVD TRAP */
r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 124, &adev->uvd.irq);
r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 124, &adev->uvd.inst->irq);
if (r)
return r;
@ -119,9 +120,9 @@ static int uvd_v4_2_sw_init(void *handle)
if (r)
return r;
ring = &adev->uvd.ring;
ring = &adev->uvd.inst->ring;
sprintf(ring->name, "uvd");
r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0);
r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
return r;
}
@ -150,7 +151,7 @@ static void uvd_v4_2_enable_mgcg(struct amdgpu_device *adev,
static int uvd_v4_2_hw_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct amdgpu_ring *ring = &adev->uvd.ring;
struct amdgpu_ring *ring = &adev->uvd.inst->ring;
uint32_t tmp;
int r;
@ -208,7 +209,7 @@ static int uvd_v4_2_hw_init(void *handle)
static int uvd_v4_2_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct amdgpu_ring *ring = &adev->uvd.ring;
struct amdgpu_ring *ring = &adev->uvd.inst->ring;
if (RREG32(mmUVD_STATUS) != 0)
uvd_v4_2_stop(adev);
@ -251,7 +252,7 @@ static int uvd_v4_2_resume(void *handle)
*/
static int uvd_v4_2_start(struct amdgpu_device *adev)
{
struct amdgpu_ring *ring = &adev->uvd.ring;
struct amdgpu_ring *ring = &adev->uvd.inst->ring;
uint32_t rb_bufsz;
int i, j, r;
u32 tmp;
@ -523,6 +524,18 @@ static void uvd_v4_2_ring_emit_ib(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, ib->length_dw);
}
static void uvd_v4_2_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
int i;
WARN_ON(ring->wptr % 2 || count % 2);
for (i = 0; i < count / 2; i++) {
amdgpu_ring_write(ring, PACKET0(mmUVD_NO_OP, 0));
amdgpu_ring_write(ring, 0);
}
}
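The UVD decode ring consumes commands as (header, payload) dword pairs, which is why the padding above is emitted two dwords per iteration; a sketch of what one pass writes, assuming the usual PACKET0 encoding:
	/* amdgpu_ring_write(ring, PACKET0(mmUVD_NO_OP, 0));
	 *	-> header: write 1 register starting at UVD_NO_OP
	 * amdgpu_ring_write(ring, 0);
	 *	-> payload: the value stored to UVD_NO_OP
	 * Hence the WARN_ON when wptr or count is odd.
	 */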
/**
* uvd_v4_2_mc_resume - memory controller programming
*
@ -536,7 +549,7 @@ static void uvd_v4_2_mc_resume(struct amdgpu_device *adev)
uint32_t size;
/* program the VCPU memory controller bits 0-27 */
addr = (adev->uvd.gpu_addr + AMDGPU_UVD_FIRMWARE_OFFSET) >> 3;
addr = (adev->uvd.inst->gpu_addr + AMDGPU_UVD_FIRMWARE_OFFSET) >> 3;
size = AMDGPU_UVD_FIRMWARE_SIZE(adev) >> 3;
WREG32(mmUVD_VCPU_CACHE_OFFSET0, addr);
WREG32(mmUVD_VCPU_CACHE_SIZE0, size);
@ -553,11 +566,11 @@ static void uvd_v4_2_mc_resume(struct amdgpu_device *adev)
WREG32(mmUVD_VCPU_CACHE_SIZE2, size);
/* bits 28-31 */
addr = (adev->uvd.gpu_addr >> 28) & 0xF;
addr = (adev->uvd.inst->gpu_addr >> 28) & 0xF;
WREG32(mmUVD_LMI_ADDR_EXT, (addr << 12) | (addr << 0));
/* bits 32-39 */
addr = (adev->uvd.gpu_addr >> 32) & 0xFF;
addr = (adev->uvd.inst->gpu_addr >> 32) & 0xFF;
WREG32(mmUVD_LMI_EXT40_ADDR, addr | (0x9 << 16) | (0x1 << 31));
WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
@ -664,7 +677,7 @@ static int uvd_v4_2_process_interrupt(struct amdgpu_device *adev,
struct amdgpu_iv_entry *entry)
{
DRM_DEBUG("IH: UVD TRAP\n");
amdgpu_fence_process(&adev->uvd.ring);
amdgpu_fence_process(&adev->uvd.inst->ring);
return 0;
}
@ -732,7 +745,6 @@ static const struct amd_ip_funcs uvd_v4_2_ip_funcs = {
static const struct amdgpu_ring_funcs uvd_v4_2_ring_funcs = {
.type = AMDGPU_RING_TYPE_UVD,
.align_mask = 0xf,
.nop = PACKET0(mmUVD_NO_OP, 0),
.support_64bit_ptrs = false,
.get_rptr = uvd_v4_2_ring_get_rptr,
.get_wptr = uvd_v4_2_ring_get_wptr,
@ -745,7 +757,7 @@ static const struct amdgpu_ring_funcs uvd_v4_2_ring_funcs = {
.emit_fence = uvd_v4_2_ring_emit_fence,
.test_ring = uvd_v4_2_ring_test_ring,
.test_ib = amdgpu_uvd_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
.insert_nop = uvd_v4_2_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
.begin_use = amdgpu_uvd_ring_begin_use,
.end_use = amdgpu_uvd_ring_end_use,
@ -753,7 +765,7 @@ static const struct amdgpu_ring_funcs uvd_v4_2_ring_funcs = {
static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev)
{
adev->uvd.ring.funcs = &uvd_v4_2_ring_funcs;
adev->uvd.inst->ring.funcs = &uvd_v4_2_ring_funcs;
}
static const struct amdgpu_irq_src_funcs uvd_v4_2_irq_funcs = {
@ -763,8 +775,8 @@ static const struct amdgpu_irq_src_funcs uvd_v4_2_irq_funcs = {
static void uvd_v4_2_set_irq_funcs(struct amdgpu_device *adev)
{
adev->uvd.irq.num_types = 1;
adev->uvd.irq.funcs = &uvd_v4_2_irq_funcs;
adev->uvd.inst->irq.num_types = 1;
adev->uvd.inst->irq.funcs = &uvd_v4_2_irq_funcs;
}
const struct amdgpu_ip_block_version uvd_v4_2_ip_block =

View File

@ -89,6 +89,7 @@ static void uvd_v5_0_ring_set_wptr(struct amdgpu_ring *ring)
static int uvd_v5_0_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
adev->uvd.num_uvd_inst = 1;
uvd_v5_0_set_ring_funcs(adev);
uvd_v5_0_set_irq_funcs(adev);
@ -103,7 +104,7 @@ static int uvd_v5_0_sw_init(void *handle)
int r;
/* UVD TRAP */
r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 124, &adev->uvd.irq);
r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 124, &adev->uvd.inst->irq);
if (r)
return r;
@ -115,9 +116,9 @@ static int uvd_v5_0_sw_init(void *handle)
if (r)
return r;
ring = &adev->uvd.ring;
ring = &adev->uvd.inst->ring;
sprintf(ring->name, "uvd");
r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0);
r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
return r;
}
@ -144,7 +145,7 @@ static int uvd_v5_0_sw_fini(void *handle)
static int uvd_v5_0_hw_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct amdgpu_ring *ring = &adev->uvd.ring;
struct amdgpu_ring *ring = &adev->uvd.inst->ring;
uint32_t tmp;
int r;
@ -204,7 +205,7 @@ static int uvd_v5_0_hw_init(void *handle)
static int uvd_v5_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct amdgpu_ring *ring = &adev->uvd.ring;
struct amdgpu_ring *ring = &adev->uvd.inst->ring;
if (RREG32(mmUVD_STATUS) != 0)
uvd_v5_0_stop(adev);
@ -253,9 +254,9 @@ static void uvd_v5_0_mc_resume(struct amdgpu_device *adev)
/* program memory controller bits 0-27 */
WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
lower_32_bits(adev->uvd.gpu_addr));
lower_32_bits(adev->uvd.inst->gpu_addr));
WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
upper_32_bits(adev->uvd.gpu_addr));
upper_32_bits(adev->uvd.inst->gpu_addr));
offset = AMDGPU_UVD_FIRMWARE_OFFSET;
size = AMDGPU_UVD_FIRMWARE_SIZE(adev);
@ -287,7 +288,7 @@ static void uvd_v5_0_mc_resume(struct amdgpu_device *adev)
*/
static int uvd_v5_0_start(struct amdgpu_device *adev)
{
struct amdgpu_ring *ring = &adev->uvd.ring;
struct amdgpu_ring *ring = &adev->uvd.inst->ring;
uint32_t rb_bufsz, tmp;
uint32_t lmi_swap_cntl;
uint32_t mp_swap_cntl;
@ -540,6 +541,18 @@ static void uvd_v5_0_ring_emit_ib(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, ib->length_dw);
}
static void uvd_v5_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
int i;
WARN_ON(ring->wptr % 2 || count % 2);
for (i = 0; i < count / 2; i++) {
amdgpu_ring_write(ring, PACKET0(mmUVD_NO_OP, 0));
amdgpu_ring_write(ring, 0);
}
}
static bool uvd_v5_0_is_idle(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@ -586,7 +599,7 @@ static int uvd_v5_0_process_interrupt(struct amdgpu_device *adev,
struct amdgpu_iv_entry *entry)
{
DRM_DEBUG("IH: UVD TRAP\n");
amdgpu_fence_process(&adev->uvd.ring);
amdgpu_fence_process(&adev->uvd.inst->ring);
return 0;
}
@ -840,7 +853,6 @@ static const struct amd_ip_funcs uvd_v5_0_ip_funcs = {
static const struct amdgpu_ring_funcs uvd_v5_0_ring_funcs = {
.type = AMDGPU_RING_TYPE_UVD,
.align_mask = 0xf,
.nop = PACKET0(mmUVD_NO_OP, 0),
.support_64bit_ptrs = false,
.get_rptr = uvd_v5_0_ring_get_rptr,
.get_wptr = uvd_v5_0_ring_get_wptr,
@ -853,7 +865,7 @@ static const struct amdgpu_ring_funcs uvd_v5_0_ring_funcs = {
.emit_fence = uvd_v5_0_ring_emit_fence,
.test_ring = uvd_v5_0_ring_test_ring,
.test_ib = amdgpu_uvd_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
.insert_nop = uvd_v5_0_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
.begin_use = amdgpu_uvd_ring_begin_use,
.end_use = amdgpu_uvd_ring_end_use,
@ -861,7 +873,7 @@ static const struct amdgpu_ring_funcs uvd_v5_0_ring_funcs = {
static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev)
{
adev->uvd.ring.funcs = &uvd_v5_0_ring_funcs;
adev->uvd.inst->ring.funcs = &uvd_v5_0_ring_funcs;
}
static const struct amdgpu_irq_src_funcs uvd_v5_0_irq_funcs = {
@ -871,8 +883,8 @@ static const struct amdgpu_irq_src_funcs uvd_v5_0_irq_funcs = {
static void uvd_v5_0_set_irq_funcs(struct amdgpu_device *adev)
{
adev->uvd.irq.num_types = 1;
adev->uvd.irq.funcs = &uvd_v5_0_irq_funcs;
adev->uvd.inst->irq.num_types = 1;
adev->uvd.inst->irq.funcs = &uvd_v5_0_irq_funcs;
}
const struct amdgpu_ip_block_version uvd_v5_0_ip_block =

View File

@ -91,7 +91,7 @@ static uint64_t uvd_v6_0_enc_ring_get_rptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
if (ring == &adev->uvd.ring_enc[0])
if (ring == &adev->uvd.inst->ring_enc[0])
return RREG32(mmUVD_RB_RPTR);
else
return RREG32(mmUVD_RB_RPTR2);
@ -121,7 +121,7 @@ static uint64_t uvd_v6_0_enc_ring_get_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
if (ring == &adev->uvd.ring_enc[0])
if (ring == &adev->uvd.inst->ring_enc[0])
return RREG32(mmUVD_RB_WPTR);
else
return RREG32(mmUVD_RB_WPTR2);
@ -152,7 +152,7 @@ static void uvd_v6_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
if (ring == &adev->uvd.ring_enc[0])
if (ring == &adev->uvd.inst->ring_enc[0])
WREG32(mmUVD_RB_WPTR,
lower_32_bits(ring->wptr));
else
@ -375,6 +375,7 @@ static int uvd_v6_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
static int uvd_v6_0_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
adev->uvd.num_uvd_inst = 1;
if (!(adev->flags & AMD_IS_APU) &&
(RREG32_SMC(ixCC_HARVEST_FUSES) & CC_HARVEST_FUSES__UVD_DISABLE_MASK))
@ -399,14 +400,14 @@ static int uvd_v6_0_sw_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
/* UVD TRAP */
r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 124, &adev->uvd.irq);
r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 124, &adev->uvd.inst->irq);
if (r)
return r;
/* UVD ENC TRAP */
if (uvd_v6_0_enc_support(adev)) {
for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i + 119, &adev->uvd.irq);
r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i + 119, &adev->uvd.inst->irq);
if (r)
return r;
}
@ -418,17 +419,17 @@ static int uvd_v6_0_sw_init(void *handle)
if (!uvd_v6_0_enc_support(adev)) {
for (i = 0; i < adev->uvd.num_enc_rings; ++i)
adev->uvd.ring_enc[i].funcs = NULL;
adev->uvd.inst->ring_enc[i].funcs = NULL;
adev->uvd.irq.num_types = 1;
adev->uvd.inst->irq.num_types = 1;
adev->uvd.num_enc_rings = 0;
DRM_INFO("UVD ENC is disabled\n");
} else {
struct drm_sched_rq *rq;
ring = &adev->uvd.ring_enc[0];
ring = &adev->uvd.inst->ring_enc[0];
rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
r = drm_sched_entity_init(&ring->sched, &adev->uvd.entity_enc,
r = drm_sched_entity_init(&ring->sched, &adev->uvd.inst->entity_enc,
rq, NULL);
if (r) {
DRM_ERROR("Failed setting up UVD ENC run queue.\n");
@ -440,17 +441,17 @@ static int uvd_v6_0_sw_init(void *handle)
if (r)
return r;
ring = &adev->uvd.ring;
ring = &adev->uvd.inst->ring;
sprintf(ring->name, "uvd");
r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0);
r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
if (r)
return r;
if (uvd_v6_0_enc_support(adev)) {
for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
ring = &adev->uvd.ring_enc[i];
ring = &adev->uvd.inst->ring_enc[i];
sprintf(ring->name, "uvd_enc%d", i);
r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0);
r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
if (r)
return r;
}
@ -469,10 +470,10 @@ static int uvd_v6_0_sw_fini(void *handle)
return r;
if (uvd_v6_0_enc_support(adev)) {
drm_sched_entity_fini(&adev->uvd.ring_enc[0].sched, &adev->uvd.entity_enc);
drm_sched_entity_fini(&adev->uvd.inst->ring_enc[0].sched, &adev->uvd.inst->entity_enc);
for (i = 0; i < adev->uvd.num_enc_rings; ++i)
amdgpu_ring_fini(&adev->uvd.ring_enc[i]);
amdgpu_ring_fini(&adev->uvd.inst->ring_enc[i]);
}
return amdgpu_uvd_sw_fini(adev);
@ -488,7 +489,7 @@ static int uvd_v6_0_sw_fini(void *handle)
static int uvd_v6_0_hw_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct amdgpu_ring *ring = &adev->uvd.ring;
struct amdgpu_ring *ring = &adev->uvd.inst->ring;
uint32_t tmp;
int i, r;
@ -532,7 +533,7 @@ static int uvd_v6_0_hw_init(void *handle)
if (uvd_v6_0_enc_support(adev)) {
for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
ring = &adev->uvd.ring_enc[i];
ring = &adev->uvd.inst->ring_enc[i];
ring->ready = true;
r = amdgpu_ring_test_ring(ring);
if (r) {
@ -563,7 +564,7 @@ static int uvd_v6_0_hw_init(void *handle)
static int uvd_v6_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct amdgpu_ring *ring = &adev->uvd.ring;
struct amdgpu_ring *ring = &adev->uvd.inst->ring;
if (RREG32(mmUVD_STATUS) != 0)
uvd_v6_0_stop(adev);
@ -611,9 +612,9 @@ static void uvd_v6_0_mc_resume(struct amdgpu_device *adev)
/* program memory controller bits 0-27 */
WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
lower_32_bits(adev->uvd.gpu_addr));
lower_32_bits(adev->uvd.inst->gpu_addr));
WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
upper_32_bits(adev->uvd.gpu_addr));
upper_32_bits(adev->uvd.inst->gpu_addr));
offset = AMDGPU_UVD_FIRMWARE_OFFSET;
size = AMDGPU_UVD_FIRMWARE_SIZE(adev);
@ -726,7 +727,7 @@ static void cz_set_uvd_clock_gating_branches(struct amdgpu_device *adev,
*/
static int uvd_v6_0_start(struct amdgpu_device *adev)
{
struct amdgpu_ring *ring = &adev->uvd.ring;
struct amdgpu_ring *ring = &adev->uvd.inst->ring;
uint32_t rb_bufsz, tmp;
uint32_t lmi_swap_cntl;
uint32_t mp_swap_cntl;
@ -866,14 +867,14 @@ static int uvd_v6_0_start(struct amdgpu_device *adev)
WREG32_FIELD(UVD_RBC_RB_CNTL, RB_NO_FETCH, 0);
if (uvd_v6_0_enc_support(adev)) {
ring = &adev->uvd.ring_enc[0];
ring = &adev->uvd.inst->ring_enc[0];
WREG32(mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
WREG32(mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
WREG32(mmUVD_RB_BASE_LO, ring->gpu_addr);
WREG32(mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
WREG32(mmUVD_RB_SIZE, ring->ring_size / 4);
ring = &adev->uvd.ring_enc[1];
ring = &adev->uvd.inst->ring_enc[1];
WREG32(mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
WREG32(mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
WREG32(mmUVD_RB_BASE_LO2, ring->gpu_addr);
@ -1099,6 +1100,18 @@ static void uvd_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, 0xE);
}
static void uvd_v6_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
int i;
WARN_ON(ring->wptr % 2 || count % 2);
for (i = 0; i < count / 2; i++) {
amdgpu_ring_write(ring, PACKET0(mmUVD_NO_OP, 0));
amdgpu_ring_write(ring, 0);
}
}
static void uvd_v6_0_enc_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
uint32_t seq = ring->fence_drv.sync_seq;
@ -1158,10 +1171,10 @@ static bool uvd_v6_0_check_soft_reset(void *handle)
srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_UVD, 1);
if (srbm_soft_reset) {
adev->uvd.srbm_soft_reset = srbm_soft_reset;
adev->uvd.inst->srbm_soft_reset = srbm_soft_reset;
return true;
} else {
adev->uvd.srbm_soft_reset = 0;
adev->uvd.inst->srbm_soft_reset = 0;
return false;
}
}
@ -1170,7 +1183,7 @@ static int uvd_v6_0_pre_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (!adev->uvd.srbm_soft_reset)
if (!adev->uvd.inst->srbm_soft_reset)
return 0;
uvd_v6_0_stop(adev);
@ -1182,9 +1195,9 @@ static int uvd_v6_0_soft_reset(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
u32 srbm_soft_reset;
if (!adev->uvd.srbm_soft_reset)
if (!adev->uvd.inst->srbm_soft_reset)
return 0;
srbm_soft_reset = adev->uvd.srbm_soft_reset;
srbm_soft_reset = adev->uvd.inst->srbm_soft_reset;
if (srbm_soft_reset) {
u32 tmp;
@ -1212,7 +1225,7 @@ static int uvd_v6_0_post_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (!adev->uvd.srbm_soft_reset)
if (!adev->uvd.inst->srbm_soft_reset)
return 0;
mdelay(5);
@ -1238,17 +1251,17 @@ static int uvd_v6_0_process_interrupt(struct amdgpu_device *adev,
switch (entry->src_id) {
case 124:
amdgpu_fence_process(&adev->uvd.ring);
amdgpu_fence_process(&adev->uvd.inst->ring);
break;
case 119:
if (likely(uvd_v6_0_enc_support(adev)))
amdgpu_fence_process(&adev->uvd.ring_enc[0]);
amdgpu_fence_process(&adev->uvd.inst->ring_enc[0]);
else
int_handled = false;
break;
case 120:
if (likely(uvd_v6_0_enc_support(adev)))
amdgpu_fence_process(&adev->uvd.ring_enc[1]);
amdgpu_fence_process(&adev->uvd.inst->ring_enc[1]);
else
int_handled = false;
break;
@ -1531,7 +1544,6 @@ static const struct amd_ip_funcs uvd_v6_0_ip_funcs = {
static const struct amdgpu_ring_funcs uvd_v6_0_ring_phys_funcs = {
.type = AMDGPU_RING_TYPE_UVD,
.align_mask = 0xf,
.nop = PACKET0(mmUVD_NO_OP, 0),
.support_64bit_ptrs = false,
.get_rptr = uvd_v6_0_ring_get_rptr,
.get_wptr = uvd_v6_0_ring_get_wptr,
@ -1547,7 +1559,7 @@ static const struct amdgpu_ring_funcs uvd_v6_0_ring_phys_funcs = {
.emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush,
.test_ring = uvd_v6_0_ring_test_ring,
.test_ib = amdgpu_uvd_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
.insert_nop = uvd_v6_0_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
.begin_use = amdgpu_uvd_ring_begin_use,
.end_use = amdgpu_uvd_ring_end_use,
@ -1612,10 +1624,10 @@ static const struct amdgpu_ring_funcs uvd_v6_0_enc_ring_vm_funcs = {
static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev)
{
if (adev->asic_type >= CHIP_POLARIS10) {
adev->uvd.ring.funcs = &uvd_v6_0_ring_vm_funcs;
adev->uvd.inst->ring.funcs = &uvd_v6_0_ring_vm_funcs;
DRM_INFO("UVD is enabled in VM mode\n");
} else {
adev->uvd.ring.funcs = &uvd_v6_0_ring_phys_funcs;
adev->uvd.inst->ring.funcs = &uvd_v6_0_ring_phys_funcs;
DRM_INFO("UVD is enabled in physical mode\n");
}
}
@ -1625,7 +1637,7 @@ static void uvd_v6_0_set_enc_ring_funcs(struct amdgpu_device *adev)
int i;
for (i = 0; i < adev->uvd.num_enc_rings; ++i)
adev->uvd.ring_enc[i].funcs = &uvd_v6_0_enc_ring_vm_funcs;
adev->uvd.inst->ring_enc[i].funcs = &uvd_v6_0_enc_ring_vm_funcs;
DRM_INFO("UVD ENC is enabled in VM mode\n");
}
@ -1638,11 +1650,11 @@ static const struct amdgpu_irq_src_funcs uvd_v6_0_irq_funcs = {
static void uvd_v6_0_set_irq_funcs(struct amdgpu_device *adev)
{
if (uvd_v6_0_enc_support(adev))
adev->uvd.irq.num_types = adev->uvd.num_enc_rings + 1;
adev->uvd.inst->irq.num_types = adev->uvd.num_enc_rings + 1;
else
adev->uvd.irq.num_types = 1;
adev->uvd.inst->irq.num_types = 1;
adev->uvd.irq.funcs = &uvd_v6_0_irq_funcs;
adev->uvd.inst->irq.funcs = &uvd_v6_0_irq_funcs;
}
const struct amdgpu_ip_block_version uvd_v6_0_ip_block =

File diff suppressed because it is too large

View File

@ -35,7 +35,6 @@
#include "mmhub/mmhub_9_1_offset.h"
#include "mmhub/mmhub_9_1_sh_mask.h"
static int vcn_v1_0_start(struct amdgpu_device *adev);
static int vcn_v1_0_stop(struct amdgpu_device *adev);
static void vcn_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev);
static void vcn_v1_0_set_enc_ring_funcs(struct amdgpu_device *adev);
@ -146,10 +145,6 @@ static int vcn_v1_0_hw_init(void *handle)
struct amdgpu_ring *ring = &adev->vcn.ring_dec;
int i, r;
r = vcn_v1_0_start(adev);
if (r)
goto done;
ring->ready = true;
r = amdgpu_ring_test_ring(ring);
if (r) {
@ -185,11 +180,9 @@ static int vcn_v1_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct amdgpu_ring *ring = &adev->vcn.ring_dec;
int r;
r = vcn_v1_0_stop(adev);
if (r)
return r;
if (RREG32_SOC15(VCN, 0, mmUVD_STATUS))
vcn_v1_0_stop(adev);
ring->ready = false;
@ -288,14 +281,14 @@ static void vcn_v1_0_mc_resume(struct amdgpu_device *adev)
*
* Disable clock gating for VCN block
*/
static void vcn_v1_0_disable_clock_gating(struct amdgpu_device *adev, bool sw)
static void vcn_v1_0_disable_clock_gating(struct amdgpu_device *adev)
{
uint32_t data;
/* JPEG disable CGC */
data = RREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL);
if (sw)
if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
else
data &= ~JPEG_CGC_CTRL__DYN_CLOCK_MODE_MASK;
@ -310,7 +303,7 @@ static void vcn_v1_0_disable_clock_gating(struct amdgpu_device *adev, bool sw)
/* UVD disable CGC */
data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
if (sw)
if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
else
data &= ~ UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
@ -415,13 +408,13 @@ static void vcn_v1_0_disable_clock_gating(struct amdgpu_device *adev, bool sw)
*
* Enable clock gating for VCN block
*/
static void vcn_v1_0_enable_clock_gating(struct amdgpu_device *adev, bool sw)
static void vcn_v1_0_enable_clock_gating(struct amdgpu_device *adev)
{
uint32_t data = 0;
/* enable JPEG CGC */
data = RREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL);
if (sw)
if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
else
data |= 0 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
@ -435,7 +428,7 @@ static void vcn_v1_0_enable_clock_gating(struct amdgpu_device *adev, bool sw)
/* enable UVD CGC */
data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
if (sw)
if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
else
data |= 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
@ -480,6 +473,94 @@ static void vcn_v1_0_enable_clock_gating(struct amdgpu_device *adev, bool sw)
WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL, data);
}
static void vcn_1_0_disable_static_power_gating(struct amdgpu_device *adev)
{
uint32_t data = 0;
int ret;
if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
| 1 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
| 2 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
| 2 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
| 2 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
| 2 << UVD_PGFSM_CONFIG__UVDIL_PWR_CONFIG__SHIFT
| 2 << UVD_PGFSM_CONFIG__UVDIR_PWR_CONFIG__SHIFT
| 2 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
| 2 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
| 2 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT
| 2 << UVD_PGFSM_CONFIG__UVDW_PWR_CONFIG__SHIFT);
WREG32_SOC15(VCN, 0, mmUVD_PGFSM_CONFIG, data);
SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS, UVD_PGFSM_STATUS__UVDM_UVDU_PWR_ON, 0xFFFFFF, ret);
} else {
data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
| 1 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
| 1 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
| 1 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
| 1 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
| 1 << UVD_PGFSM_CONFIG__UVDIL_PWR_CONFIG__SHIFT
| 1 << UVD_PGFSM_CONFIG__UVDIR_PWR_CONFIG__SHIFT
| 1 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
| 1 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
| 1 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT
| 1 << UVD_PGFSM_CONFIG__UVDW_PWR_CONFIG__SHIFT);
WREG32_SOC15(VCN, 0, mmUVD_PGFSM_CONFIG, data);
SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS, 0, 0xFFFFFFFF, ret);
}
/* poll UVD_PGFSM_STATUS to confirm UVDM_PWR_STATUS and UVDU_PWR_STATUS are 0 (powered on) */
data = RREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS);
data &= ~0x103;
if (adev->pg_flags & AMD_PG_SUPPORT_VCN)
data |= UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON | UVD_POWER_STATUS__UVD_PG_EN_MASK;
WREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS, data);
}
static void vcn_1_0_enable_static_power_gating(struct amdgpu_device *adev)
{
uint32_t data = 0;
int ret;
if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
/* Before power off, this indicator has to be turned on */
data = RREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS);
data &= ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK;
data |= UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF;
WREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS, data);
data = (2 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
| 2 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
| 2 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
| 2 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
| 2 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
| 2 << UVD_PGFSM_CONFIG__UVDIL_PWR_CONFIG__SHIFT
| 2 << UVD_PGFSM_CONFIG__UVDIR_PWR_CONFIG__SHIFT
| 2 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
| 2 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
| 2 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT
| 2 << UVD_PGFSM_CONFIG__UVDW_PWR_CONFIG__SHIFT);
WREG32_SOC15(VCN, 0, mmUVD_PGFSM_CONFIG, data);
data = (2 << UVD_PGFSM_STATUS__UVDM_PWR_STATUS__SHIFT
| 2 << UVD_PGFSM_STATUS__UVDU_PWR_STATUS__SHIFT
| 2 << UVD_PGFSM_STATUS__UVDF_PWR_STATUS__SHIFT
| 2 << UVD_PGFSM_STATUS__UVDC_PWR_STATUS__SHIFT
| 2 << UVD_PGFSM_STATUS__UVDB_PWR_STATUS__SHIFT
| 2 << UVD_PGFSM_STATUS__UVDIL_PWR_STATUS__SHIFT
| 2 << UVD_PGFSM_STATUS__UVDIR_PWR_STATUS__SHIFT
| 2 << UVD_PGFSM_STATUS__UVDTD_PWR_STATUS__SHIFT
| 2 << UVD_PGFSM_STATUS__UVDTE_PWR_STATUS__SHIFT
| 2 << UVD_PGFSM_STATUS__UVDE_PWR_STATUS__SHIFT
| 2 << UVD_PGFSM_STATUS__UVDW_PWR_STATUS__SHIFT);
SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS, data, 0xFFFFFFFF, ret);
}
}
/**
* vcn_v1_0_start - start VCN block
*
@ -499,8 +580,9 @@ static int vcn_v1_0_start(struct amdgpu_device *adev)
vcn_v1_0_mc_resume(adev);
vcn_1_0_disable_static_power_gating(adev);
/* disable clock gating */
vcn_v1_0_disable_clock_gating(adev, true);
vcn_v1_0_disable_clock_gating(adev);
/* disable interrupt */
WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN), 0,
@ -680,16 +762,45 @@ static int vcn_v1_0_stop(struct amdgpu_device *adev)
WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2), 0,
~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
/* enable clock gating */
vcn_v1_0_enable_clock_gating(adev, true);
WREG32_SOC15(VCN, 0, mmUVD_STATUS, 0);
vcn_v1_0_enable_clock_gating(adev);
vcn_1_0_enable_static_power_gating(adev);
return 0;
}
bool vcn_v1_0_is_idle(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
return (RREG32_SOC15(VCN, 0, mmUVD_STATUS) == 0x2);
}
int vcn_v1_0_wait_for_idle(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int ret = 0;
SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_STATUS, 0x2, 0x2, ret);
return ret;
}
static int vcn_v1_0_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
/* needed for driver unload */
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
if (enable) {
/* wait for STATUS to clear */
if (vcn_v1_0_is_idle(handle))
return -EBUSY;
vcn_v1_0_enable_clock_gating(adev);
} else {
/* disable HW gating and enable SW gating */
vcn_v1_0_disable_clock_gating(adev);
}
return 0;
}
@ -1048,16 +1159,36 @@ static int vcn_v1_0_process_interrupt(struct amdgpu_device *adev,
return 0;
}
static void vcn_v1_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
static void vcn_v1_0_dec_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
int i;
struct amdgpu_device *adev = ring->adev;
int i;
for (i = 0; i < count; i++)
WARN_ON(ring->wptr % 2 || count % 2);
for (i = 0; i < count / 2; i++) {
amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP), 0));
amdgpu_ring_write(ring, 0);
}
}
static int vcn_v1_0_set_powergating_state(void *handle,
enum amd_powergating_state state)
{
/* This doesn't actually powergate the VCN block.
* That's done in the dpm code via the SMC. This
* just re-inits the block as necessary. The actual
* gating still happens in the dpm code. We should
* revisit this when there is a cleaner line between
* the smc and the hw blocks
*/
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (state == AMD_PG_STATE_GATE)
return vcn_v1_0_stop(adev);
else
return vcn_v1_0_start(adev);
}
static const struct amd_ip_funcs vcn_v1_0_ip_funcs = {
.name = "vcn_v1_0",
@ -1069,20 +1200,19 @@ static const struct amd_ip_funcs vcn_v1_0_ip_funcs = {
.hw_fini = vcn_v1_0_hw_fini,
.suspend = vcn_v1_0_suspend,
.resume = vcn_v1_0_resume,
.is_idle = NULL /* vcn_v1_0_is_idle */,
.wait_for_idle = NULL /* vcn_v1_0_wait_for_idle */,
.is_idle = vcn_v1_0_is_idle,
.wait_for_idle = vcn_v1_0_wait_for_idle,
.check_soft_reset = NULL /* vcn_v1_0_check_soft_reset */,
.pre_soft_reset = NULL /* vcn_v1_0_pre_soft_reset */,
.soft_reset = NULL /* vcn_v1_0_soft_reset */,
.post_soft_reset = NULL /* vcn_v1_0_post_soft_reset */,
.set_clockgating_state = vcn_v1_0_set_clockgating_state,
.set_powergating_state = NULL /* vcn_v1_0_set_powergating_state */,
.set_powergating_state = vcn_v1_0_set_powergating_state,
};
static const struct amdgpu_ring_funcs vcn_v1_0_dec_ring_vm_funcs = {
.type = AMDGPU_RING_TYPE_VCN_DEC,
.align_mask = 0xf,
.nop = PACKET0(0x81ff, 0),
.support_64bit_ptrs = false,
.vmhub = AMDGPU_MMHUB,
.get_rptr = vcn_v1_0_dec_ring_get_rptr,
@ -1101,7 +1231,7 @@ static const struct amdgpu_ring_funcs vcn_v1_0_dec_ring_vm_funcs = {
.emit_vm_flush = vcn_v1_0_dec_ring_emit_vm_flush,
.test_ring = amdgpu_vcn_dec_ring_test_ring,
.test_ib = amdgpu_vcn_dec_ring_test_ib,
.insert_nop = vcn_v1_0_ring_insert_nop,
.insert_nop = vcn_v1_0_dec_ring_insert_nop,
.insert_start = vcn_v1_0_dec_ring_insert_start,
.insert_end = vcn_v1_0_dec_ring_insert_end,
.pad_ib = amdgpu_ring_generic_pad_ib,

View File

@ -0,0 +1,53 @@
/*
* Copyright 2018 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "amdgpu.h"
#include "soc15.h"
#include "soc15_common.h"
#include "soc15_hw_ip.h"
#include "vega20_ip_offset.h"
int vega20_reg_base_init(struct amdgpu_device *adev)
{
/* HW has more IP blocks; only initialize the blocks used by our driver */
uint32_t i;
for (i = 0 ; i < MAX_INSTANCE ; ++i) {
adev->reg_offset[GC_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i]));
adev->reg_offset[HDP_HWIP][i] = (uint32_t *)(&(HDP_BASE.instance[i]));
adev->reg_offset[MMHUB_HWIP][i] = (uint32_t *)(&(MMHUB_BASE.instance[i]));
adev->reg_offset[ATHUB_HWIP][i] = (uint32_t *)(&(ATHUB_BASE.instance[i]));
adev->reg_offset[NBIO_HWIP][i] = (uint32_t *)(&(NBIO_BASE.instance[i]));
adev->reg_offset[MP0_HWIP][i] = (uint32_t *)(&(MP0_BASE.instance[i]));
adev->reg_offset[UVD_HWIP][i] = (uint32_t *)(&(UVD_BASE.instance[i]));
adev->reg_offset[VCE_HWIP][i] = (uint32_t *)(&(VCE_BASE.instance[i]));
adev->reg_offset[DF_HWIP][i] = (uint32_t *)(&(DF_BASE.instance[i]));
adev->reg_offset[DCE_HWIP][i] = (uint32_t *)(&(DCE_BASE.instance[i]));
adev->reg_offset[OSSSYS_HWIP][i] = (uint32_t *)(&(OSSSYS_BASE.instance[i]));
adev->reg_offset[SDMA0_HWIP][i] = (uint32_t *)(&(SDMA0_BASE.instance[i]));
adev->reg_offset[SDMA1_HWIP][i] = (uint32_t *)(&(SDMA1_BASE.instance[i]));
adev->reg_offset[SMUIO_HWIP][i] = (uint32_t *)(&(SMUIO_BASE.instance[i]));
}
return 0;
}
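For context, these per-instance base tables are what the SOC15 register helpers index into. A minimal sketch of that lookup, assuming the SOC15_REG_OFFSET/RREG32_SOC15 form used elsewhere in the driver (shown only to illustrate how reg_offset is consumed):

/* absolute register offset = per-IP, per-instance segment base plus the
 * register's offset within that segment; reg##_BASE_IDX picks the segment.
 */
#define SOC15_REG_OFFSET(ip, inst, reg) \
	(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg)
#define RREG32_SOC15(ip, inst, reg) \
	RREG32(SOC15_REG_OFFSET(ip, inst, reg))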

View File

@ -34,10 +34,4 @@ config DEBUG_KERNEL_DC
if you want to hit
kdgb_break in assert.
config DRM_AMD_DC_VEGAM
bool "VEGAM support"
depends on DRM_AMD_DC
help
Choose this option if you want to have
VEGAM support for display engine
endmenu

View File

@ -911,6 +911,7 @@ amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector)
drm_mode_connector_update_edid_property(connector, NULL);
aconnector->num_modes = 0;
aconnector->dc_sink = NULL;
aconnector->edid = NULL;
}
mutex_unlock(&dev->mode_config.mutex);
@ -1115,6 +1116,7 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev)
if (adev->asic_type == CHIP_VEGA10 ||
adev->asic_type == CHIP_VEGA12 ||
adev->asic_type == CHIP_VEGA20 ||
adev->asic_type == CHIP_RAVEN)
client_id = SOC15_IH_CLIENTID_DCE;
@ -1513,11 +1515,10 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
case CHIP_POLARIS11:
case CHIP_POLARIS10:
case CHIP_POLARIS12:
#if defined(CONFIG_DRM_AMD_DC_VEGAM)
case CHIP_VEGAM:
#endif
case CHIP_VEGA10:
case CHIP_VEGA12:
case CHIP_VEGA20:
if (dce110_register_irq_handlers(dm->adev)) {
DRM_ERROR("DM: Failed to initialize IRQ\n");
goto fail;
@ -1708,9 +1709,7 @@ static int dm_early_init(void *handle)
adev->mode_info.plane_type = dm_plane_type_default;
break;
case CHIP_POLARIS10:
#if defined(CONFIG_DRM_AMD_DC_VEGAM)
case CHIP_VEGAM:
#endif
adev->mode_info.num_crtc = 6;
adev->mode_info.num_hpd = 6;
adev->mode_info.num_dig = 6;
@ -1718,6 +1717,7 @@ static int dm_early_init(void *handle)
break;
case CHIP_VEGA10:
case CHIP_VEGA12:
case CHIP_VEGA20:
adev->mode_info.num_crtc = 6;
adev->mode_info.num_hpd = 6;
adev->mode_info.num_dig = 6;
@ -1966,6 +1966,7 @@ static int fill_plane_attributes_from_fb(struct amdgpu_device *adev,
if (adev->asic_type == CHIP_VEGA10 ||
adev->asic_type == CHIP_VEGA12 ||
adev->asic_type == CHIP_VEGA20 ||
adev->asic_type == CHIP_RAVEN) {
/* Fill GFX9 params */
plane_state->tiling_info.gfx9.num_pipes =

View File

@ -88,9 +88,9 @@ static void __drm_lut_to_dc_gamma(struct drm_color_lut *lut,
g = drm_color_lut_extract(lut[i].green, 16);
b = drm_color_lut_extract(lut[i].blue, 16);
gamma->entries.red[i] = dal_fixed31_32_from_int(r);
gamma->entries.green[i] = dal_fixed31_32_from_int(g);
gamma->entries.blue[i] = dal_fixed31_32_from_int(b);
gamma->entries.red[i] = dc_fixpt_from_int(r);
gamma->entries.green[i] = dc_fixpt_from_int(g);
gamma->entries.blue[i] = dc_fixpt_from_int(b);
}
return;
}
@ -101,9 +101,9 @@ static void __drm_lut_to_dc_gamma(struct drm_color_lut *lut,
g = drm_color_lut_extract(lut[i].green, 16);
b = drm_color_lut_extract(lut[i].blue, 16);
gamma->entries.red[i] = dal_fixed31_32_from_fraction(r, MAX_DRM_LUT_VALUE);
gamma->entries.green[i] = dal_fixed31_32_from_fraction(g, MAX_DRM_LUT_VALUE);
gamma->entries.blue[i] = dal_fixed31_32_from_fraction(b, MAX_DRM_LUT_VALUE);
gamma->entries.red[i] = dc_fixpt_from_fraction(r, MAX_DRM_LUT_VALUE);
gamma->entries.green[i] = dc_fixpt_from_fraction(g, MAX_DRM_LUT_VALUE);
gamma->entries.blue[i] = dc_fixpt_from_fraction(b, MAX_DRM_LUT_VALUE);
}
}
@ -208,7 +208,7 @@ void amdgpu_dm_set_ctm(struct dm_crtc_state *crtc)
for (i = 0; i < 12; i++) {
/* Skip 4th element */
if (i % 4 == 3) {
stream->gamut_remap_matrix.matrix[i] = dal_fixed31_32_zero;
stream->gamut_remap_matrix.matrix[i] = dc_fixpt_zero;
continue;
}

View File

@ -330,11 +330,6 @@ bool dm_helpers_dp_mst_send_payload_allocation(
return true;
}
bool dm_helpers_dc_conn_log(struct dc_context *ctx, struct log_entry *entry, enum dc_log_type event)
{
return true;
}
void dm_dtn_log_begin(struct dc_context *ctx)
{}

View File

@ -24,7 +24,7 @@
# It provides the general basic services required by other DAL
# subcomponents.
BASICS = conversion.o fixpt31_32.o fixpt32_32.o \
BASICS = conversion.o fixpt31_32.o \
logger.o log_helpers.o vector.o
AMD_DAL_BASICS = $(addprefix $(AMDDALPATH)/dc/basics/,$(BASICS))

View File

@ -41,22 +41,22 @@ uint16_t fixed_point_to_int_frac(
uint16_t result;
uint16_t d = (uint16_t)dal_fixed31_32_floor(
dal_fixed31_32_abs(
uint16_t d = (uint16_t)dc_fixpt_floor(
dc_fixpt_abs(
arg));
if (d <= (uint16_t)(1 << integer_bits) - (1 / (uint16_t)divisor))
numerator = (uint16_t)dal_fixed31_32_round(
dal_fixed31_32_mul_int(
numerator = (uint16_t)dc_fixpt_round(
dc_fixpt_mul_int(
arg,
divisor));
else {
numerator = dal_fixed31_32_floor(
dal_fixed31_32_sub(
dal_fixed31_32_from_int(
numerator = dc_fixpt_floor(
dc_fixpt_sub(
dc_fixpt_from_int(
1LL << integer_bits),
dal_fixed31_32_recip(
dal_fixed31_32_from_int(
dc_fixpt_recip(
dc_fixpt_from_int(
divisor))));
}
@ -66,8 +66,8 @@ uint16_t fixed_point_to_int_frac(
result = (uint16_t)(
(1 << (integer_bits + fractional_bits + 1)) + numerator);
if ((result != 0) && dal_fixed31_32_lt(
arg, dal_fixed31_32_zero))
if ((result != 0) && dc_fixpt_lt(
arg, dc_fixpt_zero))
result |= 1 << (integer_bits + fractional_bits);
return result;
@ -84,15 +84,15 @@ void convert_float_matrix(
uint32_t buffer_size)
{
const struct fixed31_32 min_2_13 =
dal_fixed31_32_from_fraction(S2D13_MIN, DIVIDER);
dc_fixpt_from_fraction(S2D13_MIN, DIVIDER);
const struct fixed31_32 max_2_13 =
dal_fixed31_32_from_fraction(S2D13_MAX, DIVIDER);
dc_fixpt_from_fraction(S2D13_MAX, DIVIDER);
uint32_t i;
for (i = 0; i < buffer_size; ++i) {
uint32_t reg_value =
fixed_point_to_int_frac(
dal_fixed31_32_clamp(
dc_fixpt_clamp(
flt[i],
min_2_13,
max_2_13),

View File

@ -64,9 +64,7 @@ static inline unsigned long long complete_integer_division_u64(
#define GET_FRACTIONAL_PART(x) \
(FRACTIONAL_PART_MASK & (x))
struct fixed31_32 dal_fixed31_32_from_fraction(
long long numerator,
long long denominator)
struct fixed31_32 dc_fixpt_from_fraction(long long numerator, long long denominator)
{
struct fixed31_32 res;
@ -118,63 +116,7 @@ struct fixed31_32 dal_fixed31_32_from_fraction(
return res;
}
struct fixed31_32 dal_fixed31_32_from_int_nonconst(
long long arg)
{
struct fixed31_32 res;
ASSERT((LONG_MIN <= arg) && (arg <= LONG_MAX));
res.value = arg << FIXED31_32_BITS_PER_FRACTIONAL_PART;
return res;
}
struct fixed31_32 dal_fixed31_32_shl(
struct fixed31_32 arg,
unsigned char shift)
{
struct fixed31_32 res;
ASSERT(((arg.value >= 0) && (arg.value <= LLONG_MAX >> shift)) ||
((arg.value < 0) && (arg.value >= LLONG_MIN >> shift)));
res.value = arg.value << shift;
return res;
}
struct fixed31_32 dal_fixed31_32_add(
struct fixed31_32 arg1,
struct fixed31_32 arg2)
{
struct fixed31_32 res;
ASSERT(((arg1.value >= 0) && (LLONG_MAX - arg1.value >= arg2.value)) ||
((arg1.value < 0) && (LLONG_MIN - arg1.value <= arg2.value)));
res.value = arg1.value + arg2.value;
return res;
}
struct fixed31_32 dal_fixed31_32_sub(
struct fixed31_32 arg1,
struct fixed31_32 arg2)
{
struct fixed31_32 res;
ASSERT(((arg2.value >= 0) && (LLONG_MIN + arg2.value <= arg1.value)) ||
((arg2.value < 0) && (LLONG_MAX + arg2.value >= arg1.value)));
res.value = arg1.value - arg2.value;
return res;
}
struct fixed31_32 dal_fixed31_32_mul(
struct fixed31_32 arg1,
struct fixed31_32 arg2)
struct fixed31_32 dc_fixpt_mul(struct fixed31_32 arg1, struct fixed31_32 arg2)
{
struct fixed31_32 res;
@ -213,7 +155,7 @@ struct fixed31_32 dal_fixed31_32_mul(
tmp = arg1_fra * arg2_fra;
tmp = (tmp >> FIXED31_32_BITS_PER_FRACTIONAL_PART) +
(tmp >= (unsigned long long)dal_fixed31_32_half.value);
(tmp >= (unsigned long long)dc_fixpt_half.value);
ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value));
@ -225,8 +167,7 @@ struct fixed31_32 dal_fixed31_32_mul(
return res;
}
struct fixed31_32 dal_fixed31_32_sqr(
struct fixed31_32 arg)
struct fixed31_32 dc_fixpt_sqr(struct fixed31_32 arg)
{
struct fixed31_32 res;
@ -257,7 +198,7 @@ struct fixed31_32 dal_fixed31_32_sqr(
tmp = arg_fra * arg_fra;
tmp = (tmp >> FIXED31_32_BITS_PER_FRACTIONAL_PART) +
(tmp >= (unsigned long long)dal_fixed31_32_half.value);
(tmp >= (unsigned long long)dc_fixpt_half.value);
ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value));
@ -266,8 +207,7 @@ struct fixed31_32 dal_fixed31_32_sqr(
return res;
}
struct fixed31_32 dal_fixed31_32_recip(
struct fixed31_32 arg)
struct fixed31_32 dc_fixpt_recip(struct fixed31_32 arg)
{
/*
* @note
@ -276,41 +216,40 @@ struct fixed31_32 dal_fixed31_32_recip(
ASSERT(arg.value);
return dal_fixed31_32_from_fraction(
dal_fixed31_32_one.value,
return dc_fixpt_from_fraction(
dc_fixpt_one.value,
arg.value);
}
struct fixed31_32 dal_fixed31_32_sinc(
struct fixed31_32 arg)
struct fixed31_32 dc_fixpt_sinc(struct fixed31_32 arg)
{
struct fixed31_32 square;
struct fixed31_32 res = dal_fixed31_32_one;
struct fixed31_32 res = dc_fixpt_one;
int n = 27;
struct fixed31_32 arg_norm = arg;
if (dal_fixed31_32_le(
dal_fixed31_32_two_pi,
dal_fixed31_32_abs(arg))) {
arg_norm = dal_fixed31_32_sub(
if (dc_fixpt_le(
dc_fixpt_two_pi,
dc_fixpt_abs(arg))) {
arg_norm = dc_fixpt_sub(
arg_norm,
dal_fixed31_32_mul_int(
dal_fixed31_32_two_pi,
dc_fixpt_mul_int(
dc_fixpt_two_pi,
(int)div64_s64(
arg_norm.value,
dal_fixed31_32_two_pi.value)));
dc_fixpt_two_pi.value)));
}
square = dal_fixed31_32_sqr(arg_norm);
square = dc_fixpt_sqr(arg_norm);
do {
res = dal_fixed31_32_sub(
dal_fixed31_32_one,
dal_fixed31_32_div_int(
dal_fixed31_32_mul(
res = dc_fixpt_sub(
dc_fixpt_one,
dc_fixpt_div_int(
dc_fixpt_mul(
square,
res),
n * (n - 1)));
@ -319,37 +258,35 @@ struct fixed31_32 dal_fixed31_32_sinc(
} while (n > 2);
if (arg.value != arg_norm.value)
res = dal_fixed31_32_div(
dal_fixed31_32_mul(res, arg_norm),
res = dc_fixpt_div(
dc_fixpt_mul(res, arg_norm),
arg);
return res;
}
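For reference, the do/while above is a Horner-form evaluation of the Maclaurin series

	\mathrm{sinc}(x) = \sin(x)/x = 1 - \frac{x^2}{3!} + \frac{x^4}{5!} - \frac{x^6}{7!} + \cdots

with n starting at 27, presumably deep enough that the truncation error falls below the 31.32 fixed-point resolution. dc_fixpt_sin then recovers \sin(x) = x \cdot \mathrm{sinc}(x), and dc_fixpt_cos below evaluates the analogous series 1 - x^2/2! + x^4/4! - \cdots.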
struct fixed31_32 dal_fixed31_32_sin(
struct fixed31_32 arg)
struct fixed31_32 dc_fixpt_sin(struct fixed31_32 arg)
{
return dal_fixed31_32_mul(
return dc_fixpt_mul(
arg,
dal_fixed31_32_sinc(arg));
dc_fixpt_sinc(arg));
}
struct fixed31_32 dal_fixed31_32_cos(
struct fixed31_32 arg)
struct fixed31_32 dc_fixpt_cos(struct fixed31_32 arg)
{
/* TODO implement argument normalization */
const struct fixed31_32 square = dal_fixed31_32_sqr(arg);
const struct fixed31_32 square = dc_fixpt_sqr(arg);
struct fixed31_32 res = dal_fixed31_32_one;
struct fixed31_32 res = dc_fixpt_one;
int n = 26;
do {
res = dal_fixed31_32_sub(
dal_fixed31_32_one,
dal_fixed31_32_div_int(
dal_fixed31_32_mul(
res = dc_fixpt_sub(
dc_fixpt_one,
dc_fixpt_div_int(
dc_fixpt_mul(
square,
res),
n * (n - 1)));
@ -367,37 +304,35 @@ struct fixed31_32 dal_fixed31_32_cos(
*
* Calculated as Taylor series.
*/
static struct fixed31_32 fixed31_32_exp_from_taylor_series(
struct fixed31_32 arg)
static struct fixed31_32 fixed31_32_exp_from_taylor_series(struct fixed31_32 arg)
{
unsigned int n = 9;
struct fixed31_32 res = dal_fixed31_32_from_fraction(
struct fixed31_32 res = dc_fixpt_from_fraction(
n + 2,
n + 1);
/* TODO find correct res */
ASSERT(dal_fixed31_32_lt(arg, dal_fixed31_32_one));
ASSERT(dc_fixpt_lt(arg, dc_fixpt_one));
do
res = dal_fixed31_32_add(
dal_fixed31_32_one,
dal_fixed31_32_div_int(
dal_fixed31_32_mul(
res = dc_fixpt_add(
dc_fixpt_one,
dc_fixpt_div_int(
dc_fixpt_mul(
arg,
res),
n));
while (--n != 1);
return dal_fixed31_32_add(
dal_fixed31_32_one,
dal_fixed31_32_mul(
return dc_fixpt_add(
dc_fixpt_one,
dc_fixpt_mul(
arg,
res));
}
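Unrolled, the recurrence res = 1 + arg \cdot res / n for n = 9, 8, \ldots, 2, followed by the final 1 + arg \cdot res, is the Horner form

	e^{r} \approx 1 + r\Big(1 + \tfrac{r}{2}\Big(1 + \tfrac{r}{3}\big(\cdots\big(1 + \tfrac{r}{9}\cdot\tfrac{11}{10}\big)\cdots\big)\Big)\Big)

of \sum_k r^k/k!, where the seed (n+2)/(n+1) = 11/10 folds in an estimate of the truncated tail; the ASSERT documents that this is only used for |r| < 1.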
struct fixed31_32 dal_fixed31_32_exp(
struct fixed31_32 arg)
struct fixed31_32 dc_fixpt_exp(struct fixed31_32 arg)
{
/*
* @brief
@ -406,44 +341,43 @@ struct fixed31_32 dal_fixed31_32_exp(
* where m = round(x / ln(2)), r = x - m * ln(2)
*/
if (dal_fixed31_32_le(
dal_fixed31_32_ln2_div_2,
dal_fixed31_32_abs(arg))) {
int m = dal_fixed31_32_round(
dal_fixed31_32_div(
if (dc_fixpt_le(
dc_fixpt_ln2_div_2,
dc_fixpt_abs(arg))) {
int m = dc_fixpt_round(
dc_fixpt_div(
arg,
dal_fixed31_32_ln2));
dc_fixpt_ln2));
struct fixed31_32 r = dal_fixed31_32_sub(
struct fixed31_32 r = dc_fixpt_sub(
arg,
dal_fixed31_32_mul_int(
dal_fixed31_32_ln2,
dc_fixpt_mul_int(
dc_fixpt_ln2,
m));
ASSERT(m != 0);
ASSERT(dal_fixed31_32_lt(
dal_fixed31_32_abs(r),
dal_fixed31_32_one));
ASSERT(dc_fixpt_lt(
dc_fixpt_abs(r),
dc_fixpt_one));
if (m > 0)
return dal_fixed31_32_shl(
return dc_fixpt_shl(
fixed31_32_exp_from_taylor_series(r),
(unsigned char)m);
else
return dal_fixed31_32_div_int(
return dc_fixpt_div_int(
fixed31_32_exp_from_taylor_series(r),
1LL << -m);
} else if (arg.value != 0)
return fixed31_32_exp_from_taylor_series(arg);
else
return dal_fixed31_32_one;
return dc_fixpt_one;
}
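The range reduction named in the comment keeps the Taylor helper on a small argument:

	e^{x} = 2^{m} \cdot e^{r}, \quad m = \mathrm{round}(x/\ln 2), \quad r = x - m\ln 2, \quad |r| \le \tfrac{\ln 2}{2} < 1

and the exact 2^{m} factor is applied as a left shift for m > 0 or an integer divide by 2^{-m} for m < 0.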
struct fixed31_32 dal_fixed31_32_log(
struct fixed31_32 arg)
struct fixed31_32 dc_fixpt_log(struct fixed31_32 arg)
{
struct fixed31_32 res = dal_fixed31_32_neg(dal_fixed31_32_one);
struct fixed31_32 res = dc_fixpt_neg(dc_fixpt_one);
/* TODO improve 1st estimation */
struct fixed31_32 error;
@ -453,15 +387,15 @@ struct fixed31_32 dal_fixed31_32_log(
/* TODO if arg is zero, return -INF */
do {
struct fixed31_32 res1 = dal_fixed31_32_add(
dal_fixed31_32_sub(
struct fixed31_32 res1 = dc_fixpt_add(
dc_fixpt_sub(
res,
dal_fixed31_32_one),
dal_fixed31_32_div(
dc_fixpt_one),
dc_fixpt_div(
arg,
dal_fixed31_32_exp(res)));
dc_fixpt_exp(res)));
error = dal_fixed31_32_sub(
error = dc_fixpt_sub(
res,
res1);
@ -472,66 +406,11 @@ struct fixed31_32 dal_fixed31_32_log(
return res;
}
struct fixed31_32 dal_fixed31_32_pow(
struct fixed31_32 arg1,
struct fixed31_32 arg2)
{
return dal_fixed31_32_exp(
dal_fixed31_32_mul(
dal_fixed31_32_log(arg1),
arg2));
}
int dal_fixed31_32_floor(
struct fixed31_32 arg)
{
unsigned long long arg_value = abs_i64(arg.value);
if (arg.value >= 0)
return (int)GET_INTEGER_PART(arg_value);
else
return -(int)GET_INTEGER_PART(arg_value);
}
int dal_fixed31_32_round(
struct fixed31_32 arg)
{
unsigned long long arg_value = abs_i64(arg.value);
const long long summand = dal_fixed31_32_half.value;
ASSERT(LLONG_MAX - (long long)arg_value >= summand);
arg_value += summand;
if (arg.value >= 0)
return (int)GET_INTEGER_PART(arg_value);
else
return -(int)GET_INTEGER_PART(arg_value);
}
int dal_fixed31_32_ceil(
struct fixed31_32 arg)
{
unsigned long long arg_value = abs_i64(arg.value);
const long long summand = dal_fixed31_32_one.value -
dal_fixed31_32_epsilon.value;
ASSERT(LLONG_MAX - (long long)arg_value >= summand);
arg_value += summand;
if (arg.value >= 0)
return (int)GET_INTEGER_PART(arg_value);
else
return -(int)GET_INTEGER_PART(arg_value);
}
/* this function is a generic helper to translate a fixed point value to a
* specified integer format consisting of an integer_bits integer part and a
* fractional_bits fractional part. For example it is used in
* dal_fixed31_32_u2d19 to receive 2 bits integer part and 19 bits fractional
* dc_fixpt_u2d19 to receive 2 bits integer part and 19 bits fractional
* part in 32 bits. It is used in hw programming (scaler)
*/
@ -570,35 +449,30 @@ static inline unsigned int clamp_ux_dy(
return min_clamp;
}
unsigned int dal_fixed31_32_u2d19(
struct fixed31_32 arg)
unsigned int dc_fixpt_u2d19(struct fixed31_32 arg)
{
return ux_dy(arg.value, 2, 19);
}
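As a worked example of the u2d19 layout (assuming ux_dy rounds the 31.32 value into the requested integer_bits + fractional_bits field): 1.5 encodes as 1.5 \cdot 2^{19} = 0xC0000, and the representable range is [0, 4 - 2^{-19}].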
unsigned int dal_fixed31_32_u0d19(
struct fixed31_32 arg)
unsigned int dc_fixpt_u0d19(struct fixed31_32 arg)
{
return ux_dy(arg.value, 0, 19);
}
unsigned int dal_fixed31_32_clamp_u0d14(
struct fixed31_32 arg)
unsigned int dc_fixpt_clamp_u0d14(struct fixed31_32 arg)
{
return clamp_ux_dy(arg.value, 0, 14, 1);
}
unsigned int dal_fixed31_32_clamp_u0d10(
struct fixed31_32 arg)
unsigned int dc_fixpt_clamp_u0d10(struct fixed31_32 arg)
{
return clamp_ux_dy(arg.value, 0, 10, 1);
}
int dal_fixed31_32_s4d19(
struct fixed31_32 arg)
int dc_fixpt_s4d19(struct fixed31_32 arg)
{
if (arg.value < 0)
return -(int)ux_dy(dal_fixed31_32_abs(arg).value, 4, 19);
return -(int)ux_dy(dc_fixpt_abs(arg).value, 4, 19);
else
return ux_dy(arg.value, 4, 19);
}

View File

@ -1,161 +0,0 @@
/*
* Copyright 2012-15 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "dm_services.h"
#include "include/fixed32_32.h"
static uint64_t u64_div(uint64_t n, uint64_t d)
{
uint32_t i = 0;
uint64_t r;
uint64_t q = div64_u64_rem(n, d, &r);
for (i = 0; i < 32; ++i) {
uint64_t sbit = q & (1ULL<<63);
r <<= 1;
r |= sbit ? 1 : 0;
q <<= 1;
if (r >= d) {
r -= d;
q |= 1;
}
}
if (2*r >= d)
q += 1;
return q;
}
struct fixed32_32 dal_fixed32_32_from_fraction(uint32_t n, uint32_t d)
{
struct fixed32_32 fx;
fx.value = u64_div((uint64_t)n << 32, (uint64_t)d << 32);
return fx;
}
struct fixed32_32 dal_fixed32_32_add(
struct fixed32_32 lhs,
struct fixed32_32 rhs)
{
struct fixed32_32 fx = {lhs.value + rhs.value};
ASSERT(fx.value >= rhs.value);
return fx;
}
struct fixed32_32 dal_fixed32_32_add_int(struct fixed32_32 lhs, uint32_t rhs)
{
struct fixed32_32 fx = {lhs.value + ((uint64_t)rhs << 32)};
ASSERT(fx.value >= (uint64_t)rhs << 32);
return fx;
}
struct fixed32_32 dal_fixed32_32_sub(
struct fixed32_32 lhs,
struct fixed32_32 rhs)
{
struct fixed32_32 fx;
ASSERT(lhs.value >= rhs.value);
fx.value = lhs.value - rhs.value;
return fx;
}
struct fixed32_32 dal_fixed32_32_sub_int(struct fixed32_32 lhs, uint32_t rhs)
{
struct fixed32_32 fx;
ASSERT(lhs.value >= ((uint64_t)rhs<<32));
fx.value = lhs.value - ((uint64_t)rhs<<32);
return fx;
}
struct fixed32_32 dal_fixed32_32_mul(
struct fixed32_32 lhs,
struct fixed32_32 rhs)
{
struct fixed32_32 fx;
uint64_t lhs_int = lhs.value>>32;
uint64_t lhs_frac = (uint32_t)lhs.value;
uint64_t rhs_int = rhs.value>>32;
uint64_t rhs_frac = (uint32_t)rhs.value;
uint64_t ahbh = lhs_int * rhs_int;
uint64_t ahbl = lhs_int * rhs_frac;
uint64_t albh = lhs_frac * rhs_int;
uint64_t albl = lhs_frac * rhs_frac;
ASSERT((ahbh>>32) == 0);
fx.value = (ahbh<<32) + ahbl + albh + (albl>>32);
return fx;
}
struct fixed32_32 dal_fixed32_32_mul_int(struct fixed32_32 lhs, uint32_t rhs)
{
struct fixed32_32 fx;
uint64_t lhsi = (lhs.value>>32) * (uint64_t)rhs;
uint64_t lhsf;
ASSERT((lhsi>>32) == 0);
lhsf = ((uint32_t)lhs.value) * (uint64_t)rhs;
ASSERT((lhsi<<32) + lhsf >= lhsf);
fx.value = (lhsi<<32) + lhsf;
return fx;
}
struct fixed32_32 dal_fixed32_32_div(
struct fixed32_32 lhs,
struct fixed32_32 rhs)
{
struct fixed32_32 fx;
fx.value = u64_div(lhs.value, rhs.value);
return fx;
}
struct fixed32_32 dal_fixed32_32_div_int(struct fixed32_32 lhs, uint32_t rhs)
{
struct fixed32_32 fx;
fx.value = u64_div(lhs.value, (uint64_t)rhs << 32);
return fx;
}
uint32_t dal_fixed32_32_ceil(struct fixed32_32 v)
{
ASSERT((uint32_t)v.value ? (v.value >> 32) + 1 >= 1 : true);
return (v.value>>32) + ((uint32_t)v.value ? 1 : 0);
}
uint32_t dal_fixed32_32_round(struct fixed32_32 v)
{
ASSERT(v.value + (1ULL<<31) >= (1ULL<<31));
return (v.value + (1ULL<<31))>>32;
}

View File

@ -94,7 +94,6 @@ void dc_conn_log(struct dc_context *ctx,
dm_logger_append(&entry, "%2.2X ", hex_data[i]);
dm_logger_append(&entry, "^\n");
dm_helpers_dc_conn_log(ctx, &entry, event);
fail:
dm_logger_close(&entry);

View File

@ -61,7 +61,7 @@ static const struct dc_log_type_info log_type_info_tbl[] = {
{LOG_EVENT_UNDERFLOW, "Underflow"},
{LOG_IF_TRACE, "InterfaceTrace"},
{LOG_DTN, "DTN"},
{LOG_PROFILING, "Profiling"}
{LOG_DISPLAYSTATS, "DisplayStats"}
};
@ -402,3 +402,4 @@ void dm_logger_close(struct log_entry *entry)
entry->max_buf_bytes = 0;
}
}

View File

@ -1330,6 +1330,9 @@ static enum bp_result bios_parser_get_firmware_info(
case 2:
result = get_firmware_info_v3_2(bp, info);
break;
case 3:
result = get_firmware_info_v3_2(bp, info);
break;
default:
break;
}

View File

@ -51,9 +51,7 @@ bool dal_bios_parser_init_cmd_tbl_helper(
return true;
case DCE_VERSION_11_2:
#if defined(CONFIG_DRM_AMD_DC_VEGAM)
case DCE_VERSION_11_22:
#endif
*h = dal_cmd_tbl_helper_dce112_get_table();
return true;

View File

@ -52,9 +52,7 @@ bool dal_bios_parser_init_cmd_tbl_helper2(
return true;
case DCE_VERSION_11_2:
#if defined(CONFIG_DRM_AMD_DC_VEGAM)
case DCE_VERSION_11_22:
#endif
*h = dal_cmd_tbl_helper_dce112_get_table2();
return true;
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)

View File

@ -36,41 +36,41 @@ static bool build_custom_float(
uint32_t exp_offset = (1 << (format->exponenta_bits - 1)) - 1;
const struct fixed31_32 mantissa_constant_plus_max_fraction =
dal_fixed31_32_from_fraction(
dc_fixpt_from_fraction(
(1LL << (format->mantissa_bits + 1)) - 1,
1LL << format->mantissa_bits);
struct fixed31_32 mantiss;
if (dal_fixed31_32_eq(
if (dc_fixpt_eq(
value,
dal_fixed31_32_zero)) {
dc_fixpt_zero)) {
*negative = false;
*mantissa = 0;
*exponenta = 0;
return true;
}
if (dal_fixed31_32_lt(
if (dc_fixpt_lt(
value,
dal_fixed31_32_zero)) {
dc_fixpt_zero)) {
*negative = format->sign;
value = dal_fixed31_32_neg(value);
value = dc_fixpt_neg(value);
} else {
*negative = false;
}
if (dal_fixed31_32_lt(
if (dc_fixpt_lt(
value,
dal_fixed31_32_one)) {
dc_fixpt_one)) {
uint32_t i = 1;
do {
value = dal_fixed31_32_shl(value, 1);
value = dc_fixpt_shl(value, 1);
++i;
} while (dal_fixed31_32_lt(
} while (dc_fixpt_lt(
value,
dal_fixed31_32_one));
dc_fixpt_one));
--i;
@ -81,15 +81,15 @@ static bool build_custom_float(
}
*exponenta = exp_offset - i;
} else if (dal_fixed31_32_le(
} else if (dc_fixpt_le(
mantissa_constant_plus_max_fraction,
value)) {
uint32_t i = 1;
do {
value = dal_fixed31_32_shr(value, 1);
value = dc_fixpt_shr(value, 1);
++i;
} while (dal_fixed31_32_lt(
} while (dc_fixpt_lt(
mantissa_constant_plus_max_fraction,
value));
@ -98,23 +98,23 @@ static bool build_custom_float(
*exponenta = exp_offset;
}
mantiss = dal_fixed31_32_sub(
mantiss = dc_fixpt_sub(
value,
dal_fixed31_32_one);
dc_fixpt_one);
if (dal_fixed31_32_lt(
if (dc_fixpt_lt(
mantiss,
dal_fixed31_32_zero) ||
dal_fixed31_32_lt(
dal_fixed31_32_one,
dc_fixpt_zero) ||
dc_fixpt_lt(
dc_fixpt_one,
mantiss))
mantiss = dal_fixed31_32_zero;
mantiss = dc_fixpt_zero;
else
mantiss = dal_fixed31_32_shl(
mantiss = dc_fixpt_shl(
mantiss,
format->mantissa_bits);
*mantissa = dal_fixed31_32_floor(mantiss);
*mantissa = dc_fixpt_floor(mantiss);
return true;
}
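The target encoding is the conventional biased floating-point layout: with bias = 2^{exponenta\_bits - 1} - 1 (exp_offset above), the shift loops normalize |value| into [1, 2) while counting the shifts into the exponent, so the packed fields satisfy

	value = (-1)^{negative} \cdot \big(1 + mantissa/2^{mantissa\_bits}\big) \cdot 2^{exponenta - bias}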

View File

@ -59,10 +59,8 @@ static enum bw_calcs_version bw_calcs_version_from_asic_id(struct hw_asic_id asi
return BW_CALCS_VERSION_POLARIS10;
if (ASIC_REV_IS_POLARIS11_M(asic_id.hw_internal_rev))
return BW_CALCS_VERSION_POLARIS11;
#if defined(CONFIG_DRM_AMD_DC_VEGAM)
if (ASIC_REV_IS_VEGAM(asic_id.hw_internal_rev))
return BW_CALCS_VERSION_VEGAM;
#endif
return BW_CALCS_VERSION_INVALID;
case FAMILY_AI:
@ -2151,11 +2149,9 @@ void bw_calcs_init(struct bw_calcs_dceip *bw_dceip,
dceip.mcifwr_all_surfaces_burst_time = bw_int_to_fixed(0); /* todo: this is a bug*/
break;
case BW_CALCS_VERSION_POLARIS10:
#if defined(CONFIG_DRM_AMD_DC_VEGAM)
/* TODO: Treat VEGAM the same as P10 for now
* Need to tune the parameters for VEGAM if needed */
case BW_CALCS_VERSION_VEGAM:
#endif
vbios.memory_type = bw_def_gddr5;
vbios.dram_channel_width_in_bits = 32;
vbios.number_of_dram_channels = asic_id.vram_width / vbios.dram_channel_width_in_bits;

View File

@ -873,14 +873,14 @@ bool dcn_validate_bandwidth(
}
if (pipe->plane_state->rotation % 2 == 0) {
ASSERT(pipe->plane_res.scl_data.ratios.horz.value != dal_fixed31_32_one.value
ASSERT(pipe->plane_res.scl_data.ratios.horz.value != dc_fixpt_one.value
|| v->scaler_rec_out_width[input_idx] == v->viewport_width[input_idx]);
ASSERT(pipe->plane_res.scl_data.ratios.vert.value != dal_fixed31_32_one.value
ASSERT(pipe->plane_res.scl_data.ratios.vert.value != dc_fixpt_one.value
|| v->scaler_recout_height[input_idx] == v->viewport_height[input_idx]);
} else {
ASSERT(pipe->plane_res.scl_data.ratios.horz.value != dal_fixed31_32_one.value
ASSERT(pipe->plane_res.scl_data.ratios.horz.value != dc_fixpt_one.value
|| v->scaler_recout_height[input_idx] == v->viewport_width[input_idx]);
ASSERT(pipe->plane_res.scl_data.ratios.vert.value != dal_fixed31_32_one.value
ASSERT(pipe->plane_res.scl_data.ratios.vert.value != dc_fixpt_one.value
|| v->scaler_rec_out_width[input_idx] == v->viewport_height[input_idx]);
}
v->dcc_enable[input_idx] = pipe->plane_state->dcc.enable ? dcn_bw_yes : dcn_bw_no;

View File

@ -469,6 +469,13 @@ static void link_disconnect_sink(struct dc_link *link)
link->dpcd_sink_count = 0;
}
static void link_disconnect_remap(struct dc_sink *prev_sink, struct dc_link *link)
{
dc_sink_release(link->local_sink);
link->local_sink = prev_sink;
}
static bool detect_dp(
struct dc_link *link,
struct display_sink_capability *sink_caps,
@ -551,6 +558,17 @@ static bool detect_dp(
return true;
}
static bool is_same_edid(struct dc_edid *old_edid, struct dc_edid *new_edid)
{
if (old_edid->length != new_edid->length)
return false;
if (new_edid->length == 0)
return false;
return (memcmp(old_edid->raw_edid, new_edid->raw_edid, new_edid->length) == 0);
}
bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
{
struct dc_sink_init_data sink_init_data = { 0 };
@ -558,9 +576,13 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
uint8_t i;
bool converter_disable_audio = false;
struct audio_support *aud_support = &link->dc->res_pool->audio_support;
bool same_edid = false;
enum dc_edid_status edid_status;
struct dc_context *dc_ctx = link->ctx;
struct dc_sink *sink = NULL;
struct dc_sink *prev_sink = NULL;
struct dpcd_caps prev_dpcd_caps;
bool same_dpcd = true;
enum dc_connection_type new_connection_type = dc_connection_none;
DC_LOGGER_INIT(link->ctx->logger);
if (link->connector_signal == SIGNAL_TYPE_VIRTUAL)
@ -575,6 +597,11 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
link->local_sink)
return true;
prev_sink = link->local_sink;
if (prev_sink != NULL) {
dc_sink_retain(prev_sink);
memcpy(&prev_dpcd_caps, &link->dpcd_caps, sizeof(struct dpcd_caps));
}
link_disconnect_sink(link);
if (new_connection_type != dc_connection_none) {
@ -616,14 +643,25 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
link,
&sink_caps,
&converter_disable_audio,
aud_support, reason))
aud_support, reason)) {
if (prev_sink != NULL)
dc_sink_release(prev_sink);
return false;
}
// Check if the dpcd block is the same
if (prev_sink != NULL) {
if (memcmp(&link->dpcd_caps, &prev_dpcd_caps, sizeof(struct dpcd_caps)))
same_dpcd = false;
}
/* Active dongle downstream unplug */
if (link->type == dc_connection_active_dongle
&& link->dpcd_caps.sink_count.
bits.SINK_COUNT == 0)
bits.SINK_COUNT == 0) {
if (prev_sink != NULL)
dc_sink_release(prev_sink);
return true;
}
if (link->type == dc_connection_mst_branch) {
LINK_INFO("link=%d, mst branch is now Connected\n",
@ -631,9 +669,11 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
/* Need to setup mst link_cap struct here
* otherwise dc_link_detect() will leave mst link_cap
* empty which leads to allocate_mst_payload() has "0"
* pbn_per_slot value leading to exception on dal_fixed31_32_div()
* pbn_per_slot value leading to exception on dc_fixpt_div()
*/
link->verified_link_cap = link->reported_link_cap;
if (prev_sink != NULL)
dc_sink_release(prev_sink);
return false;
}
@ -643,6 +683,8 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
default:
DC_ERROR("Invalid connector type! signal:%d\n",
link->connector_signal);
if (prev_sink != NULL)
dc_sink_release(prev_sink);
return false;
} /* switch() */
@ -665,6 +707,8 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
sink = dc_sink_create(&sink_init_data);
if (!sink) {
DC_ERROR("Failed to create sink!\n");
if (prev_sink != NULL)
dc_sink_release(prev_sink);
return false;
}
@ -688,23 +732,34 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
break;
}
if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT &&
sink_caps.transaction_type ==
DDC_TRANSACTION_TYPE_I2C_OVER_AUX) {
/*
* TODO debug why Dell 2413 doesn't like
* two link trainings
*/
// Check if edid is the same
if ((prev_sink != NULL) && ((edid_status == EDID_THE_SAME) || (edid_status == EDID_OK)))
same_edid = is_same_edid(&prev_sink->dc_edid, &sink->dc_edid);
/* deal with non-mst cases */
dp_hbr_verify_link_cap(link, &link->reported_link_cap);
// If both the edid and dpcd are the same, discard the new sink and revert to the original sink
if ((same_edid) && (same_dpcd)) {
link_disconnect_remap(prev_sink, link);
sink = prev_sink;
prev_sink = NULL;
} else {
if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT &&
sink_caps.transaction_type ==
DDC_TRANSACTION_TYPE_I2C_OVER_AUX) {
/*
* TODO debug why Dell 2413 doesn't like
* two link trainings
*/
/* deal with non-mst cases */
dp_hbr_verify_link_cap(link, &link->reported_link_cap);
}
/* HDMI-DVI Dongle */
if (sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A &&
!sink->edid_caps.edid_hdmi)
sink->sink_signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
}
/* HDMI-DVI Dongle */
if (sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A &&
!sink->edid_caps.edid_hdmi)
sink->sink_signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
/* Connectivity log: detection */
for (i = 0; i < sink->dc_edid.length / EDID_BLOCK_SIZE; i++) {
CONN_DATA_DETECT(link,
@ -762,10 +817,14 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
sink_caps.signal = SIGNAL_TYPE_NONE;
}
LINK_INFO("link=%d, dc_sink_in=%p is now %s\n",
LINK_INFO("link=%d, dc_sink_in=%p is now %s prev_sink=%p dpcd same=%d edid same=%d\n",
link->link_index, sink,
(sink_caps.signal == SIGNAL_TYPE_NONE ?
"Disconnected":"Connected"));
"Disconnected":"Connected"), prev_sink,
same_dpcd, same_edid);
if (prev_sink != NULL)
dc_sink_release(prev_sink);
return true;
}
@ -2059,10 +2118,10 @@ static struct fixed31_32 get_pbn_per_slot(struct dc_stream_state *stream)
&stream->sink->link->cur_link_settings;
uint32_t link_rate_in_mbps =
link_settings->link_rate * LINK_RATE_REF_FREQ_IN_MHZ;
struct fixed31_32 mbps = dal_fixed31_32_from_int(
struct fixed31_32 mbps = dc_fixpt_from_int(
link_rate_in_mbps * link_settings->lane_count);
return dal_fixed31_32_div_int(mbps, 54);
return dc_fixpt_div_int(mbps, 54);
}
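In MST terms this is the bandwidth of one of the 64 time slots in an MTP: with the DPCD link rate expressed in LINK_RATE_REF_FREQ_IN_MHZ units,

	pbn\_per\_slot = \frac{link\_rate\_in\_mbps \times lane\_count}{54}

so, for example, a 4-lane HBR2 link (5400 Mbps per lane) gives 21600/54 = 400 PBN per slot.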
static int get_color_depth(enum dc_color_depth color_depth)
@ -2103,7 +2162,7 @@ static struct fixed31_32 get_pbn_from_timing(struct pipe_ctx *pipe_ctx)
numerator = 64 * PEAK_FACTOR_X1000;
denominator = 54 * 8 * 1000 * 1000;
kbps *= numerator;
peak_kbps = dal_fixed31_32_from_fraction(kbps, denominator);
peak_kbps = dc_fixpt_from_fraction(kbps, denominator);
return peak_kbps;
}
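Assuming PEAK_FACTOR_X1000 = 1006 (a 0.6% peak-bandwidth margin, as the name suggests), the expression reduces to

	peak\_pbn = \frac{kbps}{8 \times 1000} \times \frac{64}{54} \times 1.006

i.e. the stream's payload bandwidth in MBps converted into PBN units of 54/64 MBps, padded by the margin.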
@ -2230,7 +2289,7 @@ static enum dc_status allocate_mst_payload(struct pipe_ctx *pipe_ctx)
/* slot X.Y for only current stream */
pbn_per_slot = get_pbn_per_slot(stream);
pbn = get_pbn_from_timing(pipe_ctx);
avg_time_slots_per_mtp = dal_fixed31_32_div(pbn, pbn_per_slot);
avg_time_slots_per_mtp = dc_fixpt_div(pbn, pbn_per_slot);
stream_encoder->funcs->set_mst_bandwidth(
stream_encoder,
@ -2247,7 +2306,7 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx)
struct link_encoder *link_encoder = link->link_enc;
struct stream_encoder *stream_encoder = pipe_ctx->stream_res.stream_enc;
struct dp_mst_stream_allocation_table proposed_table = {0};
struct fixed31_32 avg_time_slots_per_mtp = dal_fixed31_32_from_int(0);
struct fixed31_32 avg_time_slots_per_mtp = dc_fixpt_from_int(0);
uint8_t i;
bool mst_mode = (link->type == dc_connection_mst_branch);
DC_LOGGER_INIT(link->ctx->logger);

View File

@ -11,8 +11,6 @@
#include "dc_link_dp.h"
#include "dc_link_ddc.h"
#include "dm_helpers.h"
#include "dce/dce_link_encoder.h"
#include "dce/dce_stream_encoder.h"
#include "dpcd_defs.h"
enum dc_status core_link_read_dpcd(

View File

@ -79,10 +79,8 @@ enum dce_version resource_parse_asic_id(struct hw_asic_id asic_id)
ASIC_REV_IS_POLARIS12_V(asic_id.hw_internal_rev)) {
dc_version = DCE_VERSION_11_2;
}
#if defined(CONFIG_DRM_AMD_DC_VEGAM)
if (ASIC_REV_IS_VEGAM(asic_id.hw_internal_rev))
dc_version = DCE_VERSION_11_22;
#endif
break;
case FAMILY_AI:
dc_version = DCE_VERSION_12_0;
@ -129,9 +127,7 @@ struct resource_pool *dc_create_resource_pool(
num_virtual_links, dc, asic_id);
break;
case DCE_VERSION_11_2:
#if defined(CONFIG_DRM_AMD_DC_VEGAM)
case DCE_VERSION_11_22:
#endif
res_pool = dce112_create_resource_pool(
num_virtual_links, dc);
break;
@ -500,9 +496,9 @@ static void calculate_viewport(struct pipe_ctx *pipe_ctx)
data->viewport_c.x = data->viewport.x / vpc_div;
data->viewport_c.y = data->viewport.y / vpc_div;
data->inits.h_c = (data->viewport.x % vpc_div) != 0 ?
dal_fixed31_32_half : dal_fixed31_32_zero;
dc_fixpt_half : dc_fixpt_zero;
data->inits.v_c = (data->viewport.y % vpc_div) != 0 ?
dal_fixed31_32_half : dal_fixed31_32_zero;
dc_fixpt_half : dc_fixpt_zero;
/* Round up, assume original video size always even dimensions */
data->viewport_c.width = (data->viewport.width + vpc_div - 1) / vpc_div;
data->viewport_c.height = (data->viewport.height + vpc_div - 1) / vpc_div;
@ -631,10 +627,10 @@ static void calculate_scaling_ratios(struct pipe_ctx *pipe_ctx)
pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270)
rect_swap_helper(&surf_src);
pipe_ctx->plane_res.scl_data.ratios.horz = dal_fixed31_32_from_fraction(
pipe_ctx->plane_res.scl_data.ratios.horz = dc_fixpt_from_fraction(
surf_src.width,
plane_state->dst_rect.width);
pipe_ctx->plane_res.scl_data.ratios.vert = dal_fixed31_32_from_fraction(
pipe_ctx->plane_res.scl_data.ratios.vert = dc_fixpt_from_fraction(
surf_src.height,
plane_state->dst_rect.height);
@ -656,6 +652,14 @@ static void calculate_scaling_ratios(struct pipe_ctx *pipe_ctx)
pipe_ctx->plane_res.scl_data.ratios.horz_c.value /= 2;
pipe_ctx->plane_res.scl_data.ratios.vert_c.value /= 2;
}
pipe_ctx->plane_res.scl_data.ratios.horz = dc_fixpt_truncate(
pipe_ctx->plane_res.scl_data.ratios.horz, 19);
pipe_ctx->plane_res.scl_data.ratios.vert = dc_fixpt_truncate(
pipe_ctx->plane_res.scl_data.ratios.vert, 19);
pipe_ctx->plane_res.scl_data.ratios.horz_c = dc_fixpt_truncate(
pipe_ctx->plane_res.scl_data.ratios.horz_c, 19);
pipe_ctx->plane_res.scl_data.ratios.vert_c = dc_fixpt_truncate(
pipe_ctx->plane_res.scl_data.ratios.vert_c, 19);
}
static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx, struct view *recout_skip)
@ -692,32 +696,33 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx, struct view *r
* init_bot = init + scaling_ratio
* init_c = init + truncated_vp_c_offset(from calculate viewport)
*/
data->inits.h = dal_fixed31_32_div_int(
dal_fixed31_32_add_int(data->ratios.horz, data->taps.h_taps + 1), 2);
data->inits.h = dc_fixpt_truncate(dc_fixpt_div_int(
dc_fixpt_add_int(data->ratios.horz, data->taps.h_taps + 1), 2), 19);
data->inits.h_c = dal_fixed31_32_add(data->inits.h_c, dal_fixed31_32_div_int(
dal_fixed31_32_add_int(data->ratios.horz_c, data->taps.h_taps_c + 1), 2));
data->inits.h_c = dc_fixpt_truncate(dc_fixpt_add(data->inits.h_c, dc_fixpt_div_int(
dc_fixpt_add_int(data->ratios.horz_c, data->taps.h_taps_c + 1), 2)), 19);
data->inits.v = dal_fixed31_32_div_int(
dal_fixed31_32_add_int(data->ratios.vert, data->taps.v_taps + 1), 2);
data->inits.v = dc_fixpt_truncate(dc_fixpt_div_int(
dc_fixpt_add_int(data->ratios.vert, data->taps.v_taps + 1), 2), 19);
data->inits.v_c = dc_fixpt_truncate(dc_fixpt_add(data->inits.v_c, dc_fixpt_div_int(
dc_fixpt_add_int(data->ratios.vert_c, data->taps.v_taps_c + 1), 2)), 19);
data->inits.v_c = dal_fixed31_32_add(data->inits.v_c, dal_fixed31_32_div_int(
dal_fixed31_32_add_int(data->ratios.vert_c, data->taps.v_taps_c + 1), 2));
/* Adjust for viewport end clip-off */
if ((data->viewport.x + data->viewport.width) < (src.x + src.width) && !flip_horz_scan_dir) {
int vp_clip = src.x + src.width - data->viewport.width - data->viewport.x;
int int_part = dal_fixed31_32_floor(
dal_fixed31_32_sub(data->inits.h, data->ratios.horz));
int int_part = dc_fixpt_floor(
dc_fixpt_sub(data->inits.h, data->ratios.horz));
int_part = int_part > 0 ? int_part : 0;
data->viewport.width += int_part < vp_clip ? int_part : vp_clip;
}
if ((data->viewport.y + data->viewport.height) < (src.y + src.height) && !flip_vert_scan_dir) {
int vp_clip = src.y + src.height - data->viewport.height - data->viewport.y;
int int_part = dal_fixed31_32_floor(
dal_fixed31_32_sub(data->inits.v, data->ratios.vert));
int int_part = dc_fixpt_floor(
dc_fixpt_sub(data->inits.v, data->ratios.vert));
int_part = int_part > 0 ? int_part : 0;
data->viewport.height += int_part < vp_clip ? int_part : vp_clip;
@ -725,8 +730,8 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx, struct view *r
if ((data->viewport_c.x + data->viewport_c.width) < (src.x + src.width) / vpc_div && !flip_horz_scan_dir) {
int vp_clip = (src.x + src.width) / vpc_div -
data->viewport_c.width - data->viewport_c.x;
int int_part = dal_fixed31_32_floor(
dal_fixed31_32_sub(data->inits.h_c, data->ratios.horz_c));
int int_part = dc_fixpt_floor(
dc_fixpt_sub(data->inits.h_c, data->ratios.horz_c));
int_part = int_part > 0 ? int_part : 0;
data->viewport_c.width += int_part < vp_clip ? int_part : vp_clip;
@ -734,8 +739,8 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx, struct view *r
if ((data->viewport_c.y + data->viewport_c.height) < (src.y + src.height) / vpc_div && !flip_vert_scan_dir) {
int vp_clip = (src.y + src.height) / vpc_div -
data->viewport_c.height - data->viewport_c.y;
int int_part = dal_fixed31_32_floor(
dal_fixed31_32_sub(data->inits.v_c, data->ratios.vert_c));
int int_part = dc_fixpt_floor(
dc_fixpt_sub(data->inits.v_c, data->ratios.vert_c));
int_part = int_part > 0 ? int_part : 0;
data->viewport_c.height += int_part < vp_clip ? int_part : vp_clip;
@ -745,9 +750,9 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx, struct view *r
if (data->viewport.x && !flip_horz_scan_dir) {
int int_part;
data->inits.h = dal_fixed31_32_add(data->inits.h, dal_fixed31_32_mul_int(
data->inits.h = dc_fixpt_add(data->inits.h, dc_fixpt_mul_int(
data->ratios.horz, recout_skip->width));
int_part = dal_fixed31_32_floor(data->inits.h) - data->viewport.x;
int_part = dc_fixpt_floor(data->inits.h) - data->viewport.x;
if (int_part < data->taps.h_taps) {
int int_adj = data->viewport.x >= (data->taps.h_taps - int_part) ?
(data->taps.h_taps - int_part) : data->viewport.x;
@ -760,15 +765,15 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx, struct view *r
int_part = data->taps.h_taps;
}
data->inits.h.value &= 0xffffffff;
data->inits.h = dal_fixed31_32_add_int(data->inits.h, int_part);
data->inits.h = dc_fixpt_add_int(data->inits.h, int_part);
}
if (data->viewport_c.x && !flip_horz_scan_dir) {
int int_part;
data->inits.h_c = dal_fixed31_32_add(data->inits.h_c, dal_fixed31_32_mul_int(
data->inits.h_c = dc_fixpt_add(data->inits.h_c, dc_fixpt_mul_int(
data->ratios.horz_c, recout_skip->width));
int_part = dal_fixed31_32_floor(data->inits.h_c) - data->viewport_c.x;
int_part = dc_fixpt_floor(data->inits.h_c) - data->viewport_c.x;
if (int_part < data->taps.h_taps_c) {
int int_adj = data->viewport_c.x >= (data->taps.h_taps_c - int_part) ?
(data->taps.h_taps_c - int_part) : data->viewport_c.x;
@ -781,15 +786,15 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx, struct view *r
int_part = data->taps.h_taps_c;
}
data->inits.h_c.value &= 0xffffffff;
data->inits.h_c = dal_fixed31_32_add_int(data->inits.h_c, int_part);
data->inits.h_c = dc_fixpt_add_int(data->inits.h_c, int_part);
}
if (data->viewport.y && !flip_vert_scan_dir) {
int int_part;
data->inits.v = dal_fixed31_32_add(data->inits.v, dal_fixed31_32_mul_int(
data->inits.v = dc_fixpt_add(data->inits.v, dc_fixpt_mul_int(
data->ratios.vert, recout_skip->height));
int_part = dal_fixed31_32_floor(data->inits.v) - data->viewport.y;
int_part = dc_fixpt_floor(data->inits.v) - data->viewport.y;
if (int_part < data->taps.v_taps) {
int int_adj = data->viewport.y >= (data->taps.v_taps - int_part) ?
(data->taps.v_taps - int_part) : data->viewport.y;
@ -802,15 +807,15 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx, struct view *r
int_part = data->taps.v_taps;
}
data->inits.v.value &= 0xffffffff;
data->inits.v = dal_fixed31_32_add_int(data->inits.v, int_part);
data->inits.v = dc_fixpt_add_int(data->inits.v, int_part);
}
if (data->viewport_c.y && !flip_vert_scan_dir) {
int int_part;
data->inits.v_c = dal_fixed31_32_add(data->inits.v_c, dal_fixed31_32_mul_int(
data->inits.v_c = dc_fixpt_add(data->inits.v_c, dc_fixpt_mul_int(
data->ratios.vert_c, recout_skip->height));
int_part = dal_fixed31_32_floor(data->inits.v_c) - data->viewport_c.y;
int_part = dc_fixpt_floor(data->inits.v_c) - data->viewport_c.y;
if (int_part < data->taps.v_taps_c) {
int int_adj = data->viewport_c.y >= (data->taps.v_taps_c - int_part) ?
(data->taps.v_taps_c - int_part) : data->viewport_c.y;
@ -823,12 +828,12 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx, struct view *r
int_part = data->taps.v_taps_c;
}
data->inits.v_c.value &= 0xffffffff;
data->inits.v_c = dal_fixed31_32_add_int(data->inits.v_c, int_part);
data->inits.v_c = dc_fixpt_add_int(data->inits.v_c, int_part);
}
/* Interlaced inits based on final vert inits */
data->inits.v_bot = dal_fixed31_32_add(data->inits.v, data->ratios.vert);
data->inits.v_c_bot = dal_fixed31_32_add(data->inits.v_c, data->ratios.vert_c);
data->inits.v_bot = dc_fixpt_add(data->inits.v, data->ratios.vert);
data->inits.v_c_bot = dc_fixpt_add(data->inits.v_c, data->ratios.vert_c);
if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_90 ||
pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270) {

View File

@ -75,6 +75,7 @@ struct dc_caps {
bool dynamic_audio;
bool is_apu;
bool dual_link_dvi;
bool post_blend_color_processing;
};
struct dc_dcc_surface_param {
@ -203,6 +204,7 @@ struct dc_debug {
bool clock_trace;
bool validation_trace;
bool bandwidth_calcs_trace;
int max_downscale_src_width;
/* stutter efficiency related */
bool disable_stutter;
@ -239,6 +241,8 @@ struct dc_debug {
bool az_endpoint_mute_only;
bool always_use_regamma;
bool p010_mpo_support;
bool recovery_enabled;
};
struct dc_state;
struct resource_pool;
@ -499,18 +503,18 @@ struct dc_surface_update {
struct dc_plane_state *surface;
/* isr safe update parameters. null means no updates */
struct dc_flip_addrs *flip_addr;
struct dc_plane_info *plane_info;
struct dc_scaling_info *scaling_info;
const struct dc_flip_addrs *flip_addr;
const struct dc_plane_info *plane_info;
const struct dc_scaling_info *scaling_info;
/* following updates require alloc/sleep/spin that is not isr safe,
* null means no updates
*/
struct dc_gamma *gamma;
struct dc_transfer_func *in_transfer_func;
const struct dc_gamma *gamma;
const struct dc_transfer_func *in_transfer_func;
struct dc_csc_transform *input_csc_color_matrix;
struct fixed31_32 *coeff_reduction_factor;
const struct dc_csc_transform *input_csc_color_matrix;
const struct fixed31_32 *coeff_reduction_factor;
};
/*

View File

@ -26,6 +26,8 @@
#ifndef DC_DP_TYPES_H
#define DC_DP_TYPES_H
#include "os_types.h"
enum dc_lane_count {
LANE_COUNT_UNKNOWN = 0,
LANE_COUNT_ONE = 1,

View File

@ -25,7 +25,7 @@
#ifndef DC_TYPES_H_
#define DC_TYPES_H_
#include "fixed32_32.h"
#include "os_types.h"
#include "fixed31_32.h"
#include "irq_types.h"
#include "dc_dp_types.h"

View File

@ -26,7 +26,7 @@
#include "dce_abm.h"
#include "dm_services.h"
#include "reg_helper.h"
#include "fixed32_32.h"
#include "fixed31_32.h"
#include "dc.h"
#include "atom.h"

View File

@ -590,9 +590,7 @@ static uint32_t dce110_get_pix_clk_dividers(
pll_settings, pix_clk_params);
break;
case DCE_VERSION_11_2:
#if defined(CONFIG_DRM_AMD_DC_VEGAM)
case DCE_VERSION_11_22:
#endif
case DCE_VERSION_12_0:
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
case DCN_VERSION_1_0:
@ -659,12 +657,12 @@ static uint32_t dce110_get_d_to_pixel_rate_in_hz(
return 0;
}
pix_rate = dal_fixed31_32_from_int(clk_src->ref_freq_khz);
pix_rate = dal_fixed31_32_mul_int(pix_rate, 1000);
pix_rate = dal_fixed31_32_mul_int(pix_rate, phase);
pix_rate = dal_fixed31_32_div_int(pix_rate, modulo);
pix_rate = dc_fixpt_from_int(clk_src->ref_freq_khz);
pix_rate = dc_fixpt_mul_int(pix_rate, 1000);
pix_rate = dc_fixpt_mul_int(pix_rate, phase);
pix_rate = dc_fixpt_div_int(pix_rate, modulo);
return dal_fixed31_32_round(pix_rate);
return dc_fixpt_round(pix_rate);
} else {
return dce110_get_dp_pixel_rate_from_combo_phy_pll(cs, pix_clk_params, pll_settings);
}
@ -713,12 +711,12 @@ static bool calculate_ss(
const struct spread_spectrum_data *ss_data,
struct delta_sigma_data *ds_data)
{
struct fixed32_32 fb_div;
struct fixed32_32 ss_amount;
struct fixed32_32 ss_nslip_amount;
struct fixed32_32 ss_ds_frac_amount;
struct fixed32_32 ss_step_size;
struct fixed32_32 modulation_time;
struct fixed31_32 fb_div;
struct fixed31_32 ss_amount;
struct fixed31_32 ss_nslip_amount;
struct fixed31_32 ss_ds_frac_amount;
struct fixed31_32 ss_step_size;
struct fixed31_32 modulation_time;
if (ds_data == NULL)
return false;
@ -733,42 +731,42 @@ static bool calculate_ss(
/* compute SS_AMOUNT_FBDIV & SS_AMOUNT_NFRAC_SLIP & SS_AMOUNT_DSFRAC */
/* 6 decimal point support in fractional feedback divider */
fb_div = dal_fixed32_32_from_fraction(
fb_div = dc_fixpt_from_fraction(
pll_settings->fract_feedback_divider, 1000000);
fb_div = dal_fixed32_32_add_int(fb_div, pll_settings->feedback_divider);
fb_div = dc_fixpt_add_int(fb_div, pll_settings->feedback_divider);
ds_data->ds_frac_amount = 0;
/* spreadSpectrumPercentage is in units of 0.01%,
* so it has to be divided by 100 * 100 */
ss_amount = dal_fixed32_32_mul(
fb_div, dal_fixed32_32_from_fraction(ss_data->percentage,
ss_amount = dc_fixpt_mul(
fb_div, dc_fixpt_from_fraction(ss_data->percentage,
100 * ss_data->percentage_divider));
ds_data->feedback_amount = dal_fixed32_32_floor(ss_amount);
ds_data->feedback_amount = dc_fixpt_floor(ss_amount);
ss_nslip_amount = dal_fixed32_32_sub(ss_amount,
dal_fixed32_32_from_int(ds_data->feedback_amount));
ss_nslip_amount = dal_fixed32_32_mul_int(ss_nslip_amount, 10);
ds_data->nfrac_amount = dal_fixed32_32_floor(ss_nslip_amount);
ss_nslip_amount = dc_fixpt_sub(ss_amount,
dc_fixpt_from_int(ds_data->feedback_amount));
ss_nslip_amount = dc_fixpt_mul_int(ss_nslip_amount, 10);
ds_data->nfrac_amount = dc_fixpt_floor(ss_nslip_amount);
ss_ds_frac_amount = dal_fixed32_32_sub(ss_nslip_amount,
dal_fixed32_32_from_int(ds_data->nfrac_amount));
ss_ds_frac_amount = dal_fixed32_32_mul_int(ss_ds_frac_amount, 65536);
ds_data->ds_frac_amount = dal_fixed32_32_floor(ss_ds_frac_amount);
ss_ds_frac_amount = dc_fixpt_sub(ss_nslip_amount,
dc_fixpt_from_int(ds_data->nfrac_amount));
ss_ds_frac_amount = dc_fixpt_mul_int(ss_ds_frac_amount, 65536);
ds_data->ds_frac_amount = dc_fixpt_floor(ss_ds_frac_amount);
/* compute SS_STEP_SIZE_DSFRAC */
modulation_time = dal_fixed32_32_from_fraction(
modulation_time = dc_fixpt_from_fraction(
pll_settings->reference_freq * 1000,
pll_settings->reference_divider * ss_data->modulation_freq_hz);
if (ss_data->flags.CENTER_SPREAD)
modulation_time = dal_fixed32_32_div_int(modulation_time, 4);
modulation_time = dc_fixpt_div_int(modulation_time, 4);
else
modulation_time = dal_fixed32_32_div_int(modulation_time, 2);
modulation_time = dc_fixpt_div_int(modulation_time, 2);
ss_step_size = dal_fixed32_32_div(ss_amount, modulation_time);
ss_step_size = dc_fixpt_div(ss_amount, modulation_time);
/* SS_STEP_SIZE_DSFRAC_DEC = Int(SS_STEP_SIZE * 2 ^ 16 * 10)*/
ss_step_size = dal_fixed32_32_mul_int(ss_step_size, 65536 * 10);
ds_data->ds_frac_size = dal_fixed32_32_floor(ss_step_size);
ss_step_size = dc_fixpt_mul_int(ss_step_size, 65536 * 10);
ds_data->ds_frac_size = dc_fixpt_floor(ss_step_size);
return true;
}
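
For reference, the decomposition calculate_ss() performs maps onto plain arithmetic. A minimal standalone sketch (illustrative only, not patch code; double math stands in for the dc_fixpt_* helpers above):

#include <math.h>

static void decompose_ss(double fb_div, unsigned int percentage,
                         unsigned int percentage_divider,
                         unsigned int *fbdiv, unsigned int *nfrac,
                         unsigned int *dsfrac)
{
        /* percentage is in units of .01%, hence the 100 * divider */
        double ss_amount = fb_div * percentage / (100.0 * percentage_divider);

        *fbdiv = (unsigned int)floor(ss_amount);             /* SS_AMOUNT_FBDIV */
        ss_amount = (ss_amount - *fbdiv) * 10.0;
        *nfrac = (unsigned int)floor(ss_amount);             /* SS_AMOUNT_NFRAC_SLIP */
        *dsfrac = (unsigned int)floor((ss_amount - *nfrac) * 65536.0); /* SS_AMOUNT_DSFRAC */
}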
@ -982,9 +980,7 @@ static bool dce110_program_pix_clk(
break;
case DCE_VERSION_11_2:
#if defined(CONFIG_DRM_AMD_DC_VEGAM)
case DCE_VERSION_11_22:
#endif
case DCE_VERSION_12_0:
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
case DCN_VERSION_1_0:

View File

@ -26,7 +26,7 @@
#include "dce_clocks.h"
#include "dm_services.h"
#include "reg_helper.h"
#include "fixed32_32.h"
#include "fixed31_32.h"
#include "bios_parser_interface.h"
#include "dc.h"
#include "dmcu.h"
@ -35,7 +35,7 @@
#endif
#include "core_types.h"
#include "dc_types.h"
#include "dal_asic_id.h"
#define TO_DCE_CLOCKS(clocks)\
container_of(clocks, struct dce_disp_clk, base)
@ -228,19 +228,19 @@ static int dce_clocks_get_dp_ref_freq(struct display_clock *clk)
generated according to average value (case as with previous ASICs)
*/
if (clk_dce->ss_on_dprefclk && clk_dce->dprefclk_ss_divider != 0) {
struct fixed32_32 ss_percentage = dal_fixed32_32_div_int(
dal_fixed32_32_from_fraction(
struct fixed31_32 ss_percentage = dc_fixpt_div_int(
dc_fixpt_from_fraction(
clk_dce->dprefclk_ss_percentage,
clk_dce->dprefclk_ss_divider), 200);
struct fixed32_32 adj_dp_ref_clk_khz;
struct fixed31_32 adj_dp_ref_clk_khz;
ss_percentage = dal_fixed32_32_sub(dal_fixed32_32_one,
ss_percentage = dc_fixpt_sub(dc_fixpt_one,
ss_percentage);
adj_dp_ref_clk_khz =
dal_fixed32_32_mul_int(
dc_fixpt_mul_int(
ss_percentage,
dp_ref_clk_khz);
dp_ref_clk_khz = dal_fixed32_32_floor(adj_dp_ref_clk_khz);
dp_ref_clk_khz = dc_fixpt_floor(adj_dp_ref_clk_khz);
}
return dp_ref_clk_khz;
@ -256,19 +256,19 @@ static int dce_clocks_get_dp_ref_freq_wrkaround(struct display_clock *clk)
int dp_ref_clk_khz = 600000;
if (clk_dce->ss_on_dprefclk && clk_dce->dprefclk_ss_divider != 0) {
struct fixed32_32 ss_percentage = dal_fixed32_32_div_int(
dal_fixed32_32_from_fraction(
struct fixed31_32 ss_percentage = dc_fixpt_div_int(
dc_fixpt_from_fraction(
clk_dce->dprefclk_ss_percentage,
clk_dce->dprefclk_ss_divider), 200);
struct fixed32_32 adj_dp_ref_clk_khz;
struct fixed31_32 adj_dp_ref_clk_khz;
ss_percentage = dal_fixed32_32_sub(dal_fixed32_32_one,
ss_percentage = dc_fixpt_sub(dc_fixpt_one,
ss_percentage);
adj_dp_ref_clk_khz =
dal_fixed32_32_mul_int(
dc_fixpt_mul_int(
ss_percentage,
dp_ref_clk_khz);
dp_ref_clk_khz = dal_fixed32_32_floor(adj_dp_ref_clk_khz);
dp_ref_clk_khz = dc_fixpt_floor(adj_dp_ref_clk_khz);
}
return dp_ref_clk_khz;
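
Both hunks above apply the same down-spread correction to the DP reference clock. As plain integer math (sketch; the /200 presumably folds the percent-to-fraction conversion with the half-amplitude of a down-spread profile):

static int adjust_dp_ref_clk_khz(int dp_ref_clk_khz,
                                 unsigned int ss_percentage,
                                 unsigned int ss_divider)
{
        /* adjusted = ref * (1 - percentage / divider / 200) */
        long long scale = 200LL * ss_divider;

        return (int)((dp_ref_clk_khz * (scale - ss_percentage)) / scale);
}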
@ -413,9 +413,12 @@ static int dce112_set_clock(
/*VBIOS will determine DPREFCLK frequency, so we don't set it*/
dce_clk_params.target_clock_frequency = 0;
dce_clk_params.clock_type = DCECLOCK_TYPE_DPREFCLK;
dce_clk_params.flags.USE_GENLOCK_AS_SOURCE_FOR_DPREFCLK =
if (!ASICREV_IS_VEGA20_P(clk->ctx->asic_id.hw_internal_rev))
dce_clk_params.flags.USE_GENLOCK_AS_SOURCE_FOR_DPREFCLK =
(dce_clk_params.pll_id ==
CLOCK_SOURCE_COMBO_DISPLAY_PLL0);
else
dce_clk_params.flags.USE_GENLOCK_AS_SOURCE_FOR_DPREFCLK = false;
bp->funcs->set_dce_clock(bp, &dce_clk_params);

View File

@ -28,7 +28,7 @@
#include "dce_dmcu.h"
#include "dm_services.h"
#include "reg_helper.h"
#include "fixed32_32.h"
#include "fixed31_32.h"
#include "dc.h"
#define TO_DCE_DMCU(dmcu)\

View File

@ -195,13 +195,13 @@ static void dce_ipp_program_input_lut(
for (i = 0; i < gamma->num_entries; i++) {
REG_SET(DC_LUT_SEQ_COLOR, 0, DC_LUT_SEQ_COLOR,
dal_fixed31_32_round(
dc_fixpt_round(
gamma->entries.red[i]));
REG_SET(DC_LUT_SEQ_COLOR, 0, DC_LUT_SEQ_COLOR,
dal_fixed31_32_round(
dc_fixpt_round(
gamma->entries.green[i]));
REG_SET(DC_LUT_SEQ_COLOR, 0, DC_LUT_SEQ_COLOR,
dal_fixed31_32_round(
dc_fixpt_round(
gamma->entries.blue[i]));
}

View File

@ -1014,11 +1014,11 @@ static const uint16_t filter_8tap_64p_183[264] = {
const uint16_t *get_filter_3tap_16p(struct fixed31_32 ratio)
{
if (ratio.value < dal_fixed31_32_one.value)
if (ratio.value < dc_fixpt_one.value)
return filter_3tap_16p_upscale;
else if (ratio.value < dal_fixed31_32_from_fraction(4, 3).value)
else if (ratio.value < dc_fixpt_from_fraction(4, 3).value)
return filter_3tap_16p_117;
else if (ratio.value < dal_fixed31_32_from_fraction(5, 3).value)
else if (ratio.value < dc_fixpt_from_fraction(5, 3).value)
return filter_3tap_16p_150;
else
return filter_3tap_16p_183;
@ -1026,11 +1026,11 @@ const uint16_t *get_filter_3tap_16p(struct fixed31_32 ratio)
const uint16_t *get_filter_3tap_64p(struct fixed31_32 ratio)
{
if (ratio.value < dal_fixed31_32_one.value)
if (ratio.value < dc_fixpt_one.value)
return filter_3tap_64p_upscale;
else if (ratio.value < dal_fixed31_32_from_fraction(4, 3).value)
else if (ratio.value < dc_fixpt_from_fraction(4, 3).value)
return filter_3tap_64p_117;
else if (ratio.value < dal_fixed31_32_from_fraction(5, 3).value)
else if (ratio.value < dc_fixpt_from_fraction(5, 3).value)
return filter_3tap_64p_150;
else
return filter_3tap_64p_183;
@ -1038,11 +1038,11 @@ const uint16_t *get_filter_3tap_64p(struct fixed31_32 ratio)
const uint16_t *get_filter_4tap_16p(struct fixed31_32 ratio)
{
if (ratio.value < dal_fixed31_32_one.value)
if (ratio.value < dc_fixpt_one.value)
return filter_4tap_16p_upscale;
else if (ratio.value < dal_fixed31_32_from_fraction(4, 3).value)
else if (ratio.value < dc_fixpt_from_fraction(4, 3).value)
return filter_4tap_16p_117;
else if (ratio.value < dal_fixed31_32_from_fraction(5, 3).value)
else if (ratio.value < dc_fixpt_from_fraction(5, 3).value)
return filter_4tap_16p_150;
else
return filter_4tap_16p_183;
@ -1050,11 +1050,11 @@ const uint16_t *get_filter_4tap_16p(struct fixed31_32 ratio)
const uint16_t *get_filter_4tap_64p(struct fixed31_32 ratio)
{
if (ratio.value < dal_fixed31_32_one.value)
if (ratio.value < dc_fixpt_one.value)
return filter_4tap_64p_upscale;
else if (ratio.value < dal_fixed31_32_from_fraction(4, 3).value)
else if (ratio.value < dc_fixpt_from_fraction(4, 3).value)
return filter_4tap_64p_117;
else if (ratio.value < dal_fixed31_32_from_fraction(5, 3).value)
else if (ratio.value < dc_fixpt_from_fraction(5, 3).value)
return filter_4tap_64p_150;
else
return filter_4tap_64p_183;
@ -1062,11 +1062,11 @@ const uint16_t *get_filter_4tap_64p(struct fixed31_32 ratio)
const uint16_t *get_filter_5tap_64p(struct fixed31_32 ratio)
{
if (ratio.value < dal_fixed31_32_one.value)
if (ratio.value < dc_fixpt_one.value)
return filter_5tap_64p_upscale;
else if (ratio.value < dal_fixed31_32_from_fraction(4, 3).value)
else if (ratio.value < dc_fixpt_from_fraction(4, 3).value)
return filter_5tap_64p_117;
else if (ratio.value < dal_fixed31_32_from_fraction(5, 3).value)
else if (ratio.value < dc_fixpt_from_fraction(5, 3).value)
return filter_5tap_64p_150;
else
return filter_5tap_64p_183;
@ -1074,11 +1074,11 @@ const uint16_t *get_filter_5tap_64p(struct fixed31_32 ratio)
const uint16_t *get_filter_6tap_64p(struct fixed31_32 ratio)
{
if (ratio.value < dal_fixed31_32_one.value)
if (ratio.value < dc_fixpt_one.value)
return filter_6tap_64p_upscale;
else if (ratio.value < dal_fixed31_32_from_fraction(4, 3).value)
else if (ratio.value < dc_fixpt_from_fraction(4, 3).value)
return filter_6tap_64p_117;
else if (ratio.value < dal_fixed31_32_from_fraction(5, 3).value)
else if (ratio.value < dc_fixpt_from_fraction(5, 3).value)
return filter_6tap_64p_150;
else
return filter_6tap_64p_183;
@ -1086,11 +1086,11 @@ const uint16_t *get_filter_6tap_64p(struct fixed31_32 ratio)
const uint16_t *get_filter_7tap_64p(struct fixed31_32 ratio)
{
if (ratio.value < dal_fixed31_32_one.value)
if (ratio.value < dc_fixpt_one.value)
return filter_7tap_64p_upscale;
else if (ratio.value < dal_fixed31_32_from_fraction(4, 3).value)
else if (ratio.value < dc_fixpt_from_fraction(4, 3).value)
return filter_7tap_64p_117;
else if (ratio.value < dal_fixed31_32_from_fraction(5, 3).value)
else if (ratio.value < dc_fixpt_from_fraction(5, 3).value)
return filter_7tap_64p_150;
else
return filter_7tap_64p_183;
@ -1098,11 +1098,11 @@ const uint16_t *get_filter_7tap_64p(struct fixed31_32 ratio)
const uint16_t *get_filter_8tap_64p(struct fixed31_32 ratio)
{
if (ratio.value < dal_fixed31_32_one.value)
if (ratio.value < dc_fixpt_one.value)
return filter_8tap_64p_upscale;
else if (ratio.value < dal_fixed31_32_from_fraction(4, 3).value)
else if (ratio.value < dc_fixpt_from_fraction(4, 3).value)
return filter_8tap_64p_117;
else if (ratio.value < dal_fixed31_32_from_fraction(5, 3).value)
else if (ratio.value < dc_fixpt_from_fraction(5, 3).value)
return filter_8tap_64p_150;
else
return filter_8tap_64p_183;
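
All of the get_filter_*() helpers above share one selection rule; a sketch of that rule as a hypothetical pick_filter(), using the same raw fixed31_32 value compares:

static const uint16_t *pick_filter(struct fixed31_32 ratio,
                                   const uint16_t *upscale, const uint16_t *f117,
                                   const uint16_t *f150, const uint16_t *f183)
{
        if (ratio.value < dc_fixpt_one.value)
                return upscale;                         /* scaling up */
        else if (ratio.value < dc_fixpt_from_fraction(4, 3).value)
                return f117;                            /* mild downscale */
        else if (ratio.value < dc_fixpt_from_fraction(5, 3).value)
                return f150;                            /* medium downscale */
        else
                return f183;                            /* steep downscale */
}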

View File

@ -683,11 +683,11 @@ static void dce110_stream_encoder_set_mst_bandwidth(
struct fixed31_32 avg_time_slots_per_mtp)
{
struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
uint32_t x = dal_fixed31_32_floor(
uint32_t x = dc_fixpt_floor(
avg_time_slots_per_mtp);
uint32_t y = dal_fixed31_32_ceil(
dal_fixed31_32_shl(
dal_fixed31_32_sub_int(
uint32_t y = dc_fixpt_ceil(
dc_fixpt_shl(
dc_fixpt_sub_int(
avg_time_slots_per_mtp,
x),
26));
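
The MST bandwidth write above splits avg_time_slots_per_mtp into the integer slot count X and a 26-bit fraction Y. The same computation as a helper (sketch):

static void split_avg_time_slots(struct fixed31_32 avg_time_slots_per_mtp,
                                 uint32_t *x, uint32_t *y)
{
        *x = dc_fixpt_floor(avg_time_slots_per_mtp);    /* whole time slots */
        *y = dc_fixpt_ceil(dc_fixpt_shl(                /* remaining fraction, .26 */
                dc_fixpt_sub_int(avg_time_slots_per_mtp, *x), 26));
}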

View File

@ -41,7 +41,7 @@
#define DC_LOGGER \
xfm_dce->base.ctx->logger
#define IDENTITY_RATIO(ratio) (dal_fixed31_32_u2d19(ratio) == (1 << 19))
#define IDENTITY_RATIO(ratio) (dc_fixpt_u2d19(ratio) == (1 << 19))
#define GAMUT_MATRIX_SIZE 12
#define SCL_PHASES 16
@ -256,27 +256,27 @@ static void calculate_inits(
struct fixed31_32 v_init;
inits->h_int_scale_ratio =
dal_fixed31_32_u2d19(data->ratios.horz) << 5;
dc_fixpt_u2d19(data->ratios.horz) << 5;
inits->v_int_scale_ratio =
dal_fixed31_32_u2d19(data->ratios.vert) << 5;
dc_fixpt_u2d19(data->ratios.vert) << 5;
h_init =
dal_fixed31_32_div_int(
dal_fixed31_32_add(
dc_fixpt_div_int(
dc_fixpt_add(
data->ratios.horz,
dal_fixed31_32_from_int(data->taps.h_taps + 1)),
dc_fixpt_from_int(data->taps.h_taps + 1)),
2);
inits->h_init.integer = dal_fixed31_32_floor(h_init);
inits->h_init.fraction = dal_fixed31_32_u0d19(h_init) << 5;
inits->h_init.integer = dc_fixpt_floor(h_init);
inits->h_init.fraction = dc_fixpt_u0d19(h_init) << 5;
v_init =
dal_fixed31_32_div_int(
dal_fixed31_32_add(
dc_fixpt_div_int(
dc_fixpt_add(
data->ratios.vert,
dal_fixed31_32_from_int(data->taps.v_taps + 1)),
dc_fixpt_from_int(data->taps.v_taps + 1)),
2);
inits->v_init.integer = dal_fixed31_32_floor(v_init);
inits->v_init.fraction = dal_fixed31_32_u0d19(v_init) << 5;
inits->v_init.integer = dc_fixpt_floor(v_init);
inits->v_init.fraction = dc_fixpt_u0d19(v_init) << 5;
}
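
calculate_inits() implements SCL_INIT = (ratio + taps + 1) / 2 and packs the result as an integer plus a 0.24-style fraction (u0d19 shifted left by 5, first five bits zero). Sketch for one axis:

static void calc_axis_init(struct fixed31_32 ratio, uint32_t taps,
                           uint32_t *init_int, uint32_t *init_frac)
{
        struct fixed31_32 init = dc_fixpt_div_int(
                dc_fixpt_add(ratio, dc_fixpt_from_int(taps + 1)), 2);

        *init_int = dc_fixpt_floor(init);
        *init_frac = dc_fixpt_u0d19(init) << 5; /* 19 fraction bits, top-aligned to .24 */
}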
static void program_scl_ratios_inits(

View File

@ -509,19 +509,19 @@ dce110_translate_regamma_to_hw_format(const struct dc_transfer_func *output_tf,
rgb_resulted[hw_points - 1].green = output_tf->tf_pts.green[start_index];
rgb_resulted[hw_points - 1].blue = output_tf->tf_pts.blue[start_index];
arr_points[0].x = dal_fixed31_32_pow(dal_fixed31_32_from_int(2),
dal_fixed31_32_from_int(region_start));
arr_points[1].x = dal_fixed31_32_pow(dal_fixed31_32_from_int(2),
dal_fixed31_32_from_int(region_end));
arr_points[0].x = dc_fixpt_pow(dc_fixpt_from_int(2),
dc_fixpt_from_int(region_start));
arr_points[1].x = dc_fixpt_pow(dc_fixpt_from_int(2),
dc_fixpt_from_int(region_end));
y_r = rgb_resulted[0].red;
y_g = rgb_resulted[0].green;
y_b = rgb_resulted[0].blue;
y1_min = dal_fixed31_32_min(y_r, dal_fixed31_32_min(y_g, y_b));
y1_min = dc_fixpt_min(y_r, dc_fixpt_min(y_g, y_b));
arr_points[0].y = y1_min;
arr_points[0].slope = dal_fixed31_32_div(arr_points[0].y,
arr_points[0].slope = dc_fixpt_div(arr_points[0].y,
arr_points[0].x);
y_r = rgb_resulted[hw_points - 1].red;
@ -531,21 +531,21 @@ dce110_translate_regamma_to_hw_format(const struct dc_transfer_func *output_tf,
/* see comment above, m_arrPoints[1].y should be the Y value for the
* region end (m_numOfHwPoints), not last HW point(m_numOfHwPoints - 1)
*/
y3_max = dal_fixed31_32_max(y_r, dal_fixed31_32_max(y_g, y_b));
y3_max = dc_fixpt_max(y_r, dc_fixpt_max(y_g, y_b));
arr_points[1].y = y3_max;
arr_points[1].slope = dal_fixed31_32_zero;
arr_points[1].slope = dc_fixpt_zero;
if (output_tf->tf == TRANSFER_FUNCTION_PQ) {
/* for PQ, we want to have a straight line from last HW X point,
* and the slope to be such that we hit 1.0 at 10000 nits.
*/
const struct fixed31_32 end_value = dal_fixed31_32_from_int(125);
const struct fixed31_32 end_value = dc_fixpt_from_int(125);
arr_points[1].slope = dal_fixed31_32_div(
dal_fixed31_32_sub(dal_fixed31_32_one, arr_points[1].y),
dal_fixed31_32_sub(end_value, arr_points[1].x));
arr_points[1].slope = dc_fixpt_div(
dc_fixpt_sub(dc_fixpt_one, arr_points[1].y),
dc_fixpt_sub(end_value, arr_points[1].x));
}
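
The PQ case is the only non-zero end slope: with output normalized so that 1.0 equals 80 nits, 10000 nits lands at x = 125, hence dc_fixpt_from_int(125). As a helper (sketch; the 80-nit normalization is inferred from set_hdr_multiplier() later in this diff):

static struct fixed31_32 pq_end_slope(struct fixed31_32 x_end,
                                      struct fixed31_32 y_end)
{
        const struct fixed31_32 end_value = dc_fixpt_from_int(125); /* 10000 / 80 */

        return dc_fixpt_div(dc_fixpt_sub(dc_fixpt_one, y_end),
                            dc_fixpt_sub(end_value, x_end));
}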
regamma_params->hw_points_num = hw_points;
@ -569,16 +569,16 @@ dce110_translate_regamma_to_hw_format(const struct dc_transfer_func *output_tf,
i = 1;
while (i != hw_points + 1) {
if (dal_fixed31_32_lt(rgb_plus_1->red, rgb->red))
if (dc_fixpt_lt(rgb_plus_1->red, rgb->red))
rgb_plus_1->red = rgb->red;
if (dal_fixed31_32_lt(rgb_plus_1->green, rgb->green))
if (dc_fixpt_lt(rgb_plus_1->green, rgb->green))
rgb_plus_1->green = rgb->green;
if (dal_fixed31_32_lt(rgb_plus_1->blue, rgb->blue))
if (dc_fixpt_lt(rgb_plus_1->blue, rgb->blue))
rgb_plus_1->blue = rgb->blue;
rgb->delta_red = dal_fixed31_32_sub(rgb_plus_1->red, rgb->red);
rgb->delta_green = dal_fixed31_32_sub(rgb_plus_1->green, rgb->green);
rgb->delta_blue = dal_fixed31_32_sub(rgb_plus_1->blue, rgb->blue);
rgb->delta_red = dc_fixpt_sub(rgb_plus_1->red, rgb->red);
rgb->delta_green = dc_fixpt_sub(rgb_plus_1->green, rgb->green);
rgb->delta_blue = dc_fixpt_sub(rgb_plus_1->blue, rgb->blue);
++rgb_plus_1;
++rgb;
@ -2269,74 +2269,6 @@ static void program_gamut_remap(struct pipe_ctx *pipe_ctx)
pipe_ctx->plane_res.xfm->funcs->transform_set_gamut_remap(pipe_ctx->plane_res.xfm, &adjust);
}
/**
* TODO REMOVE, USE UPDATE INSTEAD
*/
static void set_plane_config(
const struct dc *dc,
struct pipe_ctx *pipe_ctx,
struct resource_context *res_ctx)
{
struct mem_input *mi = pipe_ctx->plane_res.mi;
struct dc_plane_state *plane_state = pipe_ctx->plane_state;
struct xfm_grph_csc_adjustment adjust;
struct out_csc_color_matrix tbl_entry;
unsigned int i;
memset(&adjust, 0, sizeof(adjust));
memset(&tbl_entry, 0, sizeof(tbl_entry));
adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
dce_enable_fe_clock(dc->hwseq, mi->inst, true);
set_default_colors(pipe_ctx);
if (pipe_ctx->stream->csc_color_matrix.enable_adjustment == true) {
tbl_entry.color_space =
pipe_ctx->stream->output_color_space;
for (i = 0; i < 12; i++)
tbl_entry.regval[i] =
pipe_ctx->stream->csc_color_matrix.matrix[i];
pipe_ctx->plane_res.xfm->funcs->opp_set_csc_adjustment
(pipe_ctx->plane_res.xfm, &tbl_entry);
}
if (pipe_ctx->stream->gamut_remap_matrix.enable_remap == true) {
adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
adjust.temperature_matrix[i] =
pipe_ctx->stream->gamut_remap_matrix.matrix[i];
}
pipe_ctx->plane_res.xfm->funcs->transform_set_gamut_remap(pipe_ctx->plane_res.xfm, &adjust);
pipe_ctx->plane_res.scl_data.lb_params.alpha_en = pipe_ctx->bottom_pipe != 0;
program_scaler(dc, pipe_ctx);
program_surface_visibility(dc, pipe_ctx);
mi->funcs->mem_input_program_surface_config(
mi,
plane_state->format,
&plane_state->tiling_info,
&plane_state->plane_size,
plane_state->rotation,
NULL,
false);
if (mi->funcs->set_blank)
mi->funcs->set_blank(mi, pipe_ctx->plane_state->visible);
if (dc->config.gpu_vm_support)
mi->funcs->mem_input_program_pte_vm(
pipe_ctx->plane_res.mi,
plane_state->format,
&plane_state->tiling_info,
plane_state->rotation);
}
static void update_plane_addr(const struct dc *dc,
struct pipe_ctx *pipe_ctx)
{
@ -3023,7 +2955,6 @@ static const struct hw_sequencer_funcs dce110_funcs = {
.init_hw = init_hw,
.apply_ctx_to_hw = dce110_apply_ctx_to_hw,
.apply_ctx_for_surface = dce110_apply_ctx_for_surface,
.set_plane_config = set_plane_config,
.update_plane_addr = update_plane_addr,
.update_pending_status = dce110_update_pending_status,
.set_input_transfer_func = dce110_set_input_transfer_func,

View File

@ -373,13 +373,13 @@ static void calculate_inits(
struct rect *chroma_viewport)
{
inits->h_int_scale_ratio_luma =
dal_fixed31_32_u2d19(data->ratios.horz) << 5;
dc_fixpt_u2d19(data->ratios.horz) << 5;
inits->v_int_scale_ratio_luma =
dal_fixed31_32_u2d19(data->ratios.vert) << 5;
dc_fixpt_u2d19(data->ratios.vert) << 5;
inits->h_int_scale_ratio_chroma =
dal_fixed31_32_u2d19(data->ratios.horz_c) << 5;
dc_fixpt_u2d19(data->ratios.horz_c) << 5;
inits->v_int_scale_ratio_chroma =
dal_fixed31_32_u2d19(data->ratios.vert_c) << 5;
dc_fixpt_u2d19(data->ratios.vert_c) << 5;
inits->h_init_luma.integer = 1;
inits->v_init_luma.integer = 1;

View File

@ -814,14 +814,25 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc)
dm_pp_notify_wm_clock_changes(dc->ctx, &clk_ranges);
}
static uint32_t read_pipe_fuses(struct dc_context *ctx)
{
uint32_t value = dm_read_reg_soc15(ctx, mmCC_DC_PIPE_DIS, 0);
/* VG20 supports at most 6 pipes */
value = value & 0x3f;
return value;
}
static bool construct(
uint8_t num_virtual_links,
struct dc *dc,
struct dce110_resource_pool *pool)
{
unsigned int i;
int j;
struct dc_context *ctx = dc->ctx;
struct irq_service_init_data irq_init_data;
bool harvest_enabled = ASICREV_IS_VEGA20_P(ctx->asic_id.hw_internal_rev);
uint32_t pipe_fuses;
ctx->dc_bios->regs = &bios_regs;
@ -915,28 +926,41 @@ static bool construct(
if (!pool->base.irqs)
goto irqs_create_fail;
/* retrieve valid pipe fuses */
if (harvest_enabled)
pipe_fuses = read_pipe_fuses(ctx);
/* index to valid pipe resource */
j = 0;
for (i = 0; i < pool->base.pipe_count; i++) {
pool->base.timing_generators[i] =
if (harvest_enabled) {
if ((pipe_fuses & (1 << i)) != 0) {
dm_error("DC: skip invalid pipe %d!\n", i);
continue;
}
}
pool->base.timing_generators[j] =
dce120_timing_generator_create(
ctx,
i,
&dce120_tg_offsets[i]);
if (pool->base.timing_generators[i] == NULL) {
if (pool->base.timing_generators[j] == NULL) {
BREAK_TO_DEBUGGER();
dm_error("DC: failed to create tg!\n");
goto controller_create_fail;
}
pool->base.mis[i] = dce120_mem_input_create(ctx, i);
pool->base.mis[j] = dce120_mem_input_create(ctx, i);
if (pool->base.mis[i] == NULL) {
if (pool->base.mis[j] == NULL) {
BREAK_TO_DEBUGGER();
dm_error(
"DC: failed to create memory input!\n");
goto controller_create_fail;
}
pool->base.ipps[i] = dce120_ipp_create(ctx, i);
pool->base.ipps[j] = dce120_ipp_create(ctx, i);
if (pool->base.ipps[j] == NULL) {
BREAK_TO_DEBUGGER();
dm_error(
@ -944,7 +968,7 @@ static bool construct(
goto controller_create_fail;
}
pool->base.transforms[i] = dce120_transform_create(ctx, i);
pool->base.transforms[j] = dce120_transform_create(ctx, i);
if (pool->base.transforms[j] == NULL) {
BREAK_TO_DEBUGGER();
dm_error(
@ -952,16 +976,23 @@ static bool construct(
goto res_create_fail;
}
pool->base.opps[i] = dce120_opp_create(
pool->base.opps[j] = dce120_opp_create(
ctx,
i);
if (pool->base.opps[i] == NULL) {
if (pool->base.opps[j] == NULL) {
BREAK_TO_DEBUGGER();
dm_error(
"DC: failed to create output pixel processor!\n");
}
/* advance to the next valid pipe slot */
j++;
}
/* number of valid pipes */
pool->base.pipe_count = j;
pool->base.timing_generator_count = j;
if (!resource_construct(num_virtual_links, dc, &pool->base,
&res_create_funcs))
goto res_create_fail;
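
The harvesting loop above packs the surviving pipes to the front of the resource arrays: a set fuse bit means physical pipe i is dead, and the logical index j advances only for live pipes. The index mapping in isolation (sketch):

static int compact_live_pipes(uint32_t pipe_fuses, bool harvest_enabled,
                              int pipe_count, int *phys_of_logical)
{
        int i, j = 0;

        for (i = 0; i < pipe_count; i++) {
                if (harvest_enabled && (pipe_fuses & (1 << i)))
                        continue;               /* fused off: skip physical pipe */
                phys_of_logical[j++] = i;       /* logical j -> physical i */
        }
        return j;                               /* new pipe/timing generator count */
}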

View File

@ -26,7 +26,7 @@ DCN10 = dcn10_resource.o dcn10_ipp.o dcn10_hw_sequencer.o \
dcn10_dpp.o dcn10_opp.o dcn10_optc.o \
dcn10_hubp.o dcn10_mpc.o \
dcn10_dpp_dscl.o dcn10_dpp_cm.o dcn10_cm_common.o \
dcn10_hubbub.o dcn10_stream_encoder.o
dcn10_hubbub.o dcn10_stream_encoder.o dcn10_link_encoder.o
AMD_DAL_DCN10 = $(addprefix $(AMDDALPATH)/dc/dcn10/,$(DCN10))

View File

@ -169,7 +169,7 @@ bool cm_helper_convert_to_custom_float(
}
if (fixpoint == true)
arr_points[1].custom_float_y = dal_fixed31_32_clamp_u0d14(arr_points[1].y);
arr_points[1].custom_float_y = dc_fixpt_clamp_u0d14(arr_points[1].y);
else if (!convert_to_custom_float_format(arr_points[1].y, &fmt,
&arr_points[1].custom_float_y)) {
BREAK_TO_DEBUGGER();
@ -327,19 +327,19 @@ bool cm_helper_translate_curve_to_hw_format(
rgb_resulted[hw_points - 1].green = output_tf->tf_pts.green[start_index];
rgb_resulted[hw_points - 1].blue = output_tf->tf_pts.blue[start_index];
arr_points[0].x = dal_fixed31_32_pow(dal_fixed31_32_from_int(2),
dal_fixed31_32_from_int(region_start));
arr_points[1].x = dal_fixed31_32_pow(dal_fixed31_32_from_int(2),
dal_fixed31_32_from_int(region_end));
arr_points[0].x = dc_fixpt_pow(dc_fixpt_from_int(2),
dc_fixpt_from_int(region_start));
arr_points[1].x = dc_fixpt_pow(dc_fixpt_from_int(2),
dc_fixpt_from_int(region_end));
y_r = rgb_resulted[0].red;
y_g = rgb_resulted[0].green;
y_b = rgb_resulted[0].blue;
y1_min = dal_fixed31_32_min(y_r, dal_fixed31_32_min(y_g, y_b));
y1_min = dc_fixpt_min(y_r, dc_fixpt_min(y_g, y_b));
arr_points[0].y = y1_min;
arr_points[0].slope = dal_fixed31_32_div(arr_points[0].y, arr_points[0].x);
arr_points[0].slope = dc_fixpt_div(arr_points[0].y, arr_points[0].x);
y_r = rgb_resulted[hw_points - 1].red;
y_g = rgb_resulted[hw_points - 1].green;
y_b = rgb_resulted[hw_points - 1].blue;
@ -347,22 +347,22 @@ bool cm_helper_translate_curve_to_hw_format(
/* see comment above, m_arrPoints[1].y should be the Y value for the
* region end (m_numOfHwPoints), not last HW point(m_numOfHwPoints - 1)
*/
y3_max = dal_fixed31_32_max(y_r, dal_fixed31_32_max(y_g, y_b));
y3_max = dc_fixpt_max(y_r, dc_fixpt_max(y_g, y_b));
arr_points[1].y = y3_max;
arr_points[1].slope = dal_fixed31_32_zero;
arr_points[1].slope = dc_fixpt_zero;
if (output_tf->tf == TRANSFER_FUNCTION_PQ) {
/* for PQ, we want to have a straight line from last HW X point,
* and the slope to be such that we hit 1.0 at 10000 nits.
*/
const struct fixed31_32 end_value =
dal_fixed31_32_from_int(125);
dc_fixpt_from_int(125);
arr_points[1].slope = dal_fixed31_32_div(
dal_fixed31_32_sub(dal_fixed31_32_one, arr_points[1].y),
dal_fixed31_32_sub(end_value, arr_points[1].x));
arr_points[1].slope = dc_fixpt_div(
dc_fixpt_sub(dc_fixpt_one, arr_points[1].y),
dc_fixpt_sub(end_value, arr_points[1].x));
}
lut_params->hw_points_num = hw_points;
@ -386,24 +386,24 @@ bool cm_helper_translate_curve_to_hw_format(
i = 1;
while (i != hw_points + 1) {
if (dal_fixed31_32_lt(rgb_plus_1->red, rgb->red))
if (dc_fixpt_lt(rgb_plus_1->red, rgb->red))
rgb_plus_1->red = rgb->red;
if (dal_fixed31_32_lt(rgb_plus_1->green, rgb->green))
if (dc_fixpt_lt(rgb_plus_1->green, rgb->green))
rgb_plus_1->green = rgb->green;
if (dal_fixed31_32_lt(rgb_plus_1->blue, rgb->blue))
if (dc_fixpt_lt(rgb_plus_1->blue, rgb->blue))
rgb_plus_1->blue = rgb->blue;
rgb->delta_red = dal_fixed31_32_sub(rgb_plus_1->red, rgb->red);
rgb->delta_green = dal_fixed31_32_sub(rgb_plus_1->green, rgb->green);
rgb->delta_blue = dal_fixed31_32_sub(rgb_plus_1->blue, rgb->blue);
rgb->delta_red = dc_fixpt_sub(rgb_plus_1->red, rgb->red);
rgb->delta_green = dc_fixpt_sub(rgb_plus_1->green, rgb->green);
rgb->delta_blue = dc_fixpt_sub(rgb_plus_1->blue, rgb->blue);
if (fixpoint == true) {
rgb->delta_red_reg = dal_fixed31_32_clamp_u0d10(rgb->delta_red);
rgb->delta_green_reg = dal_fixed31_32_clamp_u0d10(rgb->delta_green);
rgb->delta_blue_reg = dal_fixed31_32_clamp_u0d10(rgb->delta_blue);
rgb->red_reg = dal_fixed31_32_clamp_u0d14(rgb->red);
rgb->green_reg = dal_fixed31_32_clamp_u0d14(rgb->green);
rgb->blue_reg = dal_fixed31_32_clamp_u0d14(rgb->blue);
rgb->delta_red_reg = dc_fixpt_clamp_u0d10(rgb->delta_red);
rgb->delta_green_reg = dc_fixpt_clamp_u0d10(rgb->delta_green);
rgb->delta_blue_reg = dc_fixpt_clamp_u0d10(rgb->delta_blue);
rgb->red_reg = dc_fixpt_clamp_u0d14(rgb->red);
rgb->green_reg = dc_fixpt_clamp_u0d14(rgb->green);
rgb->blue_reg = dc_fixpt_clamp_u0d14(rgb->blue);
}
++rgb_plus_1;
@ -489,19 +489,19 @@ bool cm_helper_translate_curve_to_degamma_hw_format(
rgb_resulted[hw_points - 1].green = output_tf->tf_pts.green[start_index];
rgb_resulted[hw_points - 1].blue = output_tf->tf_pts.blue[start_index];
arr_points[0].x = dal_fixed31_32_pow(dal_fixed31_32_from_int(2),
dal_fixed31_32_from_int(region_start));
arr_points[1].x = dal_fixed31_32_pow(dal_fixed31_32_from_int(2),
dal_fixed31_32_from_int(region_end));
arr_points[0].x = dc_fixpt_pow(dc_fixpt_from_int(2),
dc_fixpt_from_int(region_start));
arr_points[1].x = dc_fixpt_pow(dc_fixpt_from_int(2),
dc_fixpt_from_int(region_end));
y_r = rgb_resulted[0].red;
y_g = rgb_resulted[0].green;
y_b = rgb_resulted[0].blue;
y1_min = dal_fixed31_32_min(y_r, dal_fixed31_32_min(y_g, y_b));
y1_min = dc_fixpt_min(y_r, dc_fixpt_min(y_g, y_b));
arr_points[0].y = y1_min;
arr_points[0].slope = dal_fixed31_32_div(arr_points[0].y, arr_points[0].x);
arr_points[0].slope = dc_fixpt_div(arr_points[0].y, arr_points[0].x);
y_r = rgb_resulted[hw_points - 1].red;
y_g = rgb_resulted[hw_points - 1].green;
y_b = rgb_resulted[hw_points - 1].blue;
@ -509,22 +509,22 @@ bool cm_helper_translate_curve_to_degamma_hw_format(
/* see comment above, m_arrPoints[1].y should be the Y value for the
* region end (m_numOfHwPoints), not last HW point(m_numOfHwPoints - 1)
*/
y3_max = dal_fixed31_32_max(y_r, dal_fixed31_32_max(y_g, y_b));
y3_max = dc_fixpt_max(y_r, dc_fixpt_max(y_g, y_b));
arr_points[1].y = y3_max;
arr_points[1].slope = dal_fixed31_32_zero;
arr_points[1].slope = dc_fixpt_zero;
if (output_tf->tf == TRANSFER_FUNCTION_PQ) {
/* for PQ, we want to have a straight line from last HW X point,
* and the slope to be such that we hit 1.0 at 10000 nits.
*/
const struct fixed31_32 end_value =
dal_fixed31_32_from_int(125);
dc_fixpt_from_int(125);
arr_points[1].slope = dal_fixed31_32_div(
dal_fixed31_32_sub(dal_fixed31_32_one, arr_points[1].y),
dal_fixed31_32_sub(end_value, arr_points[1].x));
arr_points[1].slope = dc_fixpt_div(
dc_fixpt_sub(dc_fixpt_one, arr_points[1].y),
dc_fixpt_sub(end_value, arr_points[1].x));
}
lut_params->hw_points_num = hw_points;
@ -548,16 +548,16 @@ bool cm_helper_translate_curve_to_degamma_hw_format(
i = 1;
while (i != hw_points + 1) {
if (dal_fixed31_32_lt(rgb_plus_1->red, rgb->red))
if (dc_fixpt_lt(rgb_plus_1->red, rgb->red))
rgb_plus_1->red = rgb->red;
if (dal_fixed31_32_lt(rgb_plus_1->green, rgb->green))
if (dc_fixpt_lt(rgb_plus_1->green, rgb->green))
rgb_plus_1->green = rgb->green;
if (dal_fixed31_32_lt(rgb_plus_1->blue, rgb->blue))
if (dc_fixpt_lt(rgb_plus_1->blue, rgb->blue))
rgb_plus_1->blue = rgb->blue;
rgb->delta_red = dal_fixed31_32_sub(rgb_plus_1->red, rgb->red);
rgb->delta_green = dal_fixed31_32_sub(rgb_plus_1->green, rgb->green);
rgb->delta_blue = dal_fixed31_32_sub(rgb_plus_1->blue, rgb->blue);
rgb->delta_red = dc_fixpt_sub(rgb_plus_1->red, rgb->red);
rgb->delta_green = dc_fixpt_sub(rgb_plus_1->green, rgb->green);
rgb->delta_blue = dc_fixpt_sub(rgb_plus_1->blue, rgb->blue);
++rgb_plus_1;
++rgb;

View File

@ -130,7 +130,7 @@ void dpp_set_gamut_remap_bypass(struct dcn10_dpp *dpp)
/* Gamut remap in bypass */
}
#define IDENTITY_RATIO(ratio) (dal_fixed31_32_u2d19(ratio) == (1 << 19))
#define IDENTITY_RATIO(ratio) (dc_fixpt_u2d19(ratio) == (1 << 19))
bool dpp_get_optimal_number_of_taps(
@ -152,6 +152,11 @@ bool dpp_get_optimal_number_of_taps(
scl_data->format == PIXEL_FORMAT_FP16)
return false;
if (scl_data->viewport.width > scl_data->h_active &&
dpp->ctx->dc->debug.max_downscale_src_width != 0 &&
scl_data->viewport.width > dpp->ctx->dc->debug.max_downscale_src_width)
return false;
/* TODO: add lb check */
/* No support for programming ratio of 4, drop to 3.99999.. */
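
The new early-out above refuses downscales whose source is wider than both the active width and the debug cap. As a predicate (sketch):

static bool downscale_src_too_wide(const struct scaler_data *scl_data,
                                   int max_downscale_src_width)
{
        return scl_data->viewport.width > scl_data->h_active &&
               max_downscale_src_width != 0 &&
               scl_data->viewport.width > max_downscale_src_width;
}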

View File

@ -811,13 +811,13 @@ void dpp1_program_input_lut(
REG_UPDATE(CM_IGAM_LUT_RW_INDEX, CM_IGAM_LUT_RW_INDEX, 0);
for (i = 0; i < gamma->num_entries; i++) {
REG_SET(CM_IGAM_LUT_SEQ_COLOR, 0, CM_IGAM_LUT_SEQ_COLOR,
dal_fixed31_32_round(
dc_fixpt_round(
gamma->entries.red[i]));
REG_SET(CM_IGAM_LUT_SEQ_COLOR, 0, CM_IGAM_LUT_SEQ_COLOR,
dal_fixed31_32_round(
dc_fixpt_round(
gamma->entries.green[i]));
REG_SET(CM_IGAM_LUT_SEQ_COLOR, 0, CM_IGAM_LUT_SEQ_COLOR,
dal_fixed31_32_round(
dc_fixpt_round(
gamma->entries.blue[i]));
}
// Power off LUT memory

View File

@ -169,7 +169,7 @@ static enum dscl_mode_sel dpp1_dscl_get_dscl_mode(
const struct scaler_data *data,
bool dbg_always_scale)
{
const long long one = dal_fixed31_32_one.value;
const long long one = dc_fixpt_one.value;
if (dpp_base->caps->dscl_data_proc_format == DSCL_DATA_PRCESSING_FIXED_FORMAT) {
/* DSCL is processing data in fixed format */
@ -464,8 +464,8 @@ static enum lb_memory_config dpp1_dscl_find_lb_memory_config(struct dcn10_dpp *d
int num_part_y, num_part_c;
int vtaps = scl_data->taps.v_taps;
int vtaps_c = scl_data->taps.v_taps_c;
int ceil_vratio = dal_fixed31_32_ceil(scl_data->ratios.vert);
int ceil_vratio_c = dal_fixed31_32_ceil(scl_data->ratios.vert_c);
int ceil_vratio = dc_fixpt_ceil(scl_data->ratios.vert);
int ceil_vratio_c = dc_fixpt_ceil(scl_data->ratios.vert_c);
enum lb_memory_config mem_cfg = LB_MEMORY_CONFIG_0;
if (dpp->base.ctx->dc->debug.use_max_lb)
@ -565,52 +565,52 @@ static void dpp1_dscl_set_manual_ratio_init(
uint32_t init_int = 0;
REG_SET(SCL_HORZ_FILTER_SCALE_RATIO, 0,
SCL_H_SCALE_RATIO, dal_fixed31_32_u2d19(data->ratios.horz) << 5);
SCL_H_SCALE_RATIO, dc_fixpt_u2d19(data->ratios.horz) << 5);
REG_SET(SCL_VERT_FILTER_SCALE_RATIO, 0,
SCL_V_SCALE_RATIO, dal_fixed31_32_u2d19(data->ratios.vert) << 5);
SCL_V_SCALE_RATIO, dc_fixpt_u2d19(data->ratios.vert) << 5);
REG_SET(SCL_HORZ_FILTER_SCALE_RATIO_C, 0,
SCL_H_SCALE_RATIO_C, dal_fixed31_32_u2d19(data->ratios.horz_c) << 5);
SCL_H_SCALE_RATIO_C, dc_fixpt_u2d19(data->ratios.horz_c) << 5);
REG_SET(SCL_VERT_FILTER_SCALE_RATIO_C, 0,
SCL_V_SCALE_RATIO_C, dal_fixed31_32_u2d19(data->ratios.vert_c) << 5);
SCL_V_SCALE_RATIO_C, dc_fixpt_u2d19(data->ratios.vert_c) << 5);
/*
* 0.24 format for fraction, first five bits zeroed
*/
init_frac = dal_fixed31_32_u0d19(data->inits.h) << 5;
init_int = dal_fixed31_32_floor(data->inits.h);
init_frac = dc_fixpt_u0d19(data->inits.h) << 5;
init_int = dc_fixpt_floor(data->inits.h);
REG_SET_2(SCL_HORZ_FILTER_INIT, 0,
SCL_H_INIT_FRAC, init_frac,
SCL_H_INIT_INT, init_int);
init_frac = dal_fixed31_32_u0d19(data->inits.h_c) << 5;
init_int = dal_fixed31_32_floor(data->inits.h_c);
init_frac = dc_fixpt_u0d19(data->inits.h_c) << 5;
init_int = dc_fixpt_floor(data->inits.h_c);
REG_SET_2(SCL_HORZ_FILTER_INIT_C, 0,
SCL_H_INIT_FRAC_C, init_frac,
SCL_H_INIT_INT_C, init_int);
init_frac = dal_fixed31_32_u0d19(data->inits.v) << 5;
init_int = dal_fixed31_32_floor(data->inits.v);
init_frac = dc_fixpt_u0d19(data->inits.v) << 5;
init_int = dc_fixpt_floor(data->inits.v);
REG_SET_2(SCL_VERT_FILTER_INIT, 0,
SCL_V_INIT_FRAC, init_frac,
SCL_V_INIT_INT, init_int);
init_frac = dal_fixed31_32_u0d19(data->inits.v_bot) << 5;
init_int = dal_fixed31_32_floor(data->inits.v_bot);
init_frac = dc_fixpt_u0d19(data->inits.v_bot) << 5;
init_int = dc_fixpt_floor(data->inits.v_bot);
REG_SET_2(SCL_VERT_FILTER_INIT_BOT, 0,
SCL_V_INIT_FRAC_BOT, init_frac,
SCL_V_INIT_INT_BOT, init_int);
init_frac = dal_fixed31_32_u0d19(data->inits.v_c) << 5;
init_int = dal_fixed31_32_floor(data->inits.v_c);
init_frac = dc_fixpt_u0d19(data->inits.v_c) << 5;
init_int = dc_fixpt_floor(data->inits.v_c);
REG_SET_2(SCL_VERT_FILTER_INIT_C, 0,
SCL_V_INIT_FRAC_C, init_frac,
SCL_V_INIT_INT_C, init_int);
init_frac = dal_fixed31_32_u0d19(data->inits.v_c_bot) << 5;
init_int = dal_fixed31_32_floor(data->inits.v_c_bot);
init_frac = dc_fixpt_u0d19(data->inits.v_c_bot) << 5;
init_int = dc_fixpt_floor(data->inits.v_c_bot);
REG_SET_2(SCL_VERT_FILTER_INIT_BOT_C, 0,
SCL_V_INIT_FRAC_BOT_C, init_frac,
SCL_V_INIT_INT_BOT_C, init_int);

View File

@ -476,6 +476,14 @@ void hubbub1_toggle_watermark_change_req(struct hubbub *hubbub)
DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, watermark_change_req);
}
void hubbub1_soft_reset(struct hubbub *hubbub, bool reset)
{
uint32_t reset_en = reset ? 1 : 0;
REG_UPDATE(DCHUBBUB_SOFT_RESET,
DCHUBBUB_GLOBAL_SOFT_RESET, reset_en);
}
static bool hubbub1_dcc_support_swizzle(
enum swizzle_mode_values swizzle,
unsigned int bytes_per_element,

View File

@ -48,7 +48,8 @@
SR(DCHUBBUB_ARB_DF_REQ_OUTSTAND),\
SR(DCHUBBUB_GLOBAL_TIMER_CNTL), \
SR(DCHUBBUB_TEST_DEBUG_INDEX), \
SR(DCHUBBUB_TEST_DEBUG_DATA)
SR(DCHUBBUB_TEST_DEBUG_DATA),\
SR(DCHUBBUB_SOFT_RESET)
#define HUBBUB_SR_WATERMARK_REG_LIST()\
SR(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A),\
@ -105,6 +106,7 @@ struct dcn_hubbub_registers {
uint32_t DCHUBBUB_SDPIF_AGP_BOT;
uint32_t DCHUBBUB_SDPIF_AGP_TOP;
uint32_t DCHUBBUB_CRC_CTRL;
uint32_t DCHUBBUB_SOFT_RESET;
};
/* set field name */
@ -114,6 +116,7 @@ struct dcn_hubbub_registers {
#define HUBBUB_MASK_SH_LIST_DCN(mask_sh)\
HUBBUB_SF(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, mask_sh), \
HUBBUB_SF(DCHUBBUB_SOFT_RESET, DCHUBBUB_GLOBAL_SOFT_RESET, mask_sh), \
HUBBUB_SF(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL, DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, mask_sh), \
HUBBUB_SF(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL, DCHUBBUB_ARB_WATERMARK_CHANGE_DONE_INTERRUPT_DISABLE, mask_sh), \
HUBBUB_SF(DCHUBBUB_ARB_DRAM_STATE_CNTL, DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_VALUE, mask_sh), \
@ -143,6 +146,7 @@ struct dcn_hubbub_registers {
type DCHUBBUB_ARB_SAT_LEVEL;\
type DCHUBBUB_ARB_MIN_REQ_OUTSTAND;\
type DCHUBBUB_GLOBAL_TIMER_REFDIV;\
type DCHUBBUB_GLOBAL_SOFT_RESET; \
type SDPIF_FB_TOP;\
type SDPIF_FB_BASE;\
type SDPIF_FB_OFFSET;\
@ -201,6 +205,7 @@ void hubbub1_toggle_watermark_change_req(
void hubbub1_wm_read_state(struct hubbub *hubbub,
struct dcn_hubbub_wm *wm);
void hubbub1_soft_reset(struct hubbub *hubbub, bool reset);
void hubbub1_construct(struct hubbub *hubbub,
struct dc_context *ctx,
const struct dcn_hubbub_registers *hubbub_regs,

View File

@ -78,6 +78,27 @@ static void hubp1_disconnect(struct hubp *hubp)
CURSOR_ENABLE, 0);
}
static void hubp1_disable_control(struct hubp *hubp, bool disable_hubp)
{
struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
uint32_t disable = disable_hubp ? 1 : 0;
REG_UPDATE(DCHUBP_CNTL,
HUBP_DISABLE, disable);
}
static unsigned int hubp1_get_underflow_status(struct hubp *hubp)
{
uint32_t hubp_underflow = 0;
struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
REG_GET(DCHUBP_CNTL,
HUBP_UNDERFLOW_STATUS,
&hubp_underflow);
return hubp_underflow;
}
static void hubp1_set_hubp_blank_en(struct hubp *hubp, bool blank)
{
struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
@ -1054,8 +1075,8 @@ void hubp1_cursor_set_position(
ASSERT(param->h_scale_ratio.value);
if (param->h_scale_ratio.value)
dst_x_offset = dal_fixed31_32_floor(dal_fixed31_32_div(
dal_fixed31_32_from_int(dst_x_offset),
dst_x_offset = dc_fixpt_floor(dc_fixpt_div(
dc_fixpt_from_int(dst_x_offset),
param->h_scale_ratio));
if (src_x_offset >= (int)param->viewport_width)
@ -1117,6 +1138,9 @@ static struct hubp_funcs dcn10_hubp_funcs = {
.hubp_clk_cntl = hubp1_clk_cntl,
.hubp_vtg_sel = hubp1_vtg_sel,
.hubp_read_state = hubp1_read_state,
.hubp_disable_control = hubp1_disable_control,
.hubp_get_underflow_status = hubp1_get_underflow_status,
};
/*****************************************/

View File

@ -253,6 +253,7 @@
HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_UNDERFLOW_STATUS, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_NO_OUTSTANDING_REQ, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_VTG_SEL, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_DISABLE, mask_sh),\
HUBP_SF(HUBP0_DCSURF_ADDR_CONFIG, NUM_PIPES, mask_sh),\
HUBP_SF(HUBP0_DCSURF_ADDR_CONFIG, NUM_BANKS, mask_sh),\
HUBP_SF(HUBP0_DCSURF_ADDR_CONFIG, PIPE_INTERLEAVE, mask_sh),\
@ -421,6 +422,7 @@
#define DCN_HUBP_REG_FIELD_LIST(type) \
type HUBP_BLANK_EN;\
type HUBP_DISABLE;\
type HUBP_TTU_DISABLE;\
type HUBP_NO_OUTSTANDING_REQ;\
type HUBP_VTG_SEL;\
@ -723,4 +725,5 @@ void hubp1_read_state(struct hubp *hubp);
enum cursor_pitch hubp1_get_cursor_pitch(unsigned int pitch);
#endif

View File

@ -127,24 +127,26 @@ static void dcn10_log_hubp_states(struct dc *dc)
hubp->funcs->hubp_read_state(hubp);
DTN_INFO("[%2d]: %5xh %6xh %5d %6d %2xh %2xh %6xh"
" %6d %8d %7d %8xh",
hubp->inst,
s->pixel_format,
s->inuse_addr_hi,
s->viewport_width,
s->viewport_height,
s->rotation_angle,
s->h_mirror_en,
s->sw_mode,
s->dcc_en,
s->blank_en,
s->ttu_disable,
s->underflow_status);
DTN_INFO_MICRO_SEC(s->min_ttu_vblank);
DTN_INFO_MICRO_SEC(s->qos_level_low_wm);
DTN_INFO_MICRO_SEC(s->qos_level_high_wm);
DTN_INFO("\n");
if (!s->blank_en) {
DTN_INFO("[%2d]: %5xh %6xh %5d %6d %2xh %2xh %6xh"
" %6d %8d %7d %8xh",
hubp->inst,
s->pixel_format,
s->inuse_addr_hi,
s->viewport_width,
s->viewport_height,
s->rotation_angle,
s->h_mirror_en,
s->sw_mode,
s->dcc_en,
s->blank_en,
s->ttu_disable,
s->underflow_status);
DTN_INFO_MICRO_SEC(s->min_ttu_vblank);
DTN_INFO_MICRO_SEC(s->qos_level_low_wm);
DTN_INFO_MICRO_SEC(s->qos_level_high_wm);
DTN_INFO("\n");
}
}
DTN_INFO("\n=========RQ========\n");
@ -155,16 +157,17 @@ static void dcn10_log_hubp_states(struct dc *dc)
struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
struct _vcs_dpi_display_rq_regs_st *rq_regs = &s->rq_regs;
DTN_INFO("[%2d]: %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh\n",
i, rq_regs->drq_expansion_mode, rq_regs->prq_expansion_mode, rq_regs->mrq_expansion_mode,
rq_regs->crq_expansion_mode, rq_regs->plane1_base_address, rq_regs->rq_regs_l.chunk_size,
rq_regs->rq_regs_l.min_chunk_size, rq_regs->rq_regs_l.meta_chunk_size,
rq_regs->rq_regs_l.min_meta_chunk_size, rq_regs->rq_regs_l.dpte_group_size,
rq_regs->rq_regs_l.mpte_group_size, rq_regs->rq_regs_l.swath_height,
rq_regs->rq_regs_l.pte_row_height_linear, rq_regs->rq_regs_c.chunk_size, rq_regs->rq_regs_c.min_chunk_size,
rq_regs->rq_regs_c.meta_chunk_size, rq_regs->rq_regs_c.min_meta_chunk_size,
rq_regs->rq_regs_c.dpte_group_size, rq_regs->rq_regs_c.mpte_group_size,
rq_regs->rq_regs_c.swath_height, rq_regs->rq_regs_c.pte_row_height_linear);
if (!s->blank_en)
DTN_INFO("[%2d]: %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh\n",
pool->hubps[i]->inst, rq_regs->drq_expansion_mode, rq_regs->prq_expansion_mode, rq_regs->mrq_expansion_mode,
rq_regs->crq_expansion_mode, rq_regs->plane1_base_address, rq_regs->rq_regs_l.chunk_size,
rq_regs->rq_regs_l.min_chunk_size, rq_regs->rq_regs_l.meta_chunk_size,
rq_regs->rq_regs_l.min_meta_chunk_size, rq_regs->rq_regs_l.dpte_group_size,
rq_regs->rq_regs_l.mpte_group_size, rq_regs->rq_regs_l.swath_height,
rq_regs->rq_regs_l.pte_row_height_linear, rq_regs->rq_regs_c.chunk_size, rq_regs->rq_regs_c.min_chunk_size,
rq_regs->rq_regs_c.meta_chunk_size, rq_regs->rq_regs_c.min_meta_chunk_size,
rq_regs->rq_regs_c.dpte_group_size, rq_regs->rq_regs_c.mpte_group_size,
rq_regs->rq_regs_c.swath_height, rq_regs->rq_regs_c.pte_row_height_linear);
}
DTN_INFO("========DLG========\n");
@ -179,27 +182,28 @@ static void dcn10_log_hubp_states(struct dc *dc)
struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
struct _vcs_dpi_display_dlg_regs_st *dlg_regs = &s->dlg_attr;
DTN_INFO("[%2d]: %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh"
"% 8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh"
" %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh\n",
i, dlg_regs->refcyc_h_blank_end, dlg_regs->dlg_vblank_end, dlg_regs->min_dst_y_next_start,
dlg_regs->refcyc_per_htotal, dlg_regs->refcyc_x_after_scaler, dlg_regs->dst_y_after_scaler,
dlg_regs->dst_y_prefetch, dlg_regs->dst_y_per_vm_vblank, dlg_regs->dst_y_per_row_vblank,
dlg_regs->dst_y_per_vm_flip, dlg_regs->dst_y_per_row_flip, dlg_regs->ref_freq_to_pix_freq,
dlg_regs->vratio_prefetch, dlg_regs->vratio_prefetch_c, dlg_regs->refcyc_per_pte_group_vblank_l,
dlg_regs->refcyc_per_pte_group_vblank_c, dlg_regs->refcyc_per_meta_chunk_vblank_l,
dlg_regs->refcyc_per_meta_chunk_vblank_c, dlg_regs->refcyc_per_pte_group_flip_l,
dlg_regs->refcyc_per_pte_group_flip_c, dlg_regs->refcyc_per_meta_chunk_flip_l,
dlg_regs->refcyc_per_meta_chunk_flip_c, dlg_regs->dst_y_per_pte_row_nom_l,
dlg_regs->dst_y_per_pte_row_nom_c, dlg_regs->refcyc_per_pte_group_nom_l,
dlg_regs->refcyc_per_pte_group_nom_c, dlg_regs->dst_y_per_meta_row_nom_l,
dlg_regs->dst_y_per_meta_row_nom_c, dlg_regs->refcyc_per_meta_chunk_nom_l,
dlg_regs->refcyc_per_meta_chunk_nom_c, dlg_regs->refcyc_per_line_delivery_pre_l,
dlg_regs->refcyc_per_line_delivery_pre_c, dlg_regs->refcyc_per_line_delivery_l,
dlg_regs->refcyc_per_line_delivery_c, dlg_regs->chunk_hdl_adjust_cur0, dlg_regs->dst_y_offset_cur1,
dlg_regs->chunk_hdl_adjust_cur1, dlg_regs->vready_after_vcount0, dlg_regs->dst_y_delta_drq_limit,
dlg_regs->xfc_reg_transfer_delay, dlg_regs->xfc_reg_precharge_delay,
dlg_regs->xfc_reg_remote_surface_flip_latency);
if (!s->blank_en)
DTN_INFO("[%2d]: %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh"
"% 8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh"
" %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh\n",
pool->hubps[i]->inst, dlg_regs->refcyc_h_blank_end, dlg_regs->dlg_vblank_end, dlg_regs->min_dst_y_next_start,
dlg_regs->refcyc_per_htotal, dlg_regs->refcyc_x_after_scaler, dlg_regs->dst_y_after_scaler,
dlg_regs->dst_y_prefetch, dlg_regs->dst_y_per_vm_vblank, dlg_regs->dst_y_per_row_vblank,
dlg_regs->dst_y_per_vm_flip, dlg_regs->dst_y_per_row_flip, dlg_regs->ref_freq_to_pix_freq,
dlg_regs->vratio_prefetch, dlg_regs->vratio_prefetch_c, dlg_regs->refcyc_per_pte_group_vblank_l,
dlg_regs->refcyc_per_pte_group_vblank_c, dlg_regs->refcyc_per_meta_chunk_vblank_l,
dlg_regs->refcyc_per_meta_chunk_vblank_c, dlg_regs->refcyc_per_pte_group_flip_l,
dlg_regs->refcyc_per_pte_group_flip_c, dlg_regs->refcyc_per_meta_chunk_flip_l,
dlg_regs->refcyc_per_meta_chunk_flip_c, dlg_regs->dst_y_per_pte_row_nom_l,
dlg_regs->dst_y_per_pte_row_nom_c, dlg_regs->refcyc_per_pte_group_nom_l,
dlg_regs->refcyc_per_pte_group_nom_c, dlg_regs->dst_y_per_meta_row_nom_l,
dlg_regs->dst_y_per_meta_row_nom_c, dlg_regs->refcyc_per_meta_chunk_nom_l,
dlg_regs->refcyc_per_meta_chunk_nom_c, dlg_regs->refcyc_per_line_delivery_pre_l,
dlg_regs->refcyc_per_line_delivery_pre_c, dlg_regs->refcyc_per_line_delivery_l,
dlg_regs->refcyc_per_line_delivery_c, dlg_regs->chunk_hdl_adjust_cur0, dlg_regs->dst_y_offset_cur1,
dlg_regs->chunk_hdl_adjust_cur1, dlg_regs->vready_after_vcount0, dlg_regs->dst_y_delta_drq_limit,
dlg_regs->xfc_reg_transfer_delay, dlg_regs->xfc_reg_precharge_delay,
dlg_regs->xfc_reg_remote_surface_flip_latency);
}
DTN_INFO("========TTU========\n");
@ -210,14 +214,15 @@ static void dcn10_log_hubp_states(struct dc *dc)
struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
struct _vcs_dpi_display_ttu_regs_st *ttu_regs = &s->ttu_attr;
DTN_INFO("[%2d]: %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh\n",
i, ttu_regs->qos_level_low_wm, ttu_regs->qos_level_high_wm, ttu_regs->min_ttu_vblank,
ttu_regs->qos_level_flip, ttu_regs->refcyc_per_req_delivery_pre_l, ttu_regs->refcyc_per_req_delivery_l,
ttu_regs->refcyc_per_req_delivery_pre_c, ttu_regs->refcyc_per_req_delivery_c, ttu_regs->refcyc_per_req_delivery_cur0,
ttu_regs->refcyc_per_req_delivery_pre_cur0, ttu_regs->refcyc_per_req_delivery_cur1,
ttu_regs->refcyc_per_req_delivery_pre_cur1, ttu_regs->qos_level_fixed_l, ttu_regs->qos_ramp_disable_l,
ttu_regs->qos_level_fixed_c, ttu_regs->qos_ramp_disable_c, ttu_regs->qos_level_fixed_cur0,
ttu_regs->qos_ramp_disable_cur0, ttu_regs->qos_level_fixed_cur1, ttu_regs->qos_ramp_disable_cur1);
if (!s->blank_en)
DTN_INFO("[%2d]: %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh\n",
pool->hubps[i]->inst, ttu_regs->qos_level_low_wm, ttu_regs->qos_level_high_wm, ttu_regs->min_ttu_vblank,
ttu_regs->qos_level_flip, ttu_regs->refcyc_per_req_delivery_pre_l, ttu_regs->refcyc_per_req_delivery_l,
ttu_regs->refcyc_per_req_delivery_pre_c, ttu_regs->refcyc_per_req_delivery_c, ttu_regs->refcyc_per_req_delivery_cur0,
ttu_regs->refcyc_per_req_delivery_pre_cur0, ttu_regs->refcyc_per_req_delivery_cur1,
ttu_regs->refcyc_per_req_delivery_pre_cur1, ttu_regs->qos_level_fixed_l, ttu_regs->qos_ramp_disable_l,
ttu_regs->qos_level_fixed_c, ttu_regs->qos_ramp_disable_c, ttu_regs->qos_level_fixed_cur0,
ttu_regs->qos_ramp_disable_cur0, ttu_regs->qos_level_fixed_cur1, ttu_regs->qos_ramp_disable_cur1);
}
DTN_INFO("\n");
}
@ -321,6 +326,12 @@ void dcn10_log_hw_state(struct dc *dc)
s.h_total,
s.v_total,
s.underflow_occurred_status);
// Clear underflow for debug purposes.
// We want to keep the underflow sticky bit on for longevity tests outside of the
// test environment. This function is called only from a Windows or Diags test
// environment, so it is safe to clear the bit here without affecting the original intent.
tg->funcs->clear_optc_underflow(tg);
}
DTN_INFO("\n");
@ -747,6 +758,90 @@ static void reset_back_end_for_pipe(
pipe_ctx->pipe_idx, pipe_ctx->stream_res.tg->inst);
}
static bool dcn10_hw_wa_force_recovery(struct dc *dc)
{
struct hubp *hubp;
unsigned int i;
bool need_recover = false;
if (!dc->debug.recovery_enabled)
return false;
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx =
&dc->current_state->res_ctx.pipe_ctx[i];
if (pipe_ctx != NULL) {
hubp = pipe_ctx->plane_res.hubp;
if (hubp != NULL) {
if (hubp->funcs->hubp_get_underflow_status(hubp) != 0) {
/* at least one pipe underflowed; reset all the pipes */
need_recover = true;
}
}
}
}
if (!need_recover)
return false;
/*
DCHUBP_CNTL:HUBP_BLANK_EN=1
DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=1
DCHUBP_CNTL:HUBP_DISABLE=1
DCHUBP_CNTL:HUBP_DISABLE=0
DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=0
DCSURF_PRIMARY_SURFACE_ADDRESS
DCHUBP_CNTL:HUBP_BLANK_EN=0
*/
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx =
&dc->current_state->res_ctx.pipe_ctx[i];
if (pipe_ctx != NULL) {
hubp = pipe_ctx->plane_res.hubp;
/*DCHUBP_CNTL:HUBP_BLANK_EN=1*/
if (hubp != NULL)
hubp->funcs->set_hubp_blank_en(hubp, true);
}
}
/*DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=1*/
hubbub1_soft_reset(dc->res_pool->hubbub, true);
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx =
&dc->current_state->res_ctx.pipe_ctx[i];
if (pipe_ctx != NULL) {
hubp = pipe_ctx->plane_res.hubp;
/*DCHUBP_CNTL:HUBP_DISABLE=1*/
if (hubp != NULL)
hubp->funcs->hubp_disable_control(hubp, true);
}
}
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx =
&dc->current_state->res_ctx.pipe_ctx[i];
if (pipe_ctx != NULL) {
hubp = pipe_ctx->plane_res.hubp;
/*DCHUBP_CNTL:HUBP_DISABLE=0*/
if (hubp != NULL)
hubp->funcs->hubp_disable_control(hubp, false);
}
}
/*DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=0*/
hubbub1_soft_reset(dc->res_pool->hubbub, false);
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx =
&dc->current_state->res_ctx.pipe_ctx[i];
if (pipe_ctx != NULL) {
hubp = pipe_ctx->plane_res.hubp;
/*DCHUBP_CNTL:HUBP_BLANK_EN=0*/
if (hubp != NULL)
hubp->funcs->set_hubp_blank_en(hubp, false);
}
}
return true;
}
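
The register sequence in the block comment is applied one step at a time across every pipe. Collapsed onto a single hubp for readability (sketch; the real code completes each step on all pipes before moving to the next):

static void recover_one_hubp(struct dc *dc, struct hubp *hubp)
{
        hubp->funcs->set_hubp_blank_en(hubp, true);       /* HUBP_BLANK_EN = 1 */
        hubbub1_soft_reset(dc->res_pool->hubbub, true);   /* GLOBAL_SOFT_RESET = 1 */
        hubp->funcs->hubp_disable_control(hubp, true);    /* HUBP_DISABLE = 1 */
        hubp->funcs->hubp_disable_control(hubp, false);   /* HUBP_DISABLE = 0 */
        hubbub1_soft_reset(dc->res_pool->hubbub, false);  /* GLOBAL_SOFT_RESET = 0 */
        /* (re-program DCSURF_PRIMARY_SURFACE_ADDRESS here) */
        hubp->funcs->set_hubp_blank_en(hubp, false);      /* HUBP_BLANK_EN = 0 */
}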
static void dcn10_verify_allow_pstate_change_high(struct dc *dc)
{
static bool should_log_hw_state; /* prevent hw state log by default */
@ -755,13 +850,17 @@ static void dcn10_verify_allow_pstate_change_high(struct dc *dc)
if (should_log_hw_state) {
dcn10_log_hw_state(dc);
}
BREAK_TO_DEBUGGER();
if (dcn10_hw_wa_force_recovery(dc)) {
/*check again*/
if (!hubbub1_verify_allow_pstate_change_high(dc->res_pool->hubbub))
BREAK_TO_DEBUGGER();
}
}
}
/* trigger HW to start disconnect plane from stream on the next vsync */
static void plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx)
void hwss1_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
struct hubp *hubp = pipe_ctx->plane_res.hubp;
int dpp_id = pipe_ctx->plane_res.dpp->inst;
@ -944,7 +1043,7 @@ static void dcn10_init_hw(struct dc *dc)
dc->res_pool->opps[i]->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
pipe_ctx->stream_res.opp = dc->res_pool->opps[i];
plane_atomic_disconnect(dc, pipe_ctx);
hwss1_plane_atomic_disconnect(dc, pipe_ctx);
}
for (i = 0; i < dc->res_pool->pipe_count; i++) {
@ -1685,22 +1784,22 @@ static uint16_t fixed_point_to_int_frac(
uint16_t result;
uint16_t d = (uint16_t)dal_fixed31_32_floor(
dal_fixed31_32_abs(
uint16_t d = (uint16_t)dc_fixpt_floor(
dc_fixpt_abs(
arg));
if (d <= (uint16_t)(1 << integer_bits) - (1 / (uint16_t)divisor))
numerator = (uint16_t)dal_fixed31_32_floor(
dal_fixed31_32_mul_int(
numerator = (uint16_t)dc_fixpt_floor(
dc_fixpt_mul_int(
arg,
divisor));
else {
numerator = dal_fixed31_32_floor(
dal_fixed31_32_sub(
dal_fixed31_32_from_int(
numerator = dc_fixpt_floor(
dc_fixpt_sub(
dc_fixpt_from_int(
1LL << integer_bits),
dal_fixed31_32_recip(
dal_fixed31_32_from_int(
dc_fixpt_recip(
dc_fixpt_from_int(
divisor))));
}
@ -1710,8 +1809,8 @@ static uint16_t fixed_point_to_int_frac(
result = (uint16_t)(
(1 << (integer_bits + fractional_bits + 1)) + numerator);
if ((result != 0) && dal_fixed31_32_lt(
arg, dal_fixed31_32_zero))
if ((result != 0) && dc_fixpt_lt(
arg, dc_fixpt_zero))
result |= 1 << (integer_bits + fractional_bits);
return result;
@ -1725,8 +1824,8 @@ void build_prescale_params(struct dc_bias_and_scale *bias_and_scale,
&& plane_state->input_csc_color_matrix.enable_adjustment
&& plane_state->coeff_reduction_factor.value != 0) {
bias_and_scale->scale_blue = fixed_point_to_int_frac(
dal_fixed31_32_mul(plane_state->coeff_reduction_factor,
dal_fixed31_32_from_fraction(256, 255)),
dc_fixpt_mul(plane_state->coeff_reduction_factor,
dc_fixpt_from_fraction(256, 255)),
2,
13);
bias_and_scale->scale_red = bias_and_scale->scale_blue;
@ -1995,7 +2094,7 @@ static void dcn10_blank_pixel_data(
static void set_hdr_multiplier(struct pipe_ctx *pipe_ctx)
{
struct fixed31_32 multiplier = dal_fixed31_32_from_fraction(
struct fixed31_32 multiplier = dc_fixpt_from_fraction(
pipe_ctx->plane_state->sdr_white_level, 80);
uint32_t hw_mult = 0x1f000; // 1.0 default multiplier
struct custom_float_format fmt;
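
set_hdr_multiplier() scales the frame buffer by sdr_white_level / 80, i.e. how many 80-nit SDR whites fit into the requested peak. For example (sketch):

static struct fixed31_32 hdr_multiplier(int sdr_white_level)
{
        /* 80 -> 1.0, 160 -> 2.0, 400 -> 5.0 */
        return dc_fixpt_from_fraction(sdr_white_level, 80);
}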
@ -2179,7 +2278,7 @@ static void dcn10_apply_ctx_for_surface(
old_pipe_ctx->plane_state &&
old_pipe_ctx->stream_res.tg == tg) {
plane_atomic_disconnect(dc, old_pipe_ctx);
hwss1_plane_atomic_disconnect(dc, old_pipe_ctx);
removed_pipe[i] = true;
DC_LOG_DC(
@ -2487,15 +2586,6 @@ static void set_static_screen_control(struct pipe_ctx **pipe_ctx,
set_static_screen_control(pipe_ctx[i]->stream_res.tg, value);
}
static void set_plane_config(
const struct dc *dc,
struct pipe_ctx *pipe_ctx,
struct resource_context *res_ctx)
{
/* TODO */
program_gamut_remap(pipe_ctx);
}
static void dcn10_config_stereo_parameters(
struct dc_stream_state *stream, struct crtc_stereo_flags *flags)
{
@ -2673,7 +2763,6 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
.init_hw = dcn10_init_hw,
.apply_ctx_to_hw = dce110_apply_ctx_to_hw,
.apply_ctx_for_surface = dcn10_apply_ctx_for_surface,
.set_plane_config = set_plane_config,
.update_plane_addr = dcn10_update_plane_addr,
.update_dchub = dcn10_update_dchub,
.update_pending_status = dcn10_update_pending_status,

View File

@ -37,4 +37,6 @@ extern void fill_display_configs(
bool is_rgb_cspace(enum dc_color_space output_color_space);
void hwss1_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx);
#endif /* __DC_HWSS_DCN10_H__ */

File diff suppressed because it is too large

View File

@ -0,0 +1,330 @@
/*
* Copyright 2012-15 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#ifndef __DC_LINK_ENCODER__DCN10_H__
#define __DC_LINK_ENCODER__DCN10_H__
#include "link_encoder.h"
#define TO_DCN10_LINK_ENC(link_encoder)\
container_of(link_encoder, struct dcn10_link_encoder, base)
#define AUX_REG_LIST(id)\
SRI(AUX_CONTROL, DP_AUX, id), \
SRI(AUX_DPHY_RX_CONTROL0, DP_AUX, id)
#define HPD_REG_LIST(id)\
SRI(DC_HPD_CONTROL, HPD, id)
#define LE_DCN_COMMON_REG_LIST(id) \
SRI(DIG_BE_CNTL, DIG, id), \
SRI(DIG_BE_EN_CNTL, DIG, id), \
SRI(DP_CONFIG, DP, id), \
SRI(DP_DPHY_CNTL, DP, id), \
SRI(DP_DPHY_PRBS_CNTL, DP, id), \
SRI(DP_DPHY_SCRAM_CNTL, DP, id),\
SRI(DP_DPHY_SYM0, DP, id), \
SRI(DP_DPHY_SYM1, DP, id), \
SRI(DP_DPHY_SYM2, DP, id), \
SRI(DP_DPHY_TRAINING_PATTERN_SEL, DP, id), \
SRI(DP_LINK_CNTL, DP, id), \
SRI(DP_LINK_FRAMING_CNTL, DP, id), \
SRI(DP_MSE_SAT0, DP, id), \
SRI(DP_MSE_SAT1, DP, id), \
SRI(DP_MSE_SAT2, DP, id), \
SRI(DP_MSE_SAT_UPDATE, DP, id), \
SRI(DP_SEC_CNTL, DP, id), \
SRI(DP_VID_STREAM_CNTL, DP, id), \
SRI(DP_DPHY_FAST_TRAINING, DP, id), \
SRI(DP_SEC_CNTL1, DP, id), \
SRI(DP_DPHY_BS_SR_SWAP_CNTL, DP, id), \
SRI(DP_DPHY_INTERNAL_CTRL, DP, id), \
SRI(DP_DPHY_HBR2_PATTERN_CONTROL, DP, id)
#define LE_DCN10_REG_LIST(id)\
LE_DCN_COMMON_REG_LIST(id)
struct dcn10_link_enc_aux_registers {
uint32_t AUX_CONTROL;
uint32_t AUX_DPHY_RX_CONTROL0;
};
struct dcn10_link_enc_hpd_registers {
uint32_t DC_HPD_CONTROL;
};
struct dcn10_link_enc_registers {
uint32_t DIG_BE_CNTL;
uint32_t DIG_BE_EN_CNTL;
uint32_t DP_CONFIG;
uint32_t DP_DPHY_CNTL;
uint32_t DP_DPHY_INTERNAL_CTRL;
uint32_t DP_DPHY_PRBS_CNTL;
uint32_t DP_DPHY_SCRAM_CNTL;
uint32_t DP_DPHY_SYM0;
uint32_t DP_DPHY_SYM1;
uint32_t DP_DPHY_SYM2;
uint32_t DP_DPHY_TRAINING_PATTERN_SEL;
uint32_t DP_LINK_CNTL;
uint32_t DP_LINK_FRAMING_CNTL;
uint32_t DP_MSE_SAT0;
uint32_t DP_MSE_SAT1;
uint32_t DP_MSE_SAT2;
uint32_t DP_MSE_SAT_UPDATE;
uint32_t DP_SEC_CNTL;
uint32_t DP_VID_STREAM_CNTL;
uint32_t DP_DPHY_FAST_TRAINING;
uint32_t DP_DPHY_BS_SR_SWAP_CNTL;
uint32_t DP_DPHY_HBR2_PATTERN_CONTROL;
uint32_t DP_SEC_CNTL1;
};
#define LE_SF(reg_name, field_name, post_fix)\
.field_name = reg_name ## __ ## field_name ## post_fix
#define LINK_ENCODER_MASK_SH_LIST_DCN10(mask_sh)\
LE_SF(DIG0_DIG_BE_EN_CNTL, DIG_ENABLE, mask_sh),\
LE_SF(DIG0_DIG_BE_CNTL, DIG_HPD_SELECT, mask_sh),\
LE_SF(DIG0_DIG_BE_CNTL, DIG_MODE, mask_sh),\
LE_SF(DIG0_DIG_BE_CNTL, DIG_FE_SOURCE_SELECT, mask_sh),\
LE_SF(DP0_DP_DPHY_CNTL, DPHY_BYPASS, mask_sh),\
LE_SF(DP0_DP_DPHY_CNTL, DPHY_ATEST_SEL_LANE0, mask_sh),\
LE_SF(DP0_DP_DPHY_CNTL, DPHY_ATEST_SEL_LANE1, mask_sh),\
LE_SF(DP0_DP_DPHY_CNTL, DPHY_ATEST_SEL_LANE2, mask_sh),\
LE_SF(DP0_DP_DPHY_CNTL, DPHY_ATEST_SEL_LANE3, mask_sh),\
LE_SF(DP0_DP_DPHY_PRBS_CNTL, DPHY_PRBS_EN, mask_sh),\
LE_SF(DP0_DP_DPHY_PRBS_CNTL, DPHY_PRBS_SEL, mask_sh),\
LE_SF(DP0_DP_DPHY_SYM0, DPHY_SYM1, mask_sh),\
LE_SF(DP0_DP_DPHY_SYM0, DPHY_SYM2, mask_sh),\
LE_SF(DP0_DP_DPHY_SYM0, DPHY_SYM3, mask_sh),\
LE_SF(DP0_DP_DPHY_SYM1, DPHY_SYM4, mask_sh),\
LE_SF(DP0_DP_DPHY_SYM1, DPHY_SYM5, mask_sh),\
LE_SF(DP0_DP_DPHY_SYM1, DPHY_SYM6, mask_sh),\
LE_SF(DP0_DP_DPHY_SYM2, DPHY_SYM7, mask_sh),\
LE_SF(DP0_DP_DPHY_SYM2, DPHY_SYM8, mask_sh),\
LE_SF(DP0_DP_DPHY_SCRAM_CNTL, DPHY_SCRAMBLER_BS_COUNT, mask_sh),\
LE_SF(DP0_DP_DPHY_SCRAM_CNTL, DPHY_SCRAMBLER_ADVANCE, mask_sh),\
LE_SF(DP0_DP_DPHY_FAST_TRAINING, DPHY_RX_FAST_TRAINING_CAPABLE, mask_sh),\
LE_SF(DP0_DP_DPHY_BS_SR_SWAP_CNTL, DPHY_LOAD_BS_COUNT, mask_sh),\
LE_SF(DP0_DP_DPHY_TRAINING_PATTERN_SEL, DPHY_TRAINING_PATTERN_SEL, mask_sh),\
LE_SF(DP0_DP_DPHY_HBR2_PATTERN_CONTROL, DP_DPHY_HBR2_PATTERN_CONTROL, mask_sh),\
LE_SF(DP0_DP_LINK_CNTL, DP_LINK_TRAINING_COMPLETE, mask_sh),\
LE_SF(DP0_DP_LINK_FRAMING_CNTL, DP_IDLE_BS_INTERVAL, mask_sh),\
LE_SF(DP0_DP_LINK_FRAMING_CNTL, DP_VBID_DISABLE, mask_sh),\
LE_SF(DP0_DP_LINK_FRAMING_CNTL, DP_VID_ENHANCED_FRAME_MODE, mask_sh),\
LE_SF(DP0_DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, mask_sh),\
LE_SF(DP0_DP_CONFIG, DP_UDI_LANES, mask_sh),\
LE_SF(DP0_DP_SEC_CNTL1, DP_SEC_GSP0_LINE_NUM, mask_sh),\
LE_SF(DP0_DP_SEC_CNTL1, DP_SEC_GSP0_PRIORITY, mask_sh),\
LE_SF(DP0_DP_MSE_SAT0, DP_MSE_SAT_SRC0, mask_sh),\
LE_SF(DP0_DP_MSE_SAT0, DP_MSE_SAT_SRC1, mask_sh),\
LE_SF(DP0_DP_MSE_SAT0, DP_MSE_SAT_SLOT_COUNT0, mask_sh),\
LE_SF(DP0_DP_MSE_SAT0, DP_MSE_SAT_SLOT_COUNT1, mask_sh),\
LE_SF(DP0_DP_MSE_SAT1, DP_MSE_SAT_SRC2, mask_sh),\
LE_SF(DP0_DP_MSE_SAT1, DP_MSE_SAT_SRC3, mask_sh),\
LE_SF(DP0_DP_MSE_SAT1, DP_MSE_SAT_SLOT_COUNT2, mask_sh),\
LE_SF(DP0_DP_MSE_SAT1, DP_MSE_SAT_SLOT_COUNT3, mask_sh),\
LE_SF(DP0_DP_MSE_SAT_UPDATE, DP_MSE_SAT_UPDATE, mask_sh),\
LE_SF(DP0_DP_MSE_SAT_UPDATE, DP_MSE_16_MTP_KEEPOUT, mask_sh),\
LE_SF(DP_AUX0_AUX_CONTROL, AUX_HPD_SEL, mask_sh),\
LE_SF(DP_AUX0_AUX_CONTROL, AUX_LS_READ_EN, mask_sh),\
LE_SF(DP_AUX0_AUX_DPHY_RX_CONTROL0, AUX_RX_RECEIVE_WINDOW, mask_sh),\
LE_SF(HPD0_DC_HPD_CONTROL, DC_HPD_EN, mask_sh)
#define DCN_LINK_ENCODER_REG_FIELD_LIST(type) \
type DIG_ENABLE;\
type DIG_HPD_SELECT;\
type DIG_MODE;\
type DIG_FE_SOURCE_SELECT;\
type DPHY_BYPASS;\
type DPHY_ATEST_SEL_LANE0;\
type DPHY_ATEST_SEL_LANE1;\
type DPHY_ATEST_SEL_LANE2;\
type DPHY_ATEST_SEL_LANE3;\
type DPHY_PRBS_EN;\
type DPHY_PRBS_SEL;\
type DPHY_SYM1;\
type DPHY_SYM2;\
type DPHY_SYM3;\
type DPHY_SYM4;\
type DPHY_SYM5;\
type DPHY_SYM6;\
type DPHY_SYM7;\
type DPHY_SYM8;\
type DPHY_SCRAMBLER_BS_COUNT;\
type DPHY_SCRAMBLER_ADVANCE;\
type DPHY_RX_FAST_TRAINING_CAPABLE;\
type DPHY_LOAD_BS_COUNT;\
type DPHY_TRAINING_PATTERN_SEL;\
type DP_DPHY_HBR2_PATTERN_CONTROL;\
type DP_LINK_TRAINING_COMPLETE;\
type DP_IDLE_BS_INTERVAL;\
type DP_VBID_DISABLE;\
type DP_VID_ENHANCED_FRAME_MODE;\
type DP_VID_STREAM_ENABLE;\
type DP_UDI_LANES;\
type DP_SEC_GSP0_LINE_NUM;\
type DP_SEC_GSP0_PRIORITY;\
type DP_MSE_SAT_SRC0;\
type DP_MSE_SAT_SRC1;\
type DP_MSE_SAT_SRC2;\
type DP_MSE_SAT_SRC3;\
type DP_MSE_SAT_SLOT_COUNT0;\
type DP_MSE_SAT_SLOT_COUNT1;\
type DP_MSE_SAT_SLOT_COUNT2;\
type DP_MSE_SAT_SLOT_COUNT3;\
type DP_MSE_SAT_UPDATE;\
type DP_MSE_16_MTP_KEEPOUT;\
type AUX_HPD_SEL;\
type AUX_LS_READ_EN;\
type AUX_RX_RECEIVE_WINDOW;\
type DC_HPD_EN
struct dcn10_link_enc_shift {
DCN_LINK_ENCODER_REG_FIELD_LIST(uint8_t);
};
struct dcn10_link_enc_mask {
DCN_LINK_ENCODER_REG_FIELD_LIST(uint32_t);
};
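/*
 * A hedged sketch of how the two instantiations above are consumed --
 * example_get_field is illustrative only. The same X-macro field list
 * yields a uint8_t shift table and a uint32_t mask table, and a field
 * is extracted by masking first, then shifting down:
 *
 *	static uint32_t example_get_field(uint32_t reg_value,
 *		const struct dcn10_link_enc_shift *s,
 *		const struct dcn10_link_enc_mask *m)
 *	{
 *		return (reg_value & m->DIG_ENABLE) >> s->DIG_ENABLE;
 *	}
 */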
struct dcn10_link_encoder {
struct link_encoder base;
const struct dcn10_link_enc_registers *link_regs;
const struct dcn10_link_enc_aux_registers *aux_regs;
const struct dcn10_link_enc_hpd_registers *hpd_regs;
const struct dcn10_link_enc_shift *link_shift;
const struct dcn10_link_enc_mask *link_mask;
};
void dcn10_link_encoder_construct(
struct dcn10_link_encoder *enc10,
const struct encoder_init_data *init_data,
const struct encoder_feature_support *enc_features,
const struct dcn10_link_enc_registers *link_regs,
const struct dcn10_link_enc_aux_registers *aux_regs,
const struct dcn10_link_enc_hpd_registers *hpd_regs,
const struct dcn10_link_enc_shift *link_shift,
const struct dcn10_link_enc_mask *link_mask);
bool dcn10_link_encoder_validate_dvi_output(
const struct dcn10_link_encoder *enc10,
enum signal_type connector_signal,
enum signal_type signal,
const struct dc_crtc_timing *crtc_timing);
bool dcn10_link_encoder_validate_rgb_output(
const struct dcn10_link_encoder *enc10,
const struct dc_crtc_timing *crtc_timing);
bool dcn10_link_encoder_validate_dp_output(
const struct dcn10_link_encoder *enc10,
const struct dc_crtc_timing *crtc_timing);
bool dcn10_link_encoder_validate_wireless_output(
const struct dcn10_link_encoder *enc10,
const struct dc_crtc_timing *crtc_timing);
bool dcn10_link_encoder_validate_output_with_stream(
struct link_encoder *enc,
const struct dc_stream_state *stream);
/****************** HW programming ************************/
/* initialize HW */ /* why do we initialize aux in here? */
void dcn10_link_encoder_hw_init(struct link_encoder *enc);
void dcn10_link_encoder_destroy(struct link_encoder **enc);
/* program DIG_MODE in DIG_BE */
/* TODO can this be combined with enable_output? */
void dcn10_link_encoder_setup(
struct link_encoder *enc,
enum signal_type signal);
/* enables TMDS PHY output */
/* TODO: still need depth or just pass in adjusted pixel clock? */
void dcn10_link_encoder_enable_tmds_output(
struct link_encoder *enc,
enum clock_source_id clock_source,
enum dc_color_depth color_depth,
enum signal_type signal,
uint32_t pixel_clock);
/* enables DP PHY output */
void dcn10_link_encoder_enable_dp_output(
struct link_encoder *enc,
const struct dc_link_settings *link_settings,
enum clock_source_id clock_source);
/* enables DP PHY output in MST mode */
void dcn10_link_encoder_enable_dp_mst_output(
struct link_encoder *enc,
const struct dc_link_settings *link_settings,
enum clock_source_id clock_source);
/* disable PHY output */
void dcn10_link_encoder_disable_output(
struct link_encoder *enc,
enum signal_type signal);
/* set DP lane settings */
void dcn10_link_encoder_dp_set_lane_settings(
struct link_encoder *enc,
const struct link_training_settings *link_settings);
void dcn10_link_encoder_dp_set_phy_pattern(
struct link_encoder *enc,
const struct encoder_set_dp_phy_pattern_param *param);
/* programs DP MST VC payload allocation */
void dcn10_link_encoder_update_mst_stream_allocation_table(
struct link_encoder *enc,
const struct link_mst_stream_allocation_table *table);
void dcn10_link_encoder_connect_dig_be_to_fe(
struct link_encoder *enc,
enum engine_id engine,
bool connect);
void dcn10_link_encoder_set_dp_phy_pattern_training_pattern(
struct link_encoder *enc,
uint32_t index);
void dcn10_link_encoder_enable_hpd(struct link_encoder *enc);
void dcn10_link_encoder_disable_hpd(struct link_encoder *enc);
void dcn10_psr_program_dp_dphy_fast_training(struct link_encoder *enc,
bool exit_link_training_required);
void dcn10_psr_program_secondary_packet(struct link_encoder *enc,
unsigned int sdp_transmit_line_num_deadline);
bool dcn10_is_dig_enabled(struct link_encoder *enc);
#endif /* __DC_LINK_ENCODER__DCN10_H__ */

View File

@ -360,7 +360,7 @@ void optc1_program_timing(
}
static void optc1_set_blank_data_double_buffer(struct timing_generator *optc, bool enable)
void optc1_set_blank_data_double_buffer(struct timing_generator *optc, bool enable)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
@ -1257,20 +1257,20 @@ void optc1_read_otg_state(struct optc *optc1,
OPTC_UNDERFLOW_OCCURRED_STATUS, &s->underflow_occurred_status);
}
static void optc1_clear_optc_underflow(struct timing_generator *optc)
void optc1_clear_optc_underflow(struct timing_generator *optc)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
REG_UPDATE(OPTC_INPUT_GLOBAL_CONTROL, OPTC_UNDERFLOW_CLEAR, 1);
}
static void optc1_tg_init(struct timing_generator *optc)
void optc1_tg_init(struct timing_generator *optc)
{
optc1_set_blank_data_double_buffer(optc, true);
optc1_clear_optc_underflow(optc);
}
static bool optc1_is_tg_enabled(struct timing_generator *optc)
bool optc1_is_tg_enabled(struct timing_generator *optc)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
uint32_t otg_enabled = 0;
@ -1281,7 +1281,7 @@ static bool optc1_is_tg_enabled(struct timing_generator *optc)
}
static bool optc1_is_optc_underflow_occurred(struct timing_generator *optc)
bool optc1_is_optc_underflow_occurred(struct timing_generator *optc)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
uint32_t underflow_occurred = 0;

View File

@ -497,4 +497,14 @@ void optc1_program_stereo(struct timing_generator *optc,
bool optc1_is_stereo_left_eye(struct timing_generator *optc);
void optc1_clear_optc_underflow(struct timing_generator *optc);
void optc1_tg_init(struct timing_generator *optc);
bool optc1_is_tg_enabled(struct timing_generator *optc);
bool optc1_is_optc_underflow_occurred(struct timing_generator *optc);
void optc1_set_blank_data_double_buffer(struct timing_generator *optc, bool enable);
#endif /* __DC_TIMING_GENERATOR_DCN10_H__ */

View File

@ -38,7 +38,7 @@
#include "dcn10/dcn10_hw_sequencer.h"
#include "dce110/dce110_hw_sequencer.h"
#include "dcn10/dcn10_opp.h"
#include "dce/dce_link_encoder.h"
#include "dcn10/dcn10_link_encoder.h"
#include "dcn10/dcn10_stream_encoder.h"
#include "dce/dce_clocks.h"
#include "dce/dce_clock_source.h"
@ -214,13 +214,11 @@ static const struct dce_aduio_mask audio_mask = {
AUX_REG_LIST(id)\
}
static const struct dce110_link_enc_aux_registers link_enc_aux_regs[] = {
static const struct dcn10_link_enc_aux_registers link_enc_aux_regs[] = {
aux_regs(0),
aux_regs(1),
aux_regs(2),
aux_regs(3),
aux_regs(4),
aux_regs(5)
aux_regs(3)
};
#define hpd_regs(id)\
@ -228,13 +226,11 @@ static const struct dce110_link_enc_aux_registers link_enc_aux_regs[] = {
HPD_REG_LIST(id)\
}
static const struct dce110_link_enc_hpd_registers link_enc_hpd_regs[] = {
static const struct dcn10_link_enc_hpd_registers link_enc_hpd_regs[] = {
hpd_regs(0),
hpd_regs(1),
hpd_regs(2),
hpd_regs(3),
hpd_regs(4),
hpd_regs(5)
hpd_regs(3)
};
#define link_regs(id)\
@ -243,14 +239,19 @@ static const struct dce110_link_enc_hpd_registers link_enc_hpd_regs[] = {
SRI(DP_DPHY_INTERNAL_CTRL, DP, id) \
}
static const struct dce110_link_enc_registers link_enc_regs[] = {
static const struct dcn10_link_enc_registers link_enc_regs[] = {
link_regs(0),
link_regs(1),
link_regs(2),
link_regs(3),
link_regs(4),
link_regs(5),
link_regs(6),
link_regs(3)
};
static const struct dcn10_link_enc_shift le_shift = {
LINK_ENCODER_MASK_SH_LIST_DCN10(__SHIFT)
};
static const struct dcn10_link_enc_mask le_mask = {
LINK_ENCODER_MASK_SH_LIST_DCN10(_MASK)
};
#define ipp_regs(id)\
@ -446,6 +447,8 @@ static const struct dc_debug debug_defaults_drv = {
.vsr_support = true,
.performance_trace = false,
.az_endpoint_mute_only = true,
.recovery_enabled = false, /*enable this by default after testing.*/
.max_downscale_src_width = 3840,
};
static const struct dc_debug debug_defaults_diags = {
@ -581,20 +584,22 @@ static const struct encoder_feature_support link_enc_feature = {
struct link_encoder *dcn10_link_encoder_create(
const struct encoder_init_data *enc_init_data)
{
struct dce110_link_encoder *enc110 =
kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL);
struct dcn10_link_encoder *enc10 =
kzalloc(sizeof(struct dcn10_link_encoder), GFP_KERNEL);
if (!enc110)
if (!enc10)
return NULL;
dce110_link_encoder_construct(enc110,
dcn10_link_encoder_construct(enc10,
enc_init_data,
&link_enc_feature,
&link_enc_regs[enc_init_data->transmitter],
&link_enc_aux_regs[enc_init_data->channel - 1],
&link_enc_hpd_regs[enc_init_data->hpd_source]);
&link_enc_hpd_regs[enc_init_data->hpd_source],
&le_shift,
&le_mask);
return &enc110->base;
return &enc10->base;
}
struct clock_source *dcn10_clock_source_create(
@ -1021,6 +1026,7 @@ static bool construct(
dc->caps.max_cursor_size = 256;
dc->caps.max_slave_planes = 1;
dc->caps.is_apu = true;
dc->caps.post_blend_color_processing = false;
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;

View File

@ -603,11 +603,11 @@ void enc1_stream_encoder_set_mst_bandwidth(
struct fixed31_32 avg_time_slots_per_mtp)
{
struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
uint32_t x = dal_fixed31_32_floor(
uint32_t x = dc_fixpt_floor(
avg_time_slots_per_mtp);
uint32_t y = dal_fixed31_32_ceil(
dal_fixed31_32_shl(
dal_fixed31_32_sub_int(
uint32_t y = dc_fixpt_ceil(
dc_fixpt_shl(
dc_fixpt_sub_int(
avg_time_slots_per_mtp,
x),
26));
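/*
 * Worked example with plain doubles (hedged -- the real code stays in
 * dc_fixpt fixed point): for avg_time_slots_per_mtp = 3.25,
 *	x = floor(3.25)                  = 3
 *	y = ceil((3.25 - 3) * (1 << 26)) = 16777216
 * i.e. the hardware is programmed with the integer part X = 3 plus a
 * 26-bit fractional part Y taken from the remainder.
 */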

View File

@ -355,10 +355,6 @@ void dm_perf_trace_timestamp(const char *func_name, unsigned int line);
/*
* Debug and verification hooks
*/
bool dm_helpers_dc_conn_log(
struct dc_context *ctx,
struct log_entry *entry,
enum dc_log_type event);
void dm_dtn_log_begin(struct dc_context *ctx);
void dm_dtn_log_append_v(struct dc_context *ctx, const char *msg, ...);

View File

@ -108,4 +108,17 @@ enum output_standard {
dm_std_uninitialized = 0, dm_std_cvtr2, dm_std_cvt
};
enum mpc_combine_affinity {
dm_mpc_always_when_possible,
dm_mpc_reduce_voltage,
dm_mpc_reduce_voltage_and_clocks
};
enum self_refresh_affinity {
dm_try_to_allow_self_refresh_and_mclk_switch,
dm_allow_self_refresh_and_mclk_switch,
dm_allow_self_refresh,
dm_neither_self_refresh_nor_mclk_switch
};
#endif

View File

@ -25,39 +25,39 @@
#ifndef __DISPLAY_MODE_STRUCTS_H__
#define __DISPLAY_MODE_STRUCTS_H__
typedef struct _vcs_dpi_voltage_scaling_st voltage_scaling_st;
typedef struct _vcs_dpi_soc_bounding_box_st soc_bounding_box_st;
typedef struct _vcs_dpi_ip_params_st ip_params_st;
typedef struct _vcs_dpi_display_pipe_source_params_st display_pipe_source_params_st;
typedef struct _vcs_dpi_display_output_params_st display_output_params_st;
typedef struct _vcs_dpi_display_bandwidth_st display_bandwidth_st;
typedef struct _vcs_dpi_scaler_ratio_depth_st scaler_ratio_depth_st;
typedef struct _vcs_dpi_scaler_taps_st scaler_taps_st;
typedef struct _vcs_dpi_display_pipe_dest_params_st display_pipe_dest_params_st;
typedef struct _vcs_dpi_display_pipe_params_st display_pipe_params_st;
typedef struct _vcs_dpi_display_clocks_and_cfg_st display_clocks_and_cfg_st;
typedef struct _vcs_dpi_display_e2e_pipe_params_st display_e2e_pipe_params_st;
typedef struct _vcs_dpi_dchub_buffer_sizing_st dchub_buffer_sizing_st;
typedef struct _vcs_dpi_watermarks_perf_st watermarks_perf_st;
typedef struct _vcs_dpi_cstate_pstate_watermarks_st cstate_pstate_watermarks_st;
typedef struct _vcs_dpi_wm_calc_pipe_params_st wm_calc_pipe_params_st;
typedef struct _vcs_dpi_vratio_pre_st vratio_pre_st;
typedef struct _vcs_dpi_display_data_rq_misc_params_st display_data_rq_misc_params_st;
typedef struct _vcs_dpi_display_data_rq_sizing_params_st display_data_rq_sizing_params_st;
typedef struct _vcs_dpi_display_data_rq_dlg_params_st display_data_rq_dlg_params_st;
typedef struct _vcs_dpi_display_cur_rq_dlg_params_st display_cur_rq_dlg_params_st;
typedef struct _vcs_dpi_display_rq_dlg_params_st display_rq_dlg_params_st;
typedef struct _vcs_dpi_display_rq_sizing_params_st display_rq_sizing_params_st;
typedef struct _vcs_dpi_display_rq_misc_params_st display_rq_misc_params_st;
typedef struct _vcs_dpi_display_rq_params_st display_rq_params_st;
typedef struct _vcs_dpi_display_dlg_regs_st display_dlg_regs_st;
typedef struct _vcs_dpi_display_ttu_regs_st display_ttu_regs_st;
typedef struct _vcs_dpi_display_data_rq_regs_st display_data_rq_regs_st;
typedef struct _vcs_dpi_display_rq_regs_st display_rq_regs_st;
typedef struct _vcs_dpi_display_dlg_sys_params_st display_dlg_sys_params_st;
typedef struct _vcs_dpi_display_dlg_prefetch_param_st display_dlg_prefetch_param_st;
typedef struct _vcs_dpi_display_pipe_clock_st display_pipe_clock_st;
typedef struct _vcs_dpi_display_arb_params_st display_arb_params_st;
struct _vcs_dpi_voltage_scaling_st {
int state;
@ -72,89 +72,107 @@ struct _vcs_dpi_voltage_scaling_st {
double dppclk_mhz;
};
struct _vcs_dpi_soc_bounding_box_st {
double sr_exit_time_us;
double sr_enter_plus_exit_time_us;
double urgent_latency_us;
double writeback_latency_us;
double ideal_dram_bw_after_urgent_percent;
unsigned int max_request_size_bytes;
double downspread_percent;
double dram_page_open_time_ns;
double dram_rw_turnaround_time_ns;
double dram_return_buffer_per_channel_bytes;
double dram_channel_width_bytes;
struct _vcs_dpi_soc_bounding_box_st {
double sr_exit_time_us;
double sr_enter_plus_exit_time_us;
double urgent_latency_us;
double urgent_latency_pixel_data_only_us;
double urgent_latency_pixel_mixed_with_vm_data_us;
double urgent_latency_vm_data_only_us;
double writeback_latency_us;
double ideal_dram_bw_after_urgent_percent;
double pct_ideal_dram_sdp_bw_after_urgent_pixel_only; // PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelDataOnly
double pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm;
double pct_ideal_dram_sdp_bw_after_urgent_vm_only;
double max_avg_sdp_bw_use_normal_percent;
double max_avg_dram_bw_use_normal_percent;
unsigned int max_request_size_bytes;
double downspread_percent;
double dram_page_open_time_ns;
double dram_rw_turnaround_time_ns;
double dram_return_buffer_per_channel_bytes;
double dram_channel_width_bytes;
double fabric_datapath_to_dcn_data_return_bytes;
double dcn_downspread_percent;
double dispclk_dppclk_vco_speed_mhz;
double dfs_vco_period_ps;
unsigned int round_trip_ping_latency_dcfclk_cycles;
unsigned int urgent_out_of_order_return_per_channel_bytes;
unsigned int channel_interleave_bytes;
unsigned int num_banks;
unsigned int num_chans;
unsigned int vmm_page_size_bytes;
double dram_clock_change_latency_us;
double writeback_dram_clock_change_latency_us;
unsigned int return_bus_width_bytes;
unsigned int voltage_override;
double xfc_bus_transport_time_us;
double xfc_xbuf_latency_tolerance_us;
unsigned int urgent_out_of_order_return_per_channel_pixel_only_bytes;
unsigned int urgent_out_of_order_return_per_channel_pixel_and_vm_bytes;
unsigned int urgent_out_of_order_return_per_channel_vm_only_bytes;
unsigned int round_trip_ping_latency_dcfclk_cycles;
unsigned int urgent_out_of_order_return_per_channel_bytes;
unsigned int channel_interleave_bytes;
unsigned int num_banks;
unsigned int num_chans;
unsigned int vmm_page_size_bytes;
double dram_clock_change_latency_us;
double writeback_dram_clock_change_latency_us;
unsigned int return_bus_width_bytes;
unsigned int voltage_override;
double xfc_bus_transport_time_us;
double xfc_xbuf_latency_tolerance_us;
int use_urgent_burst_bw;
struct _vcs_dpi_voltage_scaling_st clock_limits[7];
};
struct _vcs_dpi_ip_params_st {
unsigned int max_inter_dcn_tile_repeaters;
unsigned int num_dsc;
unsigned int odm_capable;
unsigned int rob_buffer_size_kbytes;
unsigned int det_buffer_size_kbytes;
unsigned int dpte_buffer_size_in_pte_reqs;
unsigned int pde_proc_buffer_size_64k_reqs;
unsigned int dpp_output_buffer_pixels;
unsigned int opp_output_buffer_lines;
unsigned int pixel_chunk_size_kbytes;
unsigned char pte_enable;
unsigned int pte_chunk_size_kbytes;
unsigned int meta_chunk_size_kbytes;
unsigned int writeback_chunk_size_kbytes;
unsigned int line_buffer_size_bits;
unsigned int max_line_buffer_lines;
unsigned int writeback_luma_buffer_size_kbytes;
unsigned int writeback_chroma_buffer_size_kbytes;
unsigned int writeback_chroma_line_buffer_width_pixels;
unsigned int max_page_table_levels;
unsigned int max_num_dpp;
unsigned int max_num_otg;
unsigned int cursor_chunk_size;
unsigned int cursor_buffer_size;
unsigned int max_num_wb;
unsigned int max_dchub_pscl_bw_pix_per_clk;
unsigned int max_pscl_lb_bw_pix_per_clk;
unsigned int max_lb_vscl_bw_pix_per_clk;
unsigned int max_vscl_hscl_bw_pix_per_clk;
double max_hscl_ratio;
double max_vscl_ratio;
unsigned int hscl_mults;
unsigned int vscl_mults;
unsigned int max_hscl_taps;
unsigned int max_vscl_taps;
unsigned int xfc_supported;
unsigned int xfc_fill_constant_bytes;
double dispclk_ramp_margin_percent;
double xfc_fill_bw_overhead_percent;
double underscan_factor;
unsigned int min_vblank_lines;
unsigned int dppclk_delay_subtotal;
unsigned int dispclk_delay_subtotal;
unsigned int dcfclk_cstate_latency;
unsigned int dppclk_delay_scl;
unsigned int dppclk_delay_scl_lb_only;
unsigned int dppclk_delay_cnvc_formatter;
unsigned int dppclk_delay_cnvc_cursor;
unsigned int is_line_buffer_bpp_fixed;
unsigned int line_buffer_fixed_bpp;
unsigned int dcc_supported;
struct _vcs_dpi_ip_params_st {
bool gpuvm_enable;
bool hostvm_enable;
unsigned int gpuvm_max_page_table_levels;
unsigned int hostvm_max_page_table_levels;
unsigned int hostvm_cached_page_table_levels;
unsigned int pte_group_size_bytes;
unsigned int max_inter_dcn_tile_repeaters;
unsigned int num_dsc;
unsigned int odm_capable;
unsigned int rob_buffer_size_kbytes;
unsigned int det_buffer_size_kbytes;
unsigned int dpte_buffer_size_in_pte_reqs;
unsigned int pde_proc_buffer_size_64k_reqs;
unsigned int dpp_output_buffer_pixels;
unsigned int opp_output_buffer_lines;
unsigned int pixel_chunk_size_kbytes;
unsigned char pte_enable;
unsigned int pte_chunk_size_kbytes;
unsigned int meta_chunk_size_kbytes;
unsigned int writeback_chunk_size_kbytes;
unsigned int line_buffer_size_bits;
unsigned int max_line_buffer_lines;
unsigned int writeback_luma_buffer_size_kbytes;
unsigned int writeback_chroma_buffer_size_kbytes;
unsigned int writeback_chroma_line_buffer_width_pixels;
unsigned int max_page_table_levels;
unsigned int max_num_dpp;
unsigned int max_num_otg;
unsigned int cursor_chunk_size;
unsigned int cursor_buffer_size;
unsigned int max_num_wb;
unsigned int max_dchub_pscl_bw_pix_per_clk;
unsigned int max_pscl_lb_bw_pix_per_clk;
unsigned int max_lb_vscl_bw_pix_per_clk;
unsigned int max_vscl_hscl_bw_pix_per_clk;
double max_hscl_ratio;
double max_vscl_ratio;
unsigned int hscl_mults;
unsigned int vscl_mults;
unsigned int max_hscl_taps;
unsigned int max_vscl_taps;
unsigned int xfc_supported;
unsigned int xfc_fill_constant_bytes;
double dispclk_ramp_margin_percent;
double xfc_fill_bw_overhead_percent;
double underscan_factor;
unsigned int min_vblank_lines;
unsigned int dppclk_delay_subtotal;
unsigned int dispclk_delay_subtotal;
unsigned int dcfclk_cstate_latency;
unsigned int dppclk_delay_scl;
unsigned int dppclk_delay_scl_lb_only;
unsigned int dppclk_delay_cnvc_formatter;
unsigned int dppclk_delay_cnvc_cursor;
unsigned int is_line_buffer_bpp_fixed;
unsigned int line_buffer_fixed_bpp;
unsigned int dcc_supported;
unsigned int IsLineBufferBppFixed;
unsigned int LineBufferFixedBpp;
@ -169,41 +187,45 @@ struct _vcs_dpi_display_xfc_params_st {
int xfc_slv_chunk_size_bytes;
};
struct _vcs_dpi_display_pipe_source_params_st {
int source_format;
unsigned char dcc;
unsigned int dcc_override;
unsigned int dcc_rate;
unsigned char dcc_use_global;
unsigned char vm;
unsigned char vm_levels_force_en;
unsigned int vm_levels_force;
int source_scan;
int sw_mode;
int macro_tile_size;
unsigned char is_display_sw;
unsigned int viewport_width;
unsigned int viewport_height;
unsigned int viewport_y_y;
unsigned int viewport_y_c;
unsigned int viewport_width_c;
unsigned int viewport_height_c;
unsigned int data_pitch;
unsigned int data_pitch_c;
unsigned int meta_pitch;
unsigned int meta_pitch_c;
unsigned int cur0_src_width;
int cur0_bpp;
unsigned int cur1_src_width;
int cur1_bpp;
int num_cursors;
unsigned char is_hsplit;
unsigned char dynamic_metadata_enable;
unsigned int dynamic_metadata_lines_before_active;
unsigned int dynamic_metadata_xmit_bytes;
unsigned int hsplit_grp;
unsigned char xfc_enable;
unsigned char xfc_slave;
struct _vcs_dpi_display_pipe_source_params_st {
int source_format;
unsigned char dcc;
unsigned int dcc_override;
unsigned int dcc_rate;
unsigned char dcc_use_global;
unsigned char vm;
bool gpuvm; // gpuvm enabled
bool hostvm; // hostvm enabled
bool gpuvm_levels_force_en;
unsigned int gpuvm_levels_force;
bool hostvm_levels_force_en;
unsigned int hostvm_levels_force;
int source_scan;
int sw_mode;
int macro_tile_size;
unsigned char is_display_sw;
unsigned int viewport_width;
unsigned int viewport_height;
unsigned int viewport_y_y;
unsigned int viewport_y_c;
unsigned int viewport_width_c;
unsigned int viewport_height_c;
unsigned int data_pitch;
unsigned int data_pitch_c;
unsigned int meta_pitch;
unsigned int meta_pitch_c;
unsigned int cur0_src_width;
int cur0_bpp;
unsigned int cur1_src_width;
int cur1_bpp;
int num_cursors;
unsigned char is_hsplit;
unsigned char dynamic_metadata_enable;
unsigned int dynamic_metadata_lines_before_active;
unsigned int dynamic_metadata_xmit_bytes;
unsigned int hsplit_grp;
unsigned char xfc_enable;
unsigned char xfc_slave;
struct _vcs_dpi_display_xfc_params_st xfc_params;
};
struct writeback_st {
@ -219,335 +241,335 @@ struct writeback_st {
double wb_vratio;
};
struct _vcs_dpi_display_output_params_st {
int dp_lanes;
int output_bpp;
int dsc_enable;
int wb_enable;
int num_active_wb;
int opp_input_bpc;
int output_type;
int output_format;
int output_standard;
int dsc_slices;
struct _vcs_dpi_display_output_params_st {
int dp_lanes;
int output_bpp;
int dsc_enable;
int wb_enable;
int num_active_wb;
int output_bpc;
int output_type;
int output_format;
int output_standard;
int dsc_slices;
struct writeback_st wb;
};
struct _vcs_dpi_display_bandwidth_st {
double total_bw_consumed_gbps;
double guaranteed_urgent_return_bw_gbps;
};
struct _vcs_dpi_scaler_ratio_depth_st {
double hscl_ratio;
double vscl_ratio;
double hscl_ratio_c;
double vscl_ratio_c;
double vinit;
double vinit_c;
double vinit_bot;
double vinit_bot_c;
int lb_depth;
int scl_enable;
};
struct _vcs_dpi_scaler_taps_st {
unsigned int htaps;
unsigned int vtaps;
unsigned int htaps_c;
unsigned int vtaps_c;
};
struct _vcs_dpi_display_pipe_dest_params_st {
unsigned int recout_width;
unsigned int recout_height;
unsigned int full_recout_width;
unsigned int full_recout_height;
unsigned int hblank_start;
unsigned int hblank_end;
unsigned int vblank_start;
unsigned int vblank_end;
unsigned int htotal;
unsigned int vtotal;
unsigned int vactive;
unsigned int hactive;
unsigned int vstartup_start;
unsigned int vupdate_offset;
unsigned int vupdate_width;
unsigned int vready_offset;
unsigned char interlaced;
unsigned char underscan;
double pixel_rate_mhz;
unsigned char synchronized_vblank_all_planes;
unsigned char otg_inst;
unsigned char odm_split_cnt;
unsigned char odm_combine;
};
struct _vcs_dpi_display_pipe_params_st {
display_pipe_source_params_st src;
display_pipe_dest_params_st dest;
scaler_ratio_depth_st scale_ratio_depth;
scaler_taps_st scale_taps;
};
struct _vcs_dpi_display_clocks_and_cfg_st {
int voltage;
double dppclk_mhz;
double refclk_mhz;
double dispclk_mhz;
double dcfclk_mhz;
double socclk_mhz;
};
struct _vcs_dpi_display_e2e_pipe_params_st {
display_pipe_params_st pipe;
display_output_params_st dout;
display_clocks_and_cfg_st clks_cfg;
};
struct _vcs_dpi_dchub_buffer_sizing_st {
unsigned int swath_width_y;
unsigned int swath_height_y;
unsigned int swath_height_c;
unsigned int detail_buffer_size_y;
};
struct _vcs_dpi_watermarks_perf_st {
double stutter_eff_in_active_region_percent;
double urgent_latency_supported_us;
double non_urgent_latency_supported_us;
double dram_clock_change_margin_us;
double dram_access_eff_percent;
};
struct _vcs_dpi_cstate_pstate_watermarks_st {
double cstate_exit_us;
double cstate_enter_plus_exit_us;
double pstate_change_us;
};
struct _vcs_dpi_wm_calc_pipe_params_st {
unsigned int num_dpp;
int voltage;
int output_type;
double dcfclk_mhz;
double socclk_mhz;
double dppclk_mhz;
double pixclk_mhz;
unsigned char interlace_en;
unsigned char pte_enable;
unsigned char dcc_enable;
double dcc_rate;
double bytes_per_pixel_c;
double bytes_per_pixel_y;
unsigned int swath_width_y;
unsigned int swath_height_y;
unsigned int swath_height_c;
unsigned int det_buffer_size_y;
double h_ratio;
double v_ratio;
unsigned int h_taps;
unsigned int h_total;
unsigned int v_total;
unsigned int v_active;
unsigned int e2e_index;
double display_pipe_line_delivery_time;
double read_bw;
unsigned int lines_in_det_y;
unsigned int lines_in_det_y_rounded_down_to_swath;
double full_det_buffering_time;
double dcfclk_deepsleep_mhz_per_plane;
};
struct _vcs_dpi_vratio_pre_st {
double vratio_pre_l;
double vratio_pre_c;
};
struct _vcs_dpi_display_data_rq_misc_params_st {
unsigned int full_swath_bytes;
unsigned int stored_swath_bytes;
unsigned int blk256_height;
unsigned int blk256_width;
unsigned int req_height;
unsigned int req_width;
};
struct _vcs_dpi_display_data_rq_sizing_params_st {
unsigned int chunk_bytes;
unsigned int min_chunk_bytes;
unsigned int meta_chunk_bytes;
unsigned int min_meta_chunk_bytes;
unsigned int mpte_group_bytes;
unsigned int dpte_group_bytes;
};
struct _vcs_dpi_display_data_rq_dlg_params_st {
unsigned int swath_width_ub;
unsigned int swath_height;
unsigned int req_per_swath_ub;
unsigned int meta_pte_bytes_per_frame_ub;
unsigned int dpte_req_per_row_ub;
unsigned int dpte_groups_per_row_ub;
unsigned int dpte_row_height;
unsigned int dpte_bytes_per_row_ub;
unsigned int meta_chunks_per_row_ub;
unsigned int meta_req_per_row_ub;
unsigned int meta_row_height;
unsigned int meta_bytes_per_row_ub;
};
struct _vcs_dpi_display_cur_rq_dlg_params_st {
unsigned char enable;
unsigned int swath_height;
unsigned int req_per_line;
};
struct _vcs_dpi_display_rq_dlg_params_st {
display_data_rq_dlg_params_st rq_l;
display_data_rq_dlg_params_st rq_c;
display_cur_rq_dlg_params_st rq_cur0;
};
struct _vcs_dpi_display_rq_sizing_params_st {
display_data_rq_sizing_params_st rq_l;
display_data_rq_sizing_params_st rq_c;
};
struct _vcs_dpi_display_rq_misc_params_st {
display_data_rq_misc_params_st rq_l;
display_data_rq_misc_params_st rq_c;
};
struct _vcs_dpi_display_rq_params_st {
unsigned char yuv420;
unsigned char yuv420_10bpc;
display_rq_misc_params_st misc;
display_rq_sizing_params_st sizing;
display_rq_dlg_params_st dlg;
};
struct _vcs_dpi_display_dlg_regs_st {
unsigned int refcyc_h_blank_end;
unsigned int dlg_vblank_end;
unsigned int min_dst_y_next_start;
unsigned int refcyc_per_htotal;
unsigned int refcyc_x_after_scaler;
unsigned int dst_y_after_scaler;
unsigned int dst_y_prefetch;
unsigned int dst_y_per_vm_vblank;
unsigned int dst_y_per_row_vblank;
unsigned int dst_y_per_vm_flip;
unsigned int dst_y_per_row_flip;
unsigned int ref_freq_to_pix_freq;
unsigned int vratio_prefetch;
unsigned int vratio_prefetch_c;
unsigned int refcyc_per_pte_group_vblank_l;
unsigned int refcyc_per_pte_group_vblank_c;
unsigned int refcyc_per_meta_chunk_vblank_l;
unsigned int refcyc_per_meta_chunk_vblank_c;
unsigned int refcyc_per_pte_group_flip_l;
unsigned int refcyc_per_pte_group_flip_c;
unsigned int refcyc_per_meta_chunk_flip_l;
unsigned int refcyc_per_meta_chunk_flip_c;
unsigned int dst_y_per_pte_row_nom_l;
unsigned int dst_y_per_pte_row_nom_c;
unsigned int refcyc_per_pte_group_nom_l;
unsigned int refcyc_per_pte_group_nom_c;
unsigned int dst_y_per_meta_row_nom_l;
unsigned int dst_y_per_meta_row_nom_c;
unsigned int refcyc_per_meta_chunk_nom_l;
unsigned int refcyc_per_meta_chunk_nom_c;
unsigned int refcyc_per_line_delivery_pre_l;
unsigned int refcyc_per_line_delivery_pre_c;
unsigned int refcyc_per_line_delivery_l;
unsigned int refcyc_per_line_delivery_c;
unsigned int chunk_hdl_adjust_cur0;
unsigned int chunk_hdl_adjust_cur1;
unsigned int vready_after_vcount0;
unsigned int dst_y_offset_cur0;
unsigned int dst_y_offset_cur1;
unsigned int xfc_reg_transfer_delay;
unsigned int xfc_reg_precharge_delay;
unsigned int xfc_reg_remote_surface_flip_latency;
unsigned int xfc_reg_prefetch_margin;
unsigned int dst_y_delta_drq_limit;
};
struct _vcs_dpi_display_ttu_regs_st {
unsigned int qos_level_low_wm;
unsigned int qos_level_high_wm;
unsigned int min_ttu_vblank;
unsigned int qos_level_flip;
unsigned int refcyc_per_req_delivery_l;
unsigned int refcyc_per_req_delivery_c;
unsigned int refcyc_per_req_delivery_cur0;
unsigned int refcyc_per_req_delivery_cur1;
unsigned int refcyc_per_req_delivery_pre_l;
unsigned int refcyc_per_req_delivery_pre_c;
unsigned int refcyc_per_req_delivery_pre_cur0;
unsigned int refcyc_per_req_delivery_pre_cur1;
unsigned int qos_level_fixed_l;
unsigned int qos_level_fixed_c;
unsigned int qos_level_fixed_cur0;
unsigned int qos_level_fixed_cur1;
unsigned int qos_ramp_disable_l;
unsigned int qos_ramp_disable_c;
unsigned int qos_ramp_disable_cur0;
unsigned int qos_ramp_disable_cur1;
};
struct _vcs_dpi_display_data_rq_regs_st {
unsigned int chunk_size;
unsigned int min_chunk_size;
unsigned int meta_chunk_size;
unsigned int min_meta_chunk_size;
unsigned int dpte_group_size;
unsigned int mpte_group_size;
unsigned int swath_height;
unsigned int pte_row_height_linear;
};
struct _vcs_dpi_display_rq_regs_st {
display_data_rq_regs_st rq_regs_l;
display_data_rq_regs_st rq_regs_c;
unsigned int drq_expansion_mode;
unsigned int prq_expansion_mode;
unsigned int mrq_expansion_mode;
unsigned int crq_expansion_mode;
unsigned int plane1_base_address;
};
struct _vcs_dpi_display_dlg_sys_params_st {
double t_mclk_wm_us;
double t_urg_wm_us;
double t_sr_wm_us;
double t_extra_us;
double mem_trip_us;
double t_srx_delay_us;
double deepsleep_dcfclk_mhz;
double total_flip_bw;
unsigned int total_flip_bytes;
};
struct _vcs_dpi_display_dlg_prefetch_param_st {
double prefetch_bw;
unsigned int flip_bytes;
};
struct _vcs_dpi_display_pipe_clock_st {
double dcfclk_mhz;
double dispclk_mhz;
double socclk_mhz;
double dscclk_mhz[6];
double dppclk_mhz[6];
};
struct _vcs_dpi_display_arb_params_st {
int max_req_outstanding;
int min_req_outstanding;
int sat_level_us;
};
#endif /*__DISPLAY_MODE_STRUCTS_H__*/

View File

@ -35,6 +35,16 @@ static inline double dml_min(double a, double b)
return (double) dcn_bw_min2(a, b);
}
static inline double dml_min3(double a, double b, double c)
{
return dml_min(dml_min(a, b), c);
}
static inline double dml_min4(double a, double b, double c, double d)
{
return dml_min(dml_min(a, b), dml_min(c, d));
}
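/*
 * Hedged usage sketch: these helpers reduce n-ary minima to the binary
 * dml_min, e.g.
 *	double limit = dml_min4(a, b, c, d);
 * equals dml_min(dml_min(a, b), dml_min(c, d)), the smallest of the
 * four inputs.
 */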
static inline double dml_max(double a, double b)
{
return (double) dcn_bw_max2(a, b);

View File

@ -75,9 +75,7 @@ bool dal_hw_factory_init(
return true;
case DCE_VERSION_11_0:
case DCE_VERSION_11_2:
#if defined(CONFIG_DRM_AMD_DC_VEGAM)
case DCE_VERSION_11_22:
#endif
dal_hw_factory_dce110_init(factory);
return true;
case DCE_VERSION_12_0:

View File

@ -72,9 +72,7 @@ bool dal_hw_translate_init(
case DCE_VERSION_10_0:
case DCE_VERSION_11_0:
case DCE_VERSION_11_2:
#if defined(CONFIG_DRM_AMD_DC_VEGAM)
case DCE_VERSION_11_22:
#endif
dal_hw_translate_dce110_init(translate);
return true;
case DCE_VERSION_12_0:

View File

@ -83,9 +83,7 @@ struct i2caux *dal_i2caux_create(
case DCE_VERSION_8_3:
return dal_i2caux_dce80_create(ctx);
case DCE_VERSION_11_2:
#if defined(CONFIG_DRM_AMD_DC_VEGAM)
case DCE_VERSION_11_22:
#endif
return dal_i2caux_dce112_create(ctx);
case DCE_VERSION_11_0:
return dal_i2caux_dce110_create(ctx);

View File

@ -43,9 +43,7 @@ enum bw_calcs_version {
BW_CALCS_VERSION_POLARIS10,
BW_CALCS_VERSION_POLARIS11,
BW_CALCS_VERSION_POLARIS12,
#if defined(CONFIG_DRM_AMD_DC_VEGAM)
BW_CALCS_VERSION_VEGAM,
#endif
BW_CALCS_VERSION_STONEY,
BW_CALCS_VERSION_VEGA10
};

Some files were not shown because too many files have changed in this diff