mirror of https://gitee.com/openkylin/linux.git
Merge branch 'drm-next-4.16' of git://people.freedesktop.org/~agd5f/linux into drm-next
First feature request for 4.16. Highlights:

- RV and Vega header cleanups
- TTM operation context support
- 48 bit GPUVM fixes for Vega/RV
- More smatch fixes
- ECC support for vega10
- Resizeable BAR support
- Multi-display sync support in DC
- SR-IOV fixes
- Various scheduler improvements
- GPU reset fixes and vram lost tracking
- Clean up DC/powerplay interfaces
- DCN display fixes
- Various DC fixes

* 'drm-next-4.16' of git://people.freedesktop.org/~agd5f/linux: (291 commits)
  drm/radeon: Use drm_fb_helper_lastclose() and _poll_changed()
  drm/amdgpu: Use drm_fb_helper_lastclose() and _poll_changed()
  drm/amd/display: Use drm_fb_helper_poll_changed()
  drm/ttm: swap consecutive allocated pooled pages v4
  drm/amdgpu: fix amdgpu_sync_resv v2
  drm/ttm: swap consecutive allocated cached pages v3
  drm/amd/amdgpu: set gtt size according to system memory size only
  drm/amdgpu: Get rid of dep_sync as a seperate object.
  drm/amdgpu: allow specifying vm_block_size for multi level PDs v2
  drm/amdgpu: move validation of the VM size into the VM code
  drm/amdgpu: allow non pot VM size values
  drm/amdgpu: choose number of VM levels based on VM size
  drm/amdgpu: unify VM size handling of Vega10 with older generation
  drm/amdgpu: fix amdgpu_vm_num_entries
  drm/amdgpu: fix VM PD addr shift
  drm/amdgpu: correct vce4.0 fw config for SRIOV (V2)
  drm/amd/display: Don't call dm_log_to_buffer directly in dc_conn_log
  drm/amd/display: Add dm_logger_append_va API
  drm/ttm: Use a static string instead of an array of char *
  drm/amd/display: remove usage of legacy_cursor_update
  ...
commit 9c606cd411

@@ -47,6 +47,8 @@
 #include <drm/amdgpu_drm.h>
 
 #include <kgd_kfd_interface.h>
+#include "dm_pp_interface.h"
+#include "kgd_pp_interface.h"
 
 #include "amd_shared.h"
 #include "amdgpu_mode.h"

@@ -59,7 +61,6 @@
 #include "amdgpu_sync.h"
 #include "amdgpu_ring.h"
 #include "amdgpu_vm.h"
-#include "amd_powerplay.h"
 #include "amdgpu_dpm.h"
 #include "amdgpu_acp.h"
 #include "amdgpu_uvd.h"

@@ -67,11 +68,11 @@
 #include "amdgpu_vcn.h"
 #include "amdgpu_mn.h"
 #include "amdgpu_dm.h"
 
 #include "gpu_scheduler.h"
 #include "amdgpu_virt.h"
 #include "amdgpu_gart.h"
 
 /*
  * Modules parameters.
  */

@@ -177,6 +178,10 @@ extern int amdgpu_cik_support;
 #define CIK_CURSOR_WIDTH 128
 #define CIK_CURSOR_HEIGHT 128
 
+/* GPU RESET flags */
+#define AMDGPU_RESET_INFO_VRAM_LOST (1 << 0)
+#define AMDGPU_RESET_INFO_FULLRESET (1 << 1)
+
 struct amdgpu_device;
 struct amdgpu_ib;
 struct amdgpu_cs_parser;

@@ -735,6 +740,7 @@ struct amdgpu_ctx {
         struct amdgpu_device *adev;
         struct amdgpu_queue_mgr queue_mgr;
         unsigned reset_counter;
+        unsigned reset_counter_query;
         uint32_t vram_lost_counter;
         spinlock_t ring_lock;
         struct dma_fence **fences;

@@ -743,6 +749,7 @@ struct amdgpu_ctx {
         enum amd_sched_priority init_priority;
         enum amd_sched_priority override_priority;
         struct mutex lock;
+        atomic_t guilty;
 };
 
 struct amdgpu_ctx_mgr {

@@ -1114,7 +1121,6 @@ struct amdgpu_job {
         struct amdgpu_vm *vm;
         struct amdgpu_ring *ring;
         struct amdgpu_sync sync;
-        struct amdgpu_sync dep_sync;
         struct amdgpu_sync sched_sync;
         struct amdgpu_ib *ibs;
         struct dma_fence *fence; /* the hw fence */

@@ -1405,6 +1411,7 @@ struct amdgpu_fw_vram_usage {
 };
 
 int amdgpu_fw_reserve_vram_init(struct amdgpu_device *adev);
+void amdgpu_fw_reserve_vram_fini(struct amdgpu_device *adev);
 
 /*
  * CGS

@@ -1421,6 +1428,13 @@ typedef void (*amdgpu_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t);
 typedef uint32_t (*amdgpu_block_rreg_t)(struct amdgpu_device*, uint32_t, uint32_t);
 typedef void (*amdgpu_block_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t, uint32_t);
 
+struct amd_powerplay {
+        struct cgs_device *cgs_device;
+        void *pp_handle;
+        const struct amd_ip_funcs *ip_funcs;
+        const struct amd_pm_funcs *pp_funcs;
+};
+
 #define AMDGPU_RESET_MAGIC_NUM 64
 struct amdgpu_device {
         struct device *dev;

@@ -1616,9 +1630,6 @@ struct amdgpu_device {
         /* link all shadow bo */
         struct list_head shadow_list;
         struct mutex shadow_list_lock;
-        /* link all gtt */
-        spinlock_t gtt_list_lock;
-        struct list_head gtt_list;
         /* keep an lru list of rings by HW IP */
         struct list_head ring_lru_list;
         spinlock_t ring_lru_list_lock;

@@ -1629,7 +1640,8 @@ struct amdgpu_device {
 
         /* record last mm index being written through WREG32*/
         unsigned long last_mm_index;
-        bool in_sriov_reset;
+        bool in_gpu_reset;
+        struct mutex lock_reset;
 };
 
 static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_bo_device *bdev)

@@ -1823,7 +1835,7 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
 #define amdgpu_psp_check_fw_loading_status(adev, i) (adev)->firmware.funcs->check_fw_loading_status((adev), (i))
 
 /* Common functions */
-int amdgpu_gpu_reset(struct amdgpu_device *adev);
+int amdgpu_gpu_recover(struct amdgpu_device *adev, struct amdgpu_job* job);
 bool amdgpu_need_backup(struct amdgpu_device *adev);
 void amdgpu_pci_config_reset(struct amdgpu_device *adev);
 bool amdgpu_need_post(struct amdgpu_device *adev);

@@ -1835,6 +1847,7 @@ void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain);
 bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
 void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 base);
 void amdgpu_gart_location(struct amdgpu_device *adev, struct amdgpu_mc *mc);
+int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev);
 void amdgpu_ttm_set_active_vram_size(struct amdgpu_device *adev, u64 size);
 int amdgpu_ttm_init(struct amdgpu_device *adev);
 void amdgpu_ttm_fini(struct amdgpu_device *adev);

@@ -85,7 +85,7 @@ void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev)
                 kfd2kgd = amdgpu_amdkfd_gfx_8_0_get_functions();
                 break;
         default:
-                dev_info(adev->dev, "kfd not supported on this ASIC\n");
+                dev_dbg(adev->dev, "kfd not supported on this ASIC\n");
                 return;
         }
 

@@ -690,12 +690,12 @@ int amdgpu_atombios_get_clock_info(struct amdgpu_device *adev)
                         le32_to_cpu(firmware_info->info_21.ulDefaultDispEngineClkFreq);
                 /* set a reasonable default for DP */
                 if (adev->clock.default_dispclk < 53900) {
-                        DRM_INFO("Changing default dispclk from %dMhz to 600Mhz\n",
+                        DRM_DEBUG("Changing default dispclk from %dMhz to 600Mhz\n",
                                  adev->clock.default_dispclk / 100);
                         adev->clock.default_dispclk = 60000;
                 } else if (adev->clock.default_dispclk <= 60000) {
-                        DRM_INFO("Changing default dispclk from %dMhz to 625Mhz\n",
+                        DRM_DEBUG("Changing default dispclk from %dMhz to 625Mhz\n",
                                  adev->clock.default_dispclk / 100);
                         adev->clock.default_dispclk = 62500;
                 }
                 adev->clock.dp_extclk =

@@ -948,7 +948,6 @@ static int amdgpu_cgs_get_active_displays_info(struct cgs_device *cgs_device,
                                 (amdgpu_crtc->v_border * 2);
                         mode_info->vblank_time_us = vblank_lines * line_time_us;
                         mode_info->refresh_rate = drm_mode_vrefresh(&amdgpu_crtc->hw_mode);
-                        mode_info->ref_clock = adev->clock.spll.reference_freq;
                         mode_info = NULL;
                 }
         }

@@ -958,7 +957,6 @@ static int amdgpu_cgs_get_active_displays_info(struct cgs_device *cgs_device,
                 if (mode_info != NULL) {
                         mode_info->vblank_time_us = adev->pm.pm_display_cfg.min_vblank_time;
                         mode_info->refresh_rate = adev->pm.pm_display_cfg.vrefresh;
-                        mode_info->ref_clock = adev->clock.spll.reference_freq;
                 }
         }
         return 0;

@@ -90,6 +90,12 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
                 goto free_chunk;
         }
 
+        /* skip guilty context job */
+        if (atomic_read(&p->ctx->guilty) == 1) {
+                ret = -ECANCELED;
+                goto free_chunk;
+        }
+
         mutex_lock(&p->ctx->lock);
 
         /* get chunks */

@@ -337,7 +343,7 @@ static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
                                  struct amdgpu_bo *bo)
 {
         struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
-        u64 initial_bytes_moved, bytes_moved;
+        struct ttm_operation_ctx ctx = { true, false };
         uint32_t domain;
         int r;
 

@@ -367,15 +373,13 @@ static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
 
 retry:
         amdgpu_ttm_placement_from_domain(bo, domain);
-        initial_bytes_moved = atomic64_read(&adev->num_bytes_moved);
-        r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
-        bytes_moved = atomic64_read(&adev->num_bytes_moved) -
-                      initial_bytes_moved;
-        p->bytes_moved += bytes_moved;
+        r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+        p->bytes_moved += ctx.bytes_moved;
         if (adev->mc.visible_vram_size < adev->mc.real_vram_size &&
             bo->tbo.mem.mem_type == TTM_PL_VRAM &&
             bo->tbo.mem.start < adev->mc.visible_vram_size >> PAGE_SHIFT)
-                p->bytes_moved_vis += bytes_moved;
+                p->bytes_moved_vis += ctx.bytes_moved;
 
         if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
                 domain = bo->allowed_domains;

@@ -390,6 +394,7 @@ static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
                                 struct amdgpu_bo *validated)
 {
         uint32_t domain = validated->allowed_domains;
+        struct ttm_operation_ctx ctx = { true, false };
         int r;
 
         if (!p->evictable)

@@ -431,7 +436,7 @@ static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
                         bo->tbo.mem.mem_type == TTM_PL_VRAM &&
                         bo->tbo.mem.start < adev->mc.visible_vram_size >> PAGE_SHIFT;
                 initial_bytes_moved = atomic64_read(&adev->num_bytes_moved);
-                r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
+                r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
                 bytes_moved = atomic64_read(&adev->num_bytes_moved) -
                               initial_bytes_moved;
                 p->bytes_moved += bytes_moved;

@@ -470,6 +475,7 @@ static int amdgpu_cs_validate(void *param, struct amdgpu_bo *bo)
 static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
                             struct list_head *validated)
 {
+        struct ttm_operation_ctx ctx = { true, false };
         struct amdgpu_bo_list_entry *lobj;
         int r;
 

@@ -487,8 +493,7 @@ static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
                     lobj->user_pages) {
                         amdgpu_ttm_placement_from_domain(bo,
                                                          AMDGPU_GEM_DOMAIN_CPU);
-                        r = ttm_bo_validate(&bo->tbo, &bo->placement, true,
-                                            false);
+                        r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
                         if (r)
                                 return r;
                         amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm,

@@ -678,7 +683,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
         if (!r && p->uf_entry.robj) {
                 struct amdgpu_bo *uf = p->uf_entry.robj;
 
-                r = amdgpu_ttm_bind(&uf->tbo, &uf->tbo.mem);
+                r = amdgpu_ttm_alloc_gart(&uf->tbo);
                 p->job->uf_addr += amdgpu_bo_gpu_offset(uf);
         }
 

@@ -781,7 +786,7 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p)
                 return r;
 
         r = amdgpu_sync_fence(adev, &p->job->sync,
-                              fpriv->prt_va->last_pt_update);
+                              fpriv->prt_va->last_pt_update, false);
         if (r)
                 return r;
 

@@ -795,7 +800,7 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p)
                         return r;
 
                 f = bo_va->last_pt_update;
-                r = amdgpu_sync_fence(adev, &p->job->sync, f);
+                r = amdgpu_sync_fence(adev, &p->job->sync, f, false);
                 if (r)
                         return r;
         }

@@ -818,7 +823,7 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p)
                         return r;
 
                 f = bo_va->last_pt_update;
-                r = amdgpu_sync_fence(adev, &p->job->sync, f);
+                r = amdgpu_sync_fence(adev, &p->job->sync, f, false);
                 if (r)
                         return r;
         }

@@ -829,7 +834,7 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p)
         if (r)
                 return r;
 
-        r = amdgpu_sync_fence(adev, &p->job->sync, vm->last_update);
+        r = amdgpu_sync_fence(adev, &p->job->sync, vm->last_update, false);
         if (r)
                 return r;
 

@@ -865,8 +870,8 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
                 struct amdgpu_bo_va_mapping *m;
                 struct amdgpu_bo *aobj = NULL;
                 struct amdgpu_cs_chunk *chunk;
+                uint64_t offset, va_start;
                 struct amdgpu_ib *ib;
-                uint64_t offset;
                 uint8_t *kptr;
 
                 chunk = &p->chunks[i];

@@ -876,14 +881,14 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
                 if (chunk->chunk_id != AMDGPU_CHUNK_ID_IB)
                         continue;
 
-                r = amdgpu_cs_find_mapping(p, chunk_ib->va_start,
-                                           &aobj, &m);
+                va_start = chunk_ib->va_start & AMDGPU_VA_HOLE_MASK;
+                r = amdgpu_cs_find_mapping(p, va_start, &aobj, &m);
                 if (r) {
                         DRM_ERROR("IB va_start is invalid\n");
                         return r;
                 }
 
-                if ((chunk_ib->va_start + chunk_ib->ib_bytes) >
+                if ((va_start + chunk_ib->ib_bytes) >
                     (m->last + 1) * AMDGPU_GPU_PAGE_SIZE) {
                         DRM_ERROR("IB va_start+ib_bytes is invalid\n");
                         return -EINVAL;

@@ -896,7 +901,7 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
                 }
 
                 offset = m->start * AMDGPU_GPU_PAGE_SIZE;
-                kptr += chunk_ib->va_start - offset;
+                kptr += va_start - offset;
 
                 memcpy(ib->ptr, kptr, chunk_ib->ib_bytes);
                 amdgpu_bo_kunmap(aobj);

@@ -1033,8 +1038,8 @@ static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p,
                         amdgpu_ctx_put(ctx);
                         return r;
                 } else if (fence) {
-                        r = amdgpu_sync_fence(p->adev, &p->job->sync,
-                                              fence);
+                        r = amdgpu_sync_fence(p->adev, &p->job->sync, fence,
+                                              true);
                         dma_fence_put(fence);
                         amdgpu_ctx_put(ctx);
                         if (r)

@@ -1053,7 +1058,7 @@ static int amdgpu_syncobj_lookup_and_add_to_sync(struct amdgpu_cs_parser *p,
         if (r)
                 return r;
 
-        r = amdgpu_sync_fence(p->adev, &p->job->sync, fence);
+        r = amdgpu_sync_fence(p->adev, &p->job->sync, fence, true);
         dma_fence_put(fence);
 
         return r;

@@ -1194,11 +1199,10 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
         job->uf_sequence = seq;
 
         amdgpu_job_free_resources(job);
-        amdgpu_ring_priority_get(job->ring,
-                                 amd_sched_get_job_priority(&job->base));
+        amdgpu_ring_priority_get(job->ring, job->base.s_priority);
 
         trace_amdgpu_cs_ioctl(job);
-        amd_sched_entity_push_job(&job->base);
+        amd_sched_entity_push_job(&job->base, entity);
 
         ttm_eu_fence_buffer_objects(&p->ticket, &p->validated, p->fence);
         amdgpu_mn_unlock(p->mn);

@@ -1570,6 +1574,7 @@ int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
                            struct amdgpu_bo_va_mapping **map)
 {
         struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
+        struct ttm_operation_ctx ctx = { false, false };
         struct amdgpu_vm *vm = &fpriv->vm;
         struct amdgpu_bo_va_mapping *mapping;
         int r;

@@ -1590,11 +1595,10 @@ int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
         if (!((*bo)->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)) {
                 (*bo)->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
                 amdgpu_ttm_placement_from_domain(*bo, (*bo)->allowed_domains);
-                r = ttm_bo_validate(&(*bo)->tbo, &(*bo)->placement, false,
-                                    false);
+                r = ttm_bo_validate(&(*bo)->tbo, &(*bo)->placement, &ctx);
                 if (r)
                         return r;
         }
 
-        return amdgpu_ttm_bind(&(*bo)->tbo, &(*bo)->tbo.mem);
+        return amdgpu_ttm_alloc_gart(&(*bo)->tbo);
 }

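The command-submission hunks above all make the same substitution: the old ttm_bo_validate(bo, placement, interruptible, no_wait_gpu) call plus the manual before/after reads of adev->num_bytes_moved are replaced by a ttm_operation_ctx that carries the flags in and the move statistics out. A minimal sketch of the new calling convention follows; the helper name and the exact header paths are illustrative assumptions, while ttm_bo_validate() itself and the { interruptible, no_wait_gpu } initializer order are taken from the diff.

#include <linux/types.h>
#include <linux/printk.h>
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_placement.h>

/* Hypothetical helper (not part of the patch): validate a buffer with the
 * ttm_operation_ctx interface the hunks above switch to. */
static int example_validate(struct ttm_buffer_object *tbo,
                            struct ttm_placement *placement,
                            u64 *bytes_moved)
{
        /* { true, false } == { .interruptible = true, .no_wait_gpu = false },
         * the same initializer used throughout the diff */
        struct ttm_operation_ctx ctx = { true, false };
        int r;

        /* old interface, removed above:
         *     r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
         * new interface: flags go in, move statistics come back out */
        r = ttm_bo_validate(tbo, placement, &ctx);
        if (!r && bytes_moved)
                *bytes_moved = ctx.bytes_moved;

        pr_debug("validation moved %llu bytes\n",
                 (unsigned long long)ctx.bytes_moved);
        return r;
}
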
@@ -75,6 +75,7 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
         }
 
         ctx->reset_counter = atomic_read(&adev->gpu_reset_counter);
+        ctx->reset_counter_query = ctx->reset_counter;
         ctx->vram_lost_counter = atomic_read(&adev->vram_lost_counter);
         ctx->init_priority = priority;
         ctx->override_priority = AMD_SCHED_PRIORITY_UNSET;

@@ -90,7 +91,7 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
                         continue;
 
                 r = amd_sched_entity_init(&ring->sched, &ctx->rings[i].entity,
-                                          rq, amdgpu_sched_jobs);
+                                          rq, amdgpu_sched_jobs, &ctx->guilty);
                 if (r)
                         goto failed;
         }

@@ -216,11 +217,45 @@ static int amdgpu_ctx_query(struct amdgpu_device *adev,
         /* determine if a GPU reset has occured since the last call */
         reset_counter = atomic_read(&adev->gpu_reset_counter);
         /* TODO: this should ideally return NO, GUILTY, or INNOCENT. */
-        if (ctx->reset_counter == reset_counter)
+        if (ctx->reset_counter_query == reset_counter)
                 out->state.reset_status = AMDGPU_CTX_NO_RESET;
         else
                 out->state.reset_status = AMDGPU_CTX_UNKNOWN_RESET;
-        ctx->reset_counter = reset_counter;
+        ctx->reset_counter_query = reset_counter;
+
+        mutex_unlock(&mgr->lock);
+        return 0;
+}
+
+static int amdgpu_ctx_query2(struct amdgpu_device *adev,
+        struct amdgpu_fpriv *fpriv, uint32_t id,
+        union drm_amdgpu_ctx_out *out)
+{
+        struct amdgpu_ctx *ctx;
+        struct amdgpu_ctx_mgr *mgr;
+
+        if (!fpriv)
+                return -EINVAL;
+
+        mgr = &fpriv->ctx_mgr;
+        mutex_lock(&mgr->lock);
+        ctx = idr_find(&mgr->ctx_handles, id);
+        if (!ctx) {
+                mutex_unlock(&mgr->lock);
+                return -EINVAL;
+        }
+
+        out->state.flags = 0x0;
+        out->state.hangs = 0x0;
+
+        if (ctx->reset_counter != atomic_read(&adev->gpu_reset_counter))
+                out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RESET;
+
+        if (ctx->vram_lost_counter != atomic_read(&adev->vram_lost_counter))
+                out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_VRAMLOST;
+
+        if (atomic_read(&ctx->guilty))
+                out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_GUILTY;
 
         mutex_unlock(&mgr->lock);
         return 0;

@@ -257,6 +292,9 @@ int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
         case AMDGPU_CTX_OP_QUERY_STATE:
                 r = amdgpu_ctx_query(adev, fpriv, id, &args->out);
                 break;
+        case AMDGPU_CTX_OP_QUERY_STATE2:
+                r = amdgpu_ctx_query2(adev, fpriv, id, &args->out);
+                break;
         default:
                 return -EINVAL;
         }

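The context hunks above add an AMDGPU_CTX_OP_QUERY_STATE2 path that reports reset state as a flag word (RESET / VRAMLOST / GUILTY) instead of a single reset counter. Below is a hedged userspace sketch of how such a query could look; it assumes the matching uapi additions to amdgpu_drm.h land together with this kernel change, and the include paths and helper name are illustrative, not taken from this page.

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <libdrm/amdgpu_drm.h>   /* include path varies between distributions */

/* Hypothetical helper: ask the kernel for the post-reset state of a context. */
static int query_ctx_state2(int fd, uint32_t ctx_id, uint64_t *flags)
{
        union drm_amdgpu_ctx args;

        memset(&args, 0, sizeof(args));
        args.in.op = AMDGPU_CTX_OP_QUERY_STATE2;
        args.in.ctx_id = ctx_id;

        if (drmIoctl(fd, DRM_IOCTL_AMDGPU_CTX, &args))
                return -1;

        /* AMDGPU_CTX_QUERY2_FLAGS_RESET    - a reset happened since creation
         * AMDGPU_CTX_QUERY2_FLAGS_VRAMLOST - VRAM content was lost by it
         * AMDGPU_CTX_QUERY2_FLAGS_GUILTY   - this context caused the hang */
        *flags = args.out.state.flags;
        return 0;
}
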
@@ -410,6 +410,9 @@ static int amdgpu_doorbell_init(struct amdgpu_device *adev)
                 return 0;
         }
 
+        if (pci_resource_flags(adev->pdev, 2) & IORESOURCE_UNSET)
+                return -EINVAL;
+
         /* doorbell bar mapping */
         adev->doorbell.base = pci_resource_start(adev->pdev, 2);
         adev->doorbell.size = pci_resource_len(adev->pdev, 2);

@@ -575,41 +578,13 @@ void amdgpu_wb_free(struct amdgpu_device *adev, u32 wb)
  * @base: base address at which to put VRAM
  *
  * Function will try to place VRAM at base address provided
- * as parameter (which is so far either PCI aperture address or
- * for IGP TOM base address).
- *
- * If there is not enough space to fit the unvisible VRAM in the 32bits
- * address space then we limit the VRAM size to the aperture.
- *
- * Note: We don't explicitly enforce VRAM start to be aligned on VRAM size,
- * this shouldn't be a problem as we are using the PCI aperture as a reference.
- * Otherwise this would be needed for rv280, all r3xx, and all r4xx, but
- * not IGP.
- *
- * Note: we use mc_vram_size as on some board we need to program the mc to
- * cover the whole aperture even if VRAM size is inferior to aperture size
- * Novell bug 204882 + along with lots of ubuntu ones
- *
- * Note: when limiting vram it's safe to overwritte real_vram_size because
- * we are not in case where real_vram_size is inferior to mc_vram_size (ie
- * note afected by bogus hw of Novell bug 204882 + along with lots of ubuntu
- * ones)
- *
- * Note: IGP TOM addr should be the same as the aperture addr, we don't
- * explicitly check for that though.
- *
- * FIXME: when reducing VRAM size align new size on power of 2.
+ * as parameter.
  */
 void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 base)
 {
         uint64_t limit = (uint64_t)amdgpu_vram_limit << 20;
 
         mc->vram_start = base;
-        if (mc->mc_vram_size > (adev->mc.mc_mask - base + 1)) {
-                dev_warn(adev->dev, "limiting VRAM to PCI aperture size\n");
-                mc->real_vram_size = mc->aper_size;
-                mc->mc_vram_size = mc->aper_size;
-        }
         mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
         if (limit && limit < mc->real_vram_size)
                 mc->real_vram_size = limit;

@@ -647,7 +622,10 @@ void amdgpu_gart_location(struct amdgpu_device *adev, struct amdgpu_mc *mc)
                         dev_warn(adev->dev, "limiting GTT\n");
                         mc->gart_size = size_af;
                 }
-                mc->gart_start = mc->vram_end + 1;
+                /* VCE doesn't like it when BOs cross a 4GB segment, so align
+                 * the GART base on a 4GB boundary as well.
+                 */
+                mc->gart_start = ALIGN(mc->vram_end + 1, 0x100000000ULL);
         }
         mc->gart_end = mc->gart_start + mc->gart_size - 1;
         dev_info(adev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",

@@ -679,9 +657,13 @@ void amdgpu_fw_reserve_vram_fini(struct amdgpu_device *adev)
  */
 int amdgpu_fw_reserve_vram_init(struct amdgpu_device *adev)
 {
+        struct ttm_operation_ctx ctx = { false, false };
         int r = 0;
-        u64 gpu_addr;
+        int i;
         u64 vram_size = adev->mc.visible_vram_size;
+        u64 offset = adev->fw_vram_usage.start_offset;
+        u64 size = adev->fw_vram_usage.size;
+        struct amdgpu_bo *bo;
 
         adev->fw_vram_usage.va = NULL;
         adev->fw_vram_usage.reserved_bo = NULL;

@@ -690,7 +672,7 @@ int amdgpu_fw_reserve_vram_init(struct amdgpu_device *adev)
             adev->fw_vram_usage.size <= vram_size) {
 
                 r = amdgpu_bo_create(adev, adev->fw_vram_usage.size,
-                        PAGE_SIZE, true, 0,
+                        PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
                         AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
                         AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, NULL, NULL, 0,
                         &adev->fw_vram_usage.reserved_bo);

@@ -700,11 +682,28 @@ int amdgpu_fw_reserve_vram_init(struct amdgpu_device *adev)
                 r = amdgpu_bo_reserve(adev->fw_vram_usage.reserved_bo, false);
                 if (r)
                         goto error_reserve;
+
+                /* remove the original mem node and create a new one at the
+                 * request position
+                 */
+                bo = adev->fw_vram_usage.reserved_bo;
+                offset = ALIGN(offset, PAGE_SIZE);
+                for (i = 0; i < bo->placement.num_placement; ++i) {
+                        bo->placements[i].fpfn = offset >> PAGE_SHIFT;
+                        bo->placements[i].lpfn = (offset + size) >> PAGE_SHIFT;
+                }
+
+                ttm_bo_mem_put(&bo->tbo, &bo->tbo.mem);
+                r = ttm_bo_mem_space(&bo->tbo, &bo->placement,
+                                     &bo->tbo.mem, &ctx);
+                if (r)
+                        goto error_pin;
+
                 r = amdgpu_bo_pin_restricted(adev->fw_vram_usage.reserved_bo,
                         AMDGPU_GEM_DOMAIN_VRAM,
                         adev->fw_vram_usage.start_offset,
                         (adev->fw_vram_usage.start_offset +
-                        adev->fw_vram_usage.size), &gpu_addr);
+                        adev->fw_vram_usage.size), NULL);
                 if (r)
                         goto error_pin;
                 r = amdgpu_bo_kmap(adev->fw_vram_usage.reserved_bo,

@@ -728,6 +727,75 @@ int amdgpu_fw_reserve_vram_init(struct amdgpu_device *adev)
         return r;
 }
 
+/**
+ * amdgpu_device_resize_fb_bar - try to resize FB BAR
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Try to resize FB BAR to make all VRAM CPU accessible. We try very hard not
+ * to fail, but if any of the BARs is not accessible after the size we abort
+ * driver loading by returning -ENODEV.
+ */
+int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
+{
+        u64 space_needed = roundup_pow_of_two(adev->mc.real_vram_size);
+        u32 rbar_size = order_base_2(((space_needed >> 20) | 1)) - 1;
+        struct pci_bus *root;
+        struct resource *res;
+        unsigned i;
+        u16 cmd;
+        int r;
+
+        /* Bypass for VF */
+        if (amdgpu_sriov_vf(adev))
+                return 0;
+
+        /* Check if the root BUS has 64bit memory resources */
+        root = adev->pdev->bus;
+        while (root->parent)
+                root = root->parent;
+
+        pci_bus_for_each_resource(root, res, i) {
+                if (res && res->flags & IORESOURCE_MEM_64 &&
+                    res->start > 0x100000000ull)
+                        break;
+        }
+
+        /* Trying to resize is pointless without a root hub window above 4GB */
+        if (!res)
+                return 0;
+
+        /* Disable memory decoding while we change the BAR addresses and size */
+        pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd);
+        pci_write_config_word(adev->pdev, PCI_COMMAND,
+                              cmd & ~PCI_COMMAND_MEMORY);
+
+        /* Free the VRAM and doorbell BAR, we most likely need to move both. */
+        amdgpu_doorbell_fini(adev);
+        if (adev->asic_type >= CHIP_BONAIRE)
+                pci_release_resource(adev->pdev, 2);
+
+        pci_release_resource(adev->pdev, 0);
+
+        r = pci_resize_resource(adev->pdev, 0, rbar_size);
+        if (r == -ENOSPC)
+                DRM_INFO("Not enough PCI address space for a large BAR.");
+        else if (r && r != -ENOTSUPP)
+                DRM_ERROR("Problem resizing BAR0 (%d).", r);
+
+        pci_assign_unassigned_bus_resources(adev->pdev->bus);
+
+        /* When the doorbell or fb BAR isn't available we have no chance of
+         * using the device.
+         */
+        r = amdgpu_doorbell_init(adev);
+        if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET))
+                return -ENODEV;
+
+        pci_write_config_word(adev->pdev, PCI_COMMAND, cmd);
+
+        return 0;
+}
+
 /*
  * GPU helpers function.

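One small but easy-to-miss change above is that the GART base is now aligned to a 4 GB boundary (ALIGN(mc->vram_end + 1, 0x100000000ULL)) so a buffer placed right at the start of the GART cannot straddle a 4 GB segment, which VCE does not tolerate. A tiny stand-alone sketch of that arithmetic, with made-up example numbers:

#include <stdint.h>
#include <stdio.h>

/* ALIGN(x, a) in the kernel rounds x up to the next multiple of a
 * (a must be a power of two); this macro reproduces it for the example. */
#define ALIGN_UP(x, a) (((x) + ((a) - 1)) & ~((uint64_t)(a) - 1))

int main(void)
{
        uint64_t vram_end = 0x23fffffffULL; /* made-up: 9 GB of VRAM ends here */
        uint64_t gart_start = ALIGN_UP(vram_end + 1, 0x100000000ULL);

        /* 0x240000000 is not a 4 GB multiple, so the GART base is pushed up
         * to 0x300000000, the next 4 GB boundary. */
        printf("gart_start = 0x%llx\n", (unsigned long long)gart_start);
        return 0;
}
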
@@ -1029,7 +1097,7 @@ static int amdgpu_atombios_init(struct amdgpu_device *adev)
                 atom_card_info->ioreg_read = cail_ioreg_read;
                 atom_card_info->ioreg_write = cail_ioreg_write;
         } else {
-                DRM_INFO("PCI I/O BAR is not found. Using MMIO to access ATOM BIOS\n");
+                DRM_DEBUG("PCI I/O BAR is not found. Using MMIO to access ATOM BIOS\n");
                 atom_card_info->ioreg_read = cail_reg_read;
                 atom_card_info->ioreg_write = cail_reg_write;
         }

@@ -1094,20 +1162,8 @@ static void amdgpu_check_block_size(struct amdgpu_device *adev)
         if (amdgpu_vm_block_size < 9) {
                 dev_warn(adev->dev, "VM page table size (%d) too small\n",
                          amdgpu_vm_block_size);
-                goto def_value;
+                amdgpu_vm_block_size = -1;
         }
-
-        if (amdgpu_vm_block_size > 24 ||
-            (amdgpu_vm_size * 1024) < (1ull << amdgpu_vm_block_size)) {
-                dev_warn(adev->dev, "VM page table size (%d) too large\n",
-                         amdgpu_vm_block_size);
-                goto def_value;
-        }
-
-        return;
-
-def_value:
-        amdgpu_vm_block_size = -1;
 }
 
 static void amdgpu_check_vm_size(struct amdgpu_device *adev)

@@ -1116,31 +1172,11 @@ static void amdgpu_check_vm_size(struct amdgpu_device *adev)
         if (amdgpu_vm_size == -1)
                 return;
 
-        if (!is_power_of_2(amdgpu_vm_size)) {
-                dev_warn(adev->dev, "VM size (%d) must be a power of 2\n",
-                         amdgpu_vm_size);
-                goto def_value;
-        }
-
         if (amdgpu_vm_size < 1) {
                 dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
                          amdgpu_vm_size);
-                goto def_value;
+                amdgpu_vm_size = -1;
         }
-
-        /*
-         * Max GPUVM size for Cayman, SI, CI VI are 40 bits.
-         */
-        if (amdgpu_vm_size > 1024) {
-                dev_warn(adev->dev, "VM size (%d) too large, max is 1TB\n",
-                         amdgpu_vm_size);
-                goto def_value;
-        }
-
-        return;
-
-def_value:
-        amdgpu_vm_size = -1;
 }
 
 /**

@@ -1622,10 +1658,12 @@ static int amdgpu_early_init(struct amdgpu_device *adev)
         if (r)
                 return r;
 
+        amdgpu_amdkfd_device_probe(adev);
+
         if (amdgpu_sriov_vf(adev)) {
                 r = amdgpu_virt_request_full_gpu(adev, true);
                 if (r)
-                        return r;
+                        return -EAGAIN;
         }
 
         for (i = 0; i < adev->num_ip_blocks; i++) {

@@ -1716,6 +1754,11 @@ static int amdgpu_init(struct amdgpu_device *adev)
                 adev->ip_blocks[i].status.hw = true;
         }
 
+        amdgpu_amdkfd_device_init(adev);
+
+        if (amdgpu_sriov_vf(adev))
+                amdgpu_virt_release_full_gpu(adev, true);
+
         return 0;
 }
 

@@ -1783,6 +1826,7 @@ static int amdgpu_fini(struct amdgpu_device *adev)
 {
         int i, r;
 
+        amdgpu_amdkfd_device_fini(adev);
         /* need to disable SMC first */
         for (i = 0; i < adev->num_ip_blocks; i++) {
                 if (!adev->ip_blocks[i].status.hw)

@@ -1811,6 +1855,7 @@ static int amdgpu_fini(struct amdgpu_device *adev)
                 if (!adev->ip_blocks[i].status.hw)
                         continue;
                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
+                        amdgpu_free_static_csa(adev);
                         amdgpu_wb_fini(adev);
                         amdgpu_vram_scratch_fini(adev);
                 }

@@ -1859,7 +1904,8 @@ static int amdgpu_fini(struct amdgpu_device *adev)
         }
 
         if (amdgpu_sriov_vf(adev))
-                amdgpu_virt_release_full_gpu(adev, false);
+                if (amdgpu_virt_release_full_gpu(adev, false))
+                        DRM_ERROR("failed to release exclusive mode on fini\n");
 
         return 0;
 }

@@ -2163,6 +2209,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
         mutex_init(&adev->mn_lock);
         mutex_init(&adev->virt.vf_errors.lock);
         hash_init(adev->mn_hash);
+        mutex_init(&adev->lock_reset);
 
         amdgpu_check_arguments(adev);
 

@@ -2179,9 +2226,6 @@ int amdgpu_device_init(struct amdgpu_device *adev,
         INIT_LIST_HEAD(&adev->shadow_list);
         mutex_init(&adev->shadow_list_lock);
 
-        INIT_LIST_HEAD(&adev->gtt_list);
-        spin_lock_init(&adev->gtt_list_lock);
-
         INIT_LIST_HEAD(&adev->ring_lru_list);
         spin_lock_init(&adev->ring_lru_list_lock);
 

@@ -2267,8 +2311,6 @@ int amdgpu_device_init(struct amdgpu_device *adev,
                         dev_err(adev->dev, "gpu post error!\n");
                         goto failed;
                 }
-        } else {
-                DRM_INFO("GPU post is not needed\n");
         }
 
         if (adev->is_atom_fw) {

@@ -2305,6 +2347,18 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 
         r = amdgpu_init(adev);
         if (r) {
+                /* failed in exclusive mode due to timeout */
+                if (amdgpu_sriov_vf(adev) &&
+                    !amdgpu_sriov_runtime(adev) &&
+                    amdgpu_virt_mmio_blocked(adev) &&
+                    !amdgpu_virt_wait_reset(adev)) {
+                        dev_err(adev->dev, "VF exclusive mode timeout\n");
+                        /* Don't send request since VF is inactive. */
+                        adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
+                        adev->virt.ops = NULL;
+                        r = -EAGAIN;
+                        goto failed;
+                }
                 dev_err(adev->dev, "amdgpu_init failed\n");
                 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
                 amdgpu_fini(adev);

@@ -2392,6 +2446,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
         amdgpu_vf_error_trans_all(adev);
         if (runtime)
                 vga_switcheroo_fini_domain_pm_ops(adev->dev);
+
         return r;
 }
 

@@ -2414,7 +2469,6 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
         /* evict vram memory */
         amdgpu_bo_evict_vram(adev);
         amdgpu_ib_pool_fini(adev);
-        amdgpu_fw_reserve_vram_fini(adev);
         amdgpu_fence_driver_fini(adev);
         amdgpu_fbdev_fini(adev);
         r = amdgpu_fini(adev);

@@ -2819,181 +2873,19 @@ static int amdgpu_recover_vram_from_shadow(struct amdgpu_device *adev,
         return r;
 }
 
-/**
- * amdgpu_sriov_gpu_reset - reset the asic
+/*
+ * amdgpu_reset - reset ASIC/GPU for bare-metal or passthrough
  *
  * @adev: amdgpu device pointer
- * @job: which job trigger hang
+ * @reset_flags: output param tells caller the reset result
  *
- * Attempt the reset the GPU if it has hung (all asics).
- * for SRIOV case.
- * Returns 0 for success or an error on failure.
- */
-int amdgpu_sriov_gpu_reset(struct amdgpu_device *adev, struct amdgpu_job *job)
+ * attempt to do soft-reset or full-reset and reinitialize Asic
+ * return 0 means successed otherwise failed
+ */
+static int amdgpu_reset(struct amdgpu_device *adev, uint64_t* reset_flags)
 {
-        int i, j, r = 0;
-        int resched;
-        struct amdgpu_bo *bo, *tmp;
-        struct amdgpu_ring *ring;
-        struct dma_fence *fence = NULL, *next = NULL;
+        bool need_full_reset, vram_lost = 0;
+        int r;
 
-        mutex_lock(&adev->virt.lock_reset);
-        atomic_inc(&adev->gpu_reset_counter);
-        adev->in_sriov_reset = true;
-
-        /* block TTM */
-        resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
-
-        /* we start from the ring trigger GPU hang */
-        j = job ? job->ring->idx : 0;
-
-        /* block scheduler */
-        for (i = j; i < j + AMDGPU_MAX_RINGS; ++i) {
-                ring = adev->rings[i % AMDGPU_MAX_RINGS];
-                if (!ring || !ring->sched.thread)
-                        continue;
-
-                kthread_park(ring->sched.thread);
-
-                if (job && j != i)
-                        continue;
-
-                /* here give the last chance to check if job removed from mirror-list
-                 * since we already pay some time on kthread_park */
-                if (job && list_empty(&job->base.node)) {
-                        kthread_unpark(ring->sched.thread);
-                        goto give_up_reset;
-                }
-
-                if (amd_sched_invalidate_job(&job->base, amdgpu_job_hang_limit))
-                        amd_sched_job_kickout(&job->base);
-
-                /* only do job_reset on the hang ring if @job not NULL */
-                amd_sched_hw_job_reset(&ring->sched);
-
-                /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
-                amdgpu_fence_driver_force_completion_ring(ring);
-        }
-
-        /* request to take full control of GPU before re-initialization */
-        if (job)
-                amdgpu_virt_reset_gpu(adev);
-        else
-                amdgpu_virt_request_full_gpu(adev, true);
-
-
-        /* Resume IP prior to SMC */
-        amdgpu_sriov_reinit_early(adev);
-
-        /* we need recover gart prior to run SMC/CP/SDMA resume */
-        amdgpu_ttm_recover_gart(adev);
-
-        /* now we are okay to resume SMC/CP/SDMA */
-        amdgpu_sriov_reinit_late(adev);
-
-        amdgpu_irq_gpu_reset_resume_helper(adev);
-
-        if (amdgpu_ib_ring_tests(adev))
-                dev_err(adev->dev, "[GPU_RESET] ib ring test failed (%d).\n", r);
-
-        /* release full control of GPU after ib test */
-        amdgpu_virt_release_full_gpu(adev, true);
-
-        DRM_INFO("recover vram bo from shadow\n");
-
-        ring = adev->mman.buffer_funcs_ring;
-        mutex_lock(&adev->shadow_list_lock);
-        list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
-                next = NULL;
-                amdgpu_recover_vram_from_shadow(adev, ring, bo, &next);
-                if (fence) {
-                        r = dma_fence_wait(fence, false);
-                        if (r) {
-                                WARN(r, "recovery from shadow isn't completed\n");
-                                break;
-                        }
-                }
-
-                dma_fence_put(fence);
-                fence = next;
-        }
-        mutex_unlock(&adev->shadow_list_lock);
-
-        if (fence) {
-                r = dma_fence_wait(fence, false);
-                if (r)
-                        WARN(r, "recovery from shadow isn't completed\n");
-        }
-        dma_fence_put(fence);
-
-        for (i = j; i < j + AMDGPU_MAX_RINGS; ++i) {
-                ring = adev->rings[i % AMDGPU_MAX_RINGS];
-                if (!ring || !ring->sched.thread)
-                        continue;
-
-                if (job && j != i) {
-                        kthread_unpark(ring->sched.thread);
-                        continue;
-                }
-
-                amd_sched_job_recovery(&ring->sched);
-                kthread_unpark(ring->sched.thread);
-        }
-
-        drm_helper_resume_force_mode(adev->ddev);
-give_up_reset:
-        ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
-        if (r) {
-                /* bad news, how to tell it to userspace ? */
-                dev_info(adev->dev, "GPU reset failed\n");
-        } else {
-                dev_info(adev->dev, "GPU reset successed!\n");
-        }
-
-        adev->in_sriov_reset = false;
-        mutex_unlock(&adev->virt.lock_reset);
-        return r;
-}
-
-/**
- * amdgpu_gpu_reset - reset the asic
- *
- * @adev: amdgpu device pointer
- *
- * Attempt the reset the GPU if it has hung (all asics).
- * Returns 0 for success or an error on failure.
- */
-int amdgpu_gpu_reset(struct amdgpu_device *adev)
-{
-        struct drm_atomic_state *state = NULL;
-        int i, r;
-        int resched;
-        bool need_full_reset, vram_lost = false;
-
-        if (!amdgpu_check_soft_reset(adev)) {
-                DRM_INFO("No hardware hang detected. Did some blocks stall?\n");
-                return 0;
-        }
-
-        atomic_inc(&adev->gpu_reset_counter);
-
-        /* block TTM */
-        resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
-        /* store modesetting */
-        if (amdgpu_device_has_dc_support(adev))
-                state = drm_atomic_helper_suspend(adev->ddev);
-
-        /* block scheduler */
-        for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
-                struct amdgpu_ring *ring = adev->rings[i];
-
-                if (!ring || !ring->sched.thread)
-                        continue;
-                kthread_park(ring->sched.thread);
-                amd_sched_hw_job_reset(&ring->sched);
-        }
-        /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
-        amdgpu_fence_driver_force_completion(adev);
 
         need_full_reset = amdgpu_need_full_reset(adev);
 

@@ -3005,6 +2897,7 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev)
                         DRM_INFO("soft reset failed, will fallback to full reset!\n");
                         need_full_reset = true;
                 }
+
         }
 
         if (need_full_reset) {

@@ -3022,21 +2915,27 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev)
                         r = amdgpu_resume_phase1(adev);
                         if (r)
                                 goto out;
 
                         vram_lost = amdgpu_check_vram_lost(adev);
                         if (vram_lost) {
                                 DRM_ERROR("VRAM is lost!\n");
                                 atomic_inc(&adev->vram_lost_counter);
                         }
-                        r = amdgpu_ttm_recover_gart(adev);
+                        r = amdgpu_gtt_mgr_recover(
+                                &adev->mman.bdev.man[TTM_PL_TT]);
                         if (r)
                                 goto out;
 
                         r = amdgpu_resume_phase2(adev);
                         if (r)
                                 goto out;
 
                         if (vram_lost)
                                 amdgpu_fill_reset_magic(adev);
                 }
         }
 
 out:
         if (!r) {
                 amdgpu_irq_gpu_reset_resume_helper(adev);

@@ -3047,11 +2946,132 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev)
                         need_full_reset = true;
                         goto retry;
                 }
-        /**
-         * recovery vm page tables, since we cannot depend on VRAM is
-         * consistent after gpu full reset.
-         */
-        if (need_full_reset && amdgpu_need_backup(adev)) {
+        }
+
+        if (reset_flags) {
+                if (vram_lost)
+                        (*reset_flags) |= AMDGPU_RESET_INFO_VRAM_LOST;
+
+                if (need_full_reset)
+                        (*reset_flags) |= AMDGPU_RESET_INFO_FULLRESET;
+        }
+
+        return r;
+}
+
+/*
+ * amdgpu_reset_sriov - reset ASIC for SR-IOV vf
+ *
+ * @adev: amdgpu device pointer
+ * @reset_flags: output param tells caller the reset result
+ *
+ * do VF FLR and reinitialize Asic
+ * return 0 means successed otherwise failed
+ */
+static int amdgpu_reset_sriov(struct amdgpu_device *adev, uint64_t *reset_flags, bool from_hypervisor)
+{
+        int r;
+
+        if (from_hypervisor)
+                r = amdgpu_virt_request_full_gpu(adev, true);
+        else
+                r = amdgpu_virt_reset_gpu(adev);
+        if (r)
+                return r;
+
+        /* Resume IP prior to SMC */
+        r = amdgpu_sriov_reinit_early(adev);
+        if (r)
+                goto error;
+
+        /* we need recover gart prior to run SMC/CP/SDMA resume */
+        amdgpu_gtt_mgr_recover(&adev->mman.bdev.man[TTM_PL_TT]);
+
+        /* now we are okay to resume SMC/CP/SDMA */
+        r = amdgpu_sriov_reinit_late(adev);
+        if (r)
+                goto error;
+
+        amdgpu_irq_gpu_reset_resume_helper(adev);
+        r = amdgpu_ib_ring_tests(adev);
+        if (r)
+                dev_err(adev->dev, "[GPU_RESET] ib ring test failed (%d).\n", r);
+
+error:
+        /* release full control of GPU after ib test */
+        amdgpu_virt_release_full_gpu(adev, true);
+
+        if (reset_flags) {
+                if (adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
+                        (*reset_flags) |= AMDGPU_RESET_INFO_VRAM_LOST;
+                        atomic_inc(&adev->vram_lost_counter);
+                }
+
+                /* VF FLR or hotlink reset is always full-reset */
+                (*reset_flags) |= AMDGPU_RESET_INFO_FULLRESET;
+        }
+
+        return r;
+}
+
+/**
+ * amdgpu_gpu_recover - reset the asic and recover scheduler
+ *
+ * @adev: amdgpu device pointer
+ * @job: which job trigger hang
+ *
+ * Attempt to reset the GPU if it has hung (all asics).
+ * Returns 0 for success or an error on failure.
+ */
+int amdgpu_gpu_recover(struct amdgpu_device *adev, struct amdgpu_job *job)
+{
+        struct drm_atomic_state *state = NULL;
+        uint64_t reset_flags = 0;
+        int i, r, resched;
+
+        if (!amdgpu_check_soft_reset(adev)) {
+                DRM_INFO("No hardware hang detected. Did some blocks stall?\n");
+                return 0;
+        }
+
+        dev_info(adev->dev, "GPU reset begin!\n");
+
+        mutex_lock(&adev->lock_reset);
+        atomic_inc(&adev->gpu_reset_counter);
+        adev->in_gpu_reset = 1;
+
+        /* block TTM */
+        resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
+        /* store modesetting */
+        if (amdgpu_device_has_dc_support(adev))
+                state = drm_atomic_helper_suspend(adev->ddev);
+
+        /* block scheduler */
+        for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
+                struct amdgpu_ring *ring = adev->rings[i];
+
+                if (!ring || !ring->sched.thread)
+                        continue;
+
+                /* only focus on the ring hit timeout if &job not NULL */
+                if (job && job->ring->idx != i)
+                        continue;
+
+                kthread_park(ring->sched.thread);
+                amd_sched_hw_job_reset(&ring->sched, &job->base);
+
+                /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
+                amdgpu_fence_driver_force_completion(ring);
+        }
+
+        if (amdgpu_sriov_vf(adev))
+                r = amdgpu_reset_sriov(adev, &reset_flags, job ? false : true);
+        else
+                r = amdgpu_reset(adev, &reset_flags);
+
+        if (!r) {
+                if (((reset_flags & AMDGPU_RESET_INFO_FULLRESET) && !(adev->flags & AMD_IS_APU)) ||
+                        (reset_flags & AMDGPU_RESET_INFO_VRAM_LOST)) {
                         struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
                         struct amdgpu_bo *bo, *tmp;
                         struct dma_fence *fence = NULL, *next = NULL;

@@ -3080,40 +3100,56 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev)
                         }
                         dma_fence_put(fence);
                 }
 
                 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
                         struct amdgpu_ring *ring = adev->rings[i];
 
                         if (!ring || !ring->sched.thread)
                                 continue;
 
+                        /* only focus on the ring hit timeout if &job not NULL */
+                        if (job && job->ring->idx != i)
+                                continue;
+
                         amd_sched_job_recovery(&ring->sched);
                         kthread_unpark(ring->sched.thread);
                 }
         } else {
-                dev_err(adev->dev, "asic resume failed (%d).\n", r);
                 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
-                        if (adev->rings[i] && adev->rings[i]->sched.thread) {
-                                kthread_unpark(adev->rings[i]->sched.thread);
-                        }
+                        struct amdgpu_ring *ring = adev->rings[i];
+
+                        if (!ring || !ring->sched.thread)
+                                continue;
+
+                        /* only focus on the ring hit timeout if &job not NULL */
+                        if (job && job->ring->idx != i)
+                                continue;
|
||||||
|
|
||||||
|
kthread_unpark(adev->rings[i]->sched.thread);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if (amdgpu_device_has_dc_support(adev)) {
|
if (amdgpu_device_has_dc_support(adev)) {
|
||||||
r = drm_atomic_helper_resume(adev->ddev, state);
|
if (drm_atomic_helper_resume(adev->ddev, state))
|
||||||
|
dev_info(adev->dev, "drm resume failed:%d\n", r);
|
||||||
amdgpu_dm_display_resume(adev);
|
amdgpu_dm_display_resume(adev);
|
||||||
} else
|
} else {
|
||||||
drm_helper_resume_force_mode(adev->ddev);
|
drm_helper_resume_force_mode(adev->ddev);
|
||||||
|
}
|
||||||
|
|
||||||
ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
|
ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
|
||||||
|
|
||||||
if (r) {
|
if (r) {
|
||||||
/* bad news, how to tell it to userspace ? */
|
/* bad news, how to tell it to userspace ? */
|
||||||
dev_info(adev->dev, "GPU reset failed\n");
|
dev_info(adev->dev, "GPU reset(%d) failed\n", atomic_read(&adev->gpu_reset_counter));
|
||||||
}
|
amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
|
||||||
else {
|
} else {
|
||||||
dev_info(adev->dev, "GPU reset successed!\n");
|
dev_info(adev->dev, "GPU reset(%d) successed!\n",atomic_read(&adev->gpu_reset_counter));
|
||||||
}
|
}
|
||||||
|
|
||||||
amdgpu_vf_error_trans_all(adev);
|
amdgpu_vf_error_trans_all(adev);
|
||||||
|
adev->in_gpu_reset = 0;
|
||||||
|
mutex_unlock(&adev->lock_reset);
|
||||||
return r;
|
return r;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
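Editor's note: the recovery path above reports its outcome through the two AMDGPU_RESET_INFO_* bits. The sketch below is illustrative only (the helper name and messages are made up, not part of the patch) and shows how a caller is expected to interpret the flags filled in by amdgpu_reset()/amdgpu_reset_sriov().

/* Illustrative only: interpreting the reset_flags value produced above. */
static void example_report_reset(struct amdgpu_device *adev, uint64_t reset_flags)
{
	if (reset_flags & AMDGPU_RESET_INFO_VRAM_LOST)
		dev_info(adev->dev, "VRAM contents were lost; buffers must be revalidated\n");

	if (reset_flags & AMDGPU_RESET_INFO_FULLRESET)
		dev_info(adev->dev, "a full ASIC reset (not a soft reset) was performed\n");
}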
@@ -34,6 +34,7 @@
 #include <linux/pm_runtime.h>
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_edid.h>
+#include <drm/drm_fb_helper.h>
 
 static void amdgpu_flip_callback(struct dma_fence *f, struct dma_fence_cb *cb)
 {
@@ -556,15 +557,9 @@ amdgpu_user_framebuffer_create(struct drm_device *dev,
 	return &amdgpu_fb->base;
 }
 
-void amdgpu_output_poll_changed(struct drm_device *dev)
-{
-	struct amdgpu_device *adev = dev->dev_private;
-	amdgpu_fb_output_poll_changed(adev);
-}
-
 const struct drm_mode_config_funcs amdgpu_mode_funcs = {
 	.fb_create = amdgpu_user_framebuffer_create,
-	.output_poll_changed = amdgpu_output_poll_changed
+	.output_poll_changed = drm_fb_helper_output_poll_changed,
 };
 
 static const struct drm_prop_enum_list amdgpu_underscan_enum_list[] =
@@ -25,9 +25,7 @@
 
 struct drm_framebuffer *
 amdgpu_user_framebuffer_create(struct drm_device *dev,
			       struct drm_file *file_priv,
			       const struct drm_mode_fb_cmd2 *mode_cmd);
 
-void amdgpu_output_poll_changed(struct drm_device *dev);
-
 #endif
@@ -360,6 +360,12 @@ enum amdgpu_pcie_gen {
 		((adev)->powerplay.pp_funcs->set_clockgating_by_smu(\
 			(adev)->powerplay.pp_handle, msg_id))
 
+#define amdgpu_dpm_notify_smu_memory_info(adev, virtual_addr_low, \
+			virtual_addr_hi, mc_addr_low, mc_addr_hi, size) \
+		((adev)->powerplay.pp_funcs->notify_smu_memory_info)( \
+			(adev)->powerplay.pp_handle, virtual_addr_low, \
+			virtual_addr_hi, mc_addr_low, mc_addr_hi, size)
+
 struct amdgpu_dpm {
 	struct amdgpu_ps *ps;
 	/* number of valid power states */
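Editor's note: a minimal sketch of how a caller might use the new notify_smu_memory_info wrapper. All names and the split into low/high halves are illustrative assumptions about the caller, not part of the patch; only the macro itself comes from the change above.

/* Illustrative only: forwarding an already-mapped buffer to the SMU. */
static int example_notify_smu(struct amdgpu_device *adev,
			      uint64_t cpu_addr, uint64_t mc_addr, uint32_t size)
{
	if (!adev->powerplay.pp_funcs->notify_smu_memory_info)
		return -EOPNOTSUPP;

	return amdgpu_dpm_notify_smu_memory_info(adev,
			lower_32_bits(cpu_addr), upper_32_bits(cpu_addr),
			lower_32_bits(mc_addr), upper_32_bits(mc_addr), size);
}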
@@ -216,7 +216,7 @@ module_param_named(exp_hw_support, amdgpu_exp_hw_support, int, 0444);
 MODULE_PARM_DESC(dc, "Display Core driver (1 = enable, 0 = disable, -1 = auto (default))");
 module_param_named(dc, amdgpu_dc, int, 0444);
 
-MODULE_PARM_DESC(dc, "Display Core Log Level (0 = minimal (default), 1 = chatty");
+MODULE_PARM_DESC(dc_log, "Display Core Log Level (0 = minimal (default), 1 = chatty");
 module_param_named(dc_log, amdgpu_dc_log, int, 0444);
 
 MODULE_PARM_DESC(sched_jobs, "the max number of jobs supported in the sw queue (default 32)");
@@ -306,7 +306,6 @@ MODULE_PARM_DESC(cik_support, "CIK support (1 = enabled (default), 0 = disabled)
 module_param_named(cik_support, amdgpu_cik_support, int, 0444);
 #endif
 
-
 static const struct pci_device_id pciidlist[] = {
 #ifdef CONFIG_DRM_AMDGPU_SI
 	{0x1002, 0x6780, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
@@ -566,12 +565,13 @@ static int amdgpu_kick_out_firmware_fb(struct pci_dev *pdev)
 	return 0;
 }
 
+
 static int amdgpu_pci_probe(struct pci_dev *pdev,
			    const struct pci_device_id *ent)
 {
 	struct drm_device *dev;
 	unsigned long flags = ent->driver_data;
-	int ret;
+	int ret, retry = 0;
 
 	if ((flags & AMD_EXP_HW_SUPPORT) && !amdgpu_exp_hw_support) {
 		DRM_INFO("This hardware requires experimental hardware support.\n"
@@ -604,8 +604,14 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
 
 	pci_set_drvdata(pdev, dev);
 
+retry_init:
 	ret = drm_dev_register(dev, ent->driver_data);
-	if (ret)
+	if (ret == -EAGAIN && ++retry <= 3) {
+		DRM_INFO("retry init %d\n", retry);
+		/* Don't request EX mode too frequently which is attacking */
+		msleep(5000);
+		goto retry_init;
+	} else if (ret)
 		goto err_pci;
 
 	return 0;
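Editor's note: the probe change above retries drm_dev_register() when an SR-IOV exclusive-mode request times out with -EAGAIN. The sketch below restates the same bounded-retry pattern in isolation; the function name and the retry bound are illustrative stand-ins for what the patch does inline.

/* Illustrative only: bounded retry of device registration on -EAGAIN. */
static int example_register_with_retry(struct drm_device *dev, unsigned long flags)
{
	const int max_tries = 3;
	int retry = 0;
	int ret;

	do {
		ret = drm_dev_register(dev, flags);
		if (ret != -EAGAIN)
			break;
		/* back off so the host is not flooded with exclusive-mode requests */
		msleep(5000);
	} while (++retry <= max_tries);

	return ret;
}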
@@ -283,12 +283,6 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
 	return ret;
 }
 
-void amdgpu_fb_output_poll_changed(struct amdgpu_device *adev)
-{
-	if (adev->mode_info.rfbdev)
-		drm_fb_helper_hotplug_event(&adev->mode_info.rfbdev->helper);
-}
-
 static int amdgpu_fbdev_destroy(struct drm_device *dev, struct amdgpu_fbdev *rfbdev)
 {
 	struct amdgpu_framebuffer *rfb = &rfbdev->rfb;
@@ -393,24 +387,3 @@ bool amdgpu_fbdev_robj_is_fb(struct amdgpu_device *adev, struct amdgpu_bo *robj)
 		return true;
 	return false;
 }
-
-void amdgpu_fbdev_restore_mode(struct amdgpu_device *adev)
-{
-	struct amdgpu_fbdev *afbdev;
-	struct drm_fb_helper *fb_helper;
-	int ret;
-
-	if (!adev)
-		return;
-
-	afbdev = adev->mode_info.rfbdev;
-
-	if (!afbdev)
-		return;
-
-	fb_helper = &afbdev->helper;
-
-	ret = drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper);
-	if (ret)
-		DRM_DEBUG("failed to restore crtc mode\n");
-}
@@ -391,9 +391,9 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
 	ring->fence_drv.irq_type = irq_type;
 	ring->fence_drv.initialized = true;
 
-	dev_info(adev->dev, "fence driver on ring %d use gpu addr 0x%016llx, "
+	dev_dbg(adev->dev, "fence driver on ring %d use gpu addr 0x%016llx, "
		 "cpu addr 0x%p\n", ring->idx,
		 ring->fence_drv.gpu_addr, ring->fence_drv.cpu_addr);
 	return 0;
 }
 
@@ -446,7 +446,7 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
 		timeout = MAX_SCHEDULE_TIMEOUT;
 	}
 	r = amd_sched_init(&ring->sched, &amdgpu_sched_ops,
-			   num_hw_submission,
+			   num_hw_submission, amdgpu_job_hang_limit,
			   timeout, ring->name);
 	if (r) {
 		DRM_ERROR("Failed to create scheduler on ring %s.\n",
@@ -499,7 +499,7 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
 		r = amdgpu_fence_wait_empty(ring);
 		if (r) {
 			/* no need to trigger GPU reset as we are unloading */
-			amdgpu_fence_driver_force_completion(adev);
+			amdgpu_fence_driver_force_completion(ring);
 		}
 		amdgpu_irq_put(adev, ring->fence_drv.irq_src,
			       ring->fence_drv.irq_type);
@@ -534,7 +534,7 @@ void amdgpu_fence_driver_suspend(struct amdgpu_device *adev)
 		r = amdgpu_fence_wait_empty(ring);
 		if (r) {
 			/* delay GPU reset to resume */
-			amdgpu_fence_driver_force_completion(adev);
+			amdgpu_fence_driver_force_completion(ring);
 		}
 
 		/* disable the interrupt */
@@ -571,30 +571,15 @@ void amdgpu_fence_driver_resume(struct amdgpu_device *adev)
 }
 
 /**
- * amdgpu_fence_driver_force_completion - force all fence waiter to complete
+ * amdgpu_fence_driver_force_completion - force signal latest fence of ring
  *
- * @adev: amdgpu device pointer
+ * @ring: fence of the ring to signal
  *
- * In case of GPU reset failure make sure no process keep waiting on fence
- * that will never complete.
  */
-void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev)
+void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring)
 {
-	int i;
-
-	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
-		struct amdgpu_ring *ring = adev->rings[i];
-		if (!ring || !ring->fence_drv.initialized)
-			continue;
-
-		amdgpu_fence_write(ring, ring->fence_drv.sync_seq);
-	}
-}
-
-void amdgpu_fence_driver_force_completion_ring(struct amdgpu_ring *ring)
-{
-	if (ring)
-		amdgpu_fence_write(ring, ring->fence_drv.sync_seq);
+	amdgpu_fence_write(ring, ring->fence_drv.sync_seq);
+	amdgpu_fence_process(ring);
 }
 
 /*
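Editor's note: force_completion now takes a single ring, so a caller that previously completed every ring with one call has to loop itself. A minimal sketch, mirroring the per-ring loop the old helper contained and the way the recovery path in this series uses the new one:

/* Illustrative only: force-completing the latest fence on every initialized ring. */
static void example_force_complete_all(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->fence_drv.initialized)
			continue;

		amdgpu_fence_driver_force_completion(ring);
	}
}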
@@ -709,25 +694,25 @@ static int amdgpu_debugfs_fence_info(struct seq_file *m, void *data)
 }
 
 /**
- * amdgpu_debugfs_gpu_reset - manually trigger a gpu reset
+ * amdgpu_debugfs_gpu_recover - manually trigger a gpu reset & recover
  *
  * Manually trigger a gpu reset at the next fence wait.
  */
-static int amdgpu_debugfs_gpu_reset(struct seq_file *m, void *data)
+static int amdgpu_debugfs_gpu_recover(struct seq_file *m, void *data)
 {
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct amdgpu_device *adev = dev->dev_private;
 
-	seq_printf(m, "gpu reset\n");
-	amdgpu_gpu_reset(adev);
+	seq_printf(m, "gpu recover\n");
+	amdgpu_gpu_recover(adev, NULL);
 
 	return 0;
 }
 
 static const struct drm_info_list amdgpu_debugfs_fence_list[] = {
 	{"amdgpu_fence_info", &amdgpu_debugfs_fence_info, 0, NULL},
-	{"amdgpu_gpu_reset", &amdgpu_debugfs_gpu_reset, 0, NULL}
+	{"amdgpu_gpu_recover", &amdgpu_debugfs_gpu_recover, 0, NULL}
 };
 
 static const struct drm_info_list amdgpu_debugfs_fence_list_sriov[] = {
@@ -56,63 +56,6 @@
  * Common GART table functions.
  */
 
-/**
- * amdgpu_gart_table_ram_alloc - allocate system ram for gart page table
- *
- * @adev: amdgpu_device pointer
- *
- * Allocate system memory for GART page table
- * (r1xx-r3xx, non-pcie r4xx, rs400). These asics require the
- * gart table to be in system memory.
- * Returns 0 for success, -ENOMEM for failure.
- */
-int amdgpu_gart_table_ram_alloc(struct amdgpu_device *adev)
-{
-	void *ptr;
-
-	ptr = pci_alloc_consistent(adev->pdev, adev->gart.table_size,
-				   &adev->gart.table_addr);
-	if (ptr == NULL) {
-		return -ENOMEM;
-	}
-#ifdef CONFIG_X86
-	if (0) {
-		set_memory_uc((unsigned long)ptr,
-			      adev->gart.table_size >> PAGE_SHIFT);
-	}
-#endif
-	adev->gart.ptr = ptr;
-	memset((void *)adev->gart.ptr, 0, adev->gart.table_size);
-	return 0;
-}
-
-/**
- * amdgpu_gart_table_ram_free - free system ram for gart page table
- *
- * @adev: amdgpu_device pointer
- *
- * Free system memory for GART page table
- * (r1xx-r3xx, non-pcie r4xx, rs400). These asics require the
- * gart table to be in system memory.
- */
-void amdgpu_gart_table_ram_free(struct amdgpu_device *adev)
-{
-	if (adev->gart.ptr == NULL) {
-		return;
-	}
-#ifdef CONFIG_X86
-	if (0) {
-		set_memory_wb((unsigned long)adev->gart.ptr,
-			      adev->gart.table_size >> PAGE_SHIFT);
-	}
-#endif
-	pci_free_consistent(adev->pdev, adev->gart.table_size,
-			    (void *)adev->gart.ptr,
-			    adev->gart.table_addr);
-	adev->gart.ptr = NULL;
-	adev->gart.table_addr = 0;
-}
-
 /**
  * amdgpu_gart_table_vram_alloc - allocate vram for gart page table
  *
@@ -377,10 +320,8 @@ int amdgpu_gart_init(struct amdgpu_device *adev)
 #ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
 	/* Allocate pages table */
 	adev->gart.pages = vzalloc(sizeof(void *) * adev->gart.num_cpu_pages);
-	if (adev->gart.pages == NULL) {
-		amdgpu_gart_fini(adev);
+	if (adev->gart.pages == NULL)
 		return -ENOMEM;
-	}
 #endif
 
 	return 0;
@@ -395,11 +336,6 @@ int amdgpu_gart_init(struct amdgpu_device *adev)
  */
 void amdgpu_gart_fini(struct amdgpu_device *adev)
 {
-	if (adev->gart.ready) {
-		/* unbind pages */
-		amdgpu_gart_unbind(adev, 0, adev->gart.num_cpu_pages);
-	}
-	adev->gart.ready = false;
 #ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
 	vfree(adev->gart.pages);
 	adev->gart.pages = NULL;
@@ -39,7 +39,7 @@ struct amdgpu_gart_funcs;
 #define AMDGPU_GPU_PAGE_ALIGN(a) (((a) + AMDGPU_GPU_PAGE_MASK) & ~AMDGPU_GPU_PAGE_MASK)
 
 struct amdgpu_gart {
-	dma_addr_t			table_addr;
+	u64				table_addr;
 	struct amdgpu_bo		*robj;
 	void				*ptr;
 	unsigned			num_gpu_pages;
@@ -56,8 +56,6 @@ struct amdgpu_gart {
 	const struct amdgpu_gart_funcs *gart_funcs;
 };
 
-int amdgpu_gart_table_ram_alloc(struct amdgpu_device *adev);
-void amdgpu_gart_table_ram_free(struct amdgpu_device *adev);
 int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev);
 void amdgpu_gart_table_vram_free(struct amdgpu_device *adev);
 int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev);
@@ -72,7 +72,7 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
 			initial_domain |= AMDGPU_GEM_DOMAIN_GTT;
 			goto retry;
 		}
-		DRM_ERROR("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
+		DRM_DEBUG("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
			  size, initial_domain, alignment, r);
 	}
 	return r;
@@ -282,6 +282,7 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
 int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *filp)
 {
+	struct ttm_operation_ctx ctx = { true, false };
 	struct amdgpu_device *adev = dev->dev_private;
 	struct drm_amdgpu_gem_userptr *args = data;
 	struct drm_gem_object *gobj;
@@ -335,7 +336,7 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
 			goto free_pages;
 
 		amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
-		r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
+		r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
 		amdgpu_bo_unreserve(bo);
 		if (r)
 			goto free_pages;
@@ -557,14 +558,25 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 	int r = 0;
 
 	if (args->va_address < AMDGPU_VA_RESERVED_SIZE) {
-		dev_err(&dev->pdev->dev,
+		dev_dbg(&dev->pdev->dev,
			"va_address 0x%LX is in reserved area 0x%LX\n",
			args->va_address, AMDGPU_VA_RESERVED_SIZE);
 		return -EINVAL;
 	}
 
+	if (args->va_address >= AMDGPU_VA_HOLE_START &&
+	    args->va_address < AMDGPU_VA_HOLE_END) {
+		dev_dbg(&dev->pdev->dev,
+			"va_address 0x%LX is in VA hole 0x%LX-0x%LX\n",
+			args->va_address, AMDGPU_VA_HOLE_START,
+			AMDGPU_VA_HOLE_END);
+		return -EINVAL;
+	}
+
+	args->va_address &= AMDGPU_VA_HOLE_MASK;
+
 	if ((args->flags & ~valid_flags) && (args->flags & ~prt_flags)) {
-		dev_err(&dev->pdev->dev, "invalid flags combination 0x%08X\n",
+		dev_dbg(&dev->pdev->dev, "invalid flags combination 0x%08X\n",
			args->flags);
 		return -EINVAL;
 	}
@@ -576,7 +588,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 	case AMDGPU_VA_OP_REPLACE:
 		break;
 	default:
-		dev_err(&dev->pdev->dev, "unsupported operation %d\n",
+		dev_dbg(&dev->pdev->dev, "unsupported operation %d\n",
			args->operation);
 		return -EINVAL;
 	}
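Editor's note: the 48-bit GPUVM work rejects addresses that fall into the non-canonical hole and then masks the remaining bits. The sketch below restates that screening step on its own; the helper is illustrative, and the actual AMDGPU_VA_HOLE_START/END/MASK values are defined elsewhere in this series.

/* Illustrative only: screening a userspace GPU VA against the canonical-address hole. */
static bool example_va_is_usable(uint64_t va)
{
	if (va >= AMDGPU_VA_HOLE_START && va < AMDGPU_VA_HOLE_END)
		return false;	/* falls into the hole between the two canonical halves */

	return true;		/* caller then applies AMDGPU_VA_HOLE_MASK before use */
}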
@@ -31,6 +31,11 @@ struct amdgpu_gtt_mgr {
 	atomic64_t available;
 };
 
+struct amdgpu_gtt_node {
+	struct drm_mm_node node;
+	struct ttm_buffer_object *tbo;
+};
+
 /**
  * amdgpu_gtt_mgr_init - init GTT manager and DRM MM
  *
@@ -79,17 +84,17 @@ static int amdgpu_gtt_mgr_fini(struct ttm_mem_type_manager *man)
 }
 
 /**
- * amdgpu_gtt_mgr_is_allocated - Check if mem has address space
+ * amdgpu_gtt_mgr_has_gart_addr - Check if mem has address space
  *
  * @mem: the mem object to check
  *
  * Check if a mem object has already address space allocated.
  */
-bool amdgpu_gtt_mgr_is_allocated(struct ttm_mem_reg *mem)
+bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_mem_reg *mem)
 {
-	struct drm_mm_node *node = mem->mm_node;
+	struct amdgpu_gtt_node *node = mem->mm_node;
 
-	return (node->start != AMDGPU_BO_INVALID_OFFSET);
+	return (node->node.start != AMDGPU_BO_INVALID_OFFSET);
 }
 
 /**
@@ -109,12 +114,12 @@ static int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man,
 {
 	struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev);
 	struct amdgpu_gtt_mgr *mgr = man->priv;
-	struct drm_mm_node *node = mem->mm_node;
+	struct amdgpu_gtt_node *node = mem->mm_node;
 	enum drm_mm_insert_mode mode;
 	unsigned long fpfn, lpfn;
 	int r;
 
-	if (amdgpu_gtt_mgr_is_allocated(mem))
+	if (amdgpu_gtt_mgr_has_gart_addr(mem))
 		return 0;
 
 	if (place)
@@ -132,13 +137,13 @@ static int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man,
 		mode = DRM_MM_INSERT_HIGH;
 
 	spin_lock(&mgr->lock);
-	r = drm_mm_insert_node_in_range(&mgr->mm, node,
-					mem->num_pages, mem->page_alignment, 0,
-					fpfn, lpfn, mode);
+	r = drm_mm_insert_node_in_range(&mgr->mm, &node->node, mem->num_pages,
+					mem->page_alignment, 0, fpfn, lpfn,
+					mode);
 	spin_unlock(&mgr->lock);
 
 	if (!r)
-		mem->start = node->start;
+		mem->start = node->node.start;
 
 	return r;
 }
@@ -159,7 +164,7 @@ static int amdgpu_gtt_mgr_new(struct ttm_mem_type_manager *man,
			      struct ttm_mem_reg *mem)
 {
 	struct amdgpu_gtt_mgr *mgr = man->priv;
-	struct drm_mm_node *node;
+	struct amdgpu_gtt_node *node;
 	int r;
 
 	spin_lock(&mgr->lock);
@@ -177,8 +182,9 @@ static int amdgpu_gtt_mgr_new(struct ttm_mem_type_manager *man,
 		goto err_out;
 	}
 
-	node->start = AMDGPU_BO_INVALID_OFFSET;
-	node->size = mem->num_pages;
+	node->node.start = AMDGPU_BO_INVALID_OFFSET;
+	node->node.size = mem->num_pages;
+	node->tbo = tbo;
 	mem->mm_node = node;
 
 	if (place->fpfn || place->lpfn || place->flags & TTM_PL_FLAG_TOPDOWN) {
@@ -190,7 +196,7 @@ static int amdgpu_gtt_mgr_new(struct ttm_mem_type_manager *man,
 			goto err_out;
 		}
 	} else {
-		mem->start = node->start;
+		mem->start = node->node.start;
 	}
 
 	return 0;
@@ -214,14 +220,14 @@ static void amdgpu_gtt_mgr_del(struct ttm_mem_type_manager *man,
			       struct ttm_mem_reg *mem)
 {
 	struct amdgpu_gtt_mgr *mgr = man->priv;
-	struct drm_mm_node *node = mem->mm_node;
+	struct amdgpu_gtt_node *node = mem->mm_node;
 
 	if (!node)
 		return;
 
 	spin_lock(&mgr->lock);
-	if (node->start != AMDGPU_BO_INVALID_OFFSET)
-		drm_mm_remove_node(node);
+	if (node->node.start != AMDGPU_BO_INVALID_OFFSET)
+		drm_mm_remove_node(&node->node);
 	spin_unlock(&mgr->lock);
 	atomic64_add(mem->num_pages, &mgr->available);
 
@@ -244,6 +250,25 @@ uint64_t amdgpu_gtt_mgr_usage(struct ttm_mem_type_manager *man)
 	return (result > 0 ? result : 0) * PAGE_SIZE;
 }
 
+int amdgpu_gtt_mgr_recover(struct ttm_mem_type_manager *man)
+{
+	struct amdgpu_gtt_mgr *mgr = man->priv;
+	struct amdgpu_gtt_node *node;
+	struct drm_mm_node *mm_node;
+	int r = 0;
+
+	spin_lock(&mgr->lock);
+	drm_mm_for_each_node(mm_node, &mgr->mm) {
+		node = container_of(mm_node, struct amdgpu_gtt_node, node);
+		r = amdgpu_ttm_recover_gart(node->tbo);
+		if (r)
+			break;
+	}
+	spin_unlock(&mgr->lock);
+
+	return r;
+}
+
 /**
  * amdgpu_gtt_mgr_debug - dump VRAM table
  *
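Editor's note: the new amdgpu_gtt_node wraps each drm_mm_node together with its TTM buffer object, which is what lets amdgpu_gtt_mgr_recover() walk every live GTT allocation back to a BO after a reset. A minimal sketch of that embedding relationship, with an illustrative helper name:

/* Illustrative only: recovering the owning object from an embedded drm_mm_node. */
static struct ttm_buffer_object *example_node_to_bo(struct drm_mm_node *mm_node)
{
	struct amdgpu_gtt_node *node =
		container_of(mm_node, struct amdgpu_gtt_node, node);

	return node->tbo;
}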
@@ -164,7 +164,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 	}
 
 	if (ring->funcs->emit_pipeline_sync && job &&
-	    ((tmp = amdgpu_sync_get_fence(&job->sched_sync)) ||
+	    ((tmp = amdgpu_sync_get_fence(&job->sched_sync, NULL)) ||
	     amdgpu_vm_need_pipeline_sync(ring, job))) {
 		need_pipe_sync = true;
 		dma_fence_put(tmp);
@@ -88,7 +88,7 @@ static void amdgpu_irq_reset_work_func(struct work_struct *work)
						  reset_work);
 
 	if (!amdgpu_sriov_vf(adev))
-		amdgpu_gpu_reset(adev);
+		amdgpu_gpu_recover(adev, NULL);
 }
 
 /* Disable *all* interrupts */
@@ -232,7 +232,7 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
 		int ret = pci_enable_msi(adev->pdev);
 		if (!ret) {
 			adev->irq.msi_enabled = true;
-			dev_info(adev->dev, "amdgpu: using MSI.\n");
+			dev_dbg(adev->dev, "amdgpu: using MSI.\n");
 		}
 	}
 
@@ -262,7 +262,7 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
 		return r;
 	}
 
-	DRM_INFO("amdgpu: irq initialized.\n");
+	DRM_DEBUG("amdgpu: irq initialized.\n");
 	return 0;
 }
 
@@ -37,10 +37,7 @@ static void amdgpu_job_timedout(struct amd_sched_job *s_job)
		  atomic_read(&job->ring->fence_drv.last_seq),
		  job->ring->fence_drv.sync_seq);
 
-	if (amdgpu_sriov_vf(job->adev))
-		amdgpu_sriov_gpu_reset(job->adev, job);
-	else
-		amdgpu_gpu_reset(job->adev);
+	amdgpu_gpu_recover(job->adev, job);
 }
 
 int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
@@ -63,7 +60,6 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
 	(*job)->num_ibs = num_ibs;
 
 	amdgpu_sync_create(&(*job)->sync);
-	amdgpu_sync_create(&(*job)->dep_sync);
 	amdgpu_sync_create(&(*job)->sched_sync);
 	(*job)->vram_lost_counter = atomic_read(&adev->vram_lost_counter);
 
@@ -104,10 +100,9 @@ static void amdgpu_job_free_cb(struct amd_sched_job *s_job)
 {
 	struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base);
 
-	amdgpu_ring_priority_put(job->ring, amd_sched_get_job_priority(s_job));
+	amdgpu_ring_priority_put(job->ring, s_job->s_priority);
 	dma_fence_put(job->fence);
 	amdgpu_sync_free(&job->sync);
-	amdgpu_sync_free(&job->dep_sync);
 	amdgpu_sync_free(&job->sched_sync);
 	kfree(job);
 }
@@ -118,7 +113,6 @@ void amdgpu_job_free(struct amdgpu_job *job)
 
 	dma_fence_put(job->fence);
 	amdgpu_sync_free(&job->sync);
-	amdgpu_sync_free(&job->dep_sync);
 	amdgpu_sync_free(&job->sched_sync);
 	kfree(job);
 }
@@ -141,28 +135,29 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
 	job->fence_ctx = entity->fence_context;
 	*f = dma_fence_get(&job->base.s_fence->finished);
 	amdgpu_job_free_resources(job);
-	amdgpu_ring_priority_get(job->ring,
-				 amd_sched_get_job_priority(&job->base));
-	amd_sched_entity_push_job(&job->base);
+	amdgpu_ring_priority_get(job->ring, job->base.s_priority);
+	amd_sched_entity_push_job(&job->base, entity);
 
 	return 0;
 }
 
-static struct dma_fence *amdgpu_job_dependency(struct amd_sched_job *sched_job)
+static struct dma_fence *amdgpu_job_dependency(struct amd_sched_job *sched_job,
+					       struct amd_sched_entity *s_entity)
 {
 	struct amdgpu_job *job = to_amdgpu_job(sched_job);
 	struct amdgpu_vm *vm = job->vm;
-	struct dma_fence *fence = amdgpu_sync_get_fence(&job->dep_sync);
+	bool explicit = false;
 	int r;
+	struct dma_fence *fence = amdgpu_sync_get_fence(&job->sync, &explicit);
 
-	if (amd_sched_dependency_optimized(fence, sched_job->s_entity)) {
-		r = amdgpu_sync_fence(job->adev, &job->sched_sync, fence);
-		if (r)
-			DRM_ERROR("Error adding fence to sync (%d)\n", r);
+	if (fence && explicit) {
+		if (amd_sched_dependency_optimized(fence, s_entity)) {
+			r = amdgpu_sync_fence(job->adev, &job->sched_sync, fence, false);
+			if (r)
+				DRM_ERROR("Error adding fence to sync (%d)\n", r);
+		}
 	}
-	if (!fence)
-		fence = amdgpu_sync_get_fence(&job->sync);
+
 	while (fence == NULL && vm && !job->vm_id) {
 		struct amdgpu_ring *ring = job->ring;
 
@@ -172,7 +167,7 @@ static struct dma_fence *amdgpu_job_dependency(struct amd_sched_job *sched_job)
 		if (r)
 			DRM_ERROR("Error getting VM ID (%d)\n", r);
 
-		fence = amdgpu_sync_get_fence(&job->sync);
+		fence = amdgpu_sync_get_fence(&job->sync, NULL);
 	}
 
 	return fence;
@@ -180,7 +175,7 @@ static struct dma_fence *amdgpu_job_dependency(struct amd_sched_job *sched_job)
 
 static struct dma_fence *amdgpu_job_run(struct amd_sched_job *sched_job)
 {
-	struct dma_fence *fence = NULL;
+	struct dma_fence *fence = NULL, *finished;
 	struct amdgpu_device *adev;
 	struct amdgpu_job *job;
 	int r;
@@ -190,15 +185,18 @@ static struct dma_fence *amdgpu_job_run(struct amd_sched_job *sched_job)
 		return NULL;
 	}
 	job = to_amdgpu_job(sched_job);
+	finished = &job->base.s_fence->finished;
 	adev = job->adev;
 
 	BUG_ON(amdgpu_sync_peek_fence(&job->sync, NULL));
 
 	trace_amdgpu_sched_run_job(job);
-	/* skip ib schedule when vram is lost */
-	if (job->vram_lost_counter != atomic_read(&adev->vram_lost_counter)) {
-		dma_fence_set_error(&job->base.s_fence->finished, -ECANCELED);
-		DRM_ERROR("Skip scheduling IBs!\n");
+
+	if (job->vram_lost_counter != atomic_read(&adev->vram_lost_counter))
+		dma_fence_set_error(finished, -ECANCELED);/* skip IB as well if VRAM lost */
+
+	if (finished->error < 0) {
+		DRM_INFO("Skip scheduling IBs!\n");
 	} else {
 		r = amdgpu_ib_schedule(job->ring, job->num_ibs, job->ibs, job,
				       &fence);
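Editor's note: the vram-lost tracking above works by comparing a counter snapshot taken when the job was allocated against the device-wide counter that the reset path increments. The sketch below restates that staleness test in isolation; the helper name is illustrative.

/* Illustrative only: a job is stale once a VRAM-lost reset happened after its snapshot. */
static bool example_job_is_stale(struct amdgpu_device *adev, struct amdgpu_job *job)
{
	return job->vram_lost_counter != atomic_read(&adev->vram_lost_counter);
}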
@@ -63,8 +63,6 @@ void amdgpu_driver_unload_kms(struct drm_device *dev)
 		pm_runtime_forbid(dev->dev);
 	}
 
-	amdgpu_amdkfd_device_fini(adev);
-
 	amdgpu_acpi_fini(adev);
 
 	amdgpu_device_fini(adev);
@@ -159,9 +157,6 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
				"Error during ACPI methods call\n");
 	}
 
-	amdgpu_amdkfd_device_probe(adev);
-	amdgpu_amdkfd_device_init(adev);
-
 	if (amdgpu_device_is_px(dev)) {
 		pm_runtime_use_autosuspend(dev->dev);
 		pm_runtime_set_autosuspend_delay(dev->dev, 5000);
@@ -171,9 +166,6 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
 		pm_runtime_put_autosuspend(dev->dev);
 	}
 
-	if (amdgpu_sriov_vf(adev))
-		amdgpu_virt_release_full_gpu(adev, true);
-
 out:
 	if (r) {
 		/* balance pm_runtime_get_sync in amdgpu_driver_unload_kms */
@@ -558,6 +550,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
 	}
 	case AMDGPU_INFO_DEV_INFO: {
 		struct drm_amdgpu_info_device dev_info = {};
+		uint64_t vm_size;
 
 		dev_info.device_id = dev->pdev->device;
 		dev_info.chip_rev = adev->rev_id;
@@ -585,8 +578,17 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
 			dev_info.ids_flags |= AMDGPU_IDS_FLAGS_FUSION;
 		if (amdgpu_sriov_vf(adev))
 			dev_info.ids_flags |= AMDGPU_IDS_FLAGS_PREEMPTION;
+
+		vm_size = adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE;
 		dev_info.virtual_address_offset = AMDGPU_VA_RESERVED_SIZE;
-		dev_info.virtual_address_max = (uint64_t)adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE;
+		dev_info.virtual_address_max =
+			min(vm_size, AMDGPU_VA_HOLE_START);
+
+		vm_size -= AMDGPU_VA_RESERVED_SIZE;
+		if (vm_size > AMDGPU_VA_HOLE_START) {
+			dev_info.high_va_offset = AMDGPU_VA_HOLE_END;
+			dev_info.high_va_max = AMDGPU_VA_HOLE_END | vm_size;
+		}
 		dev_info.virtual_address_alignment = max((int)PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE);
 		dev_info.pte_fragment_size = (1 << adev->vm_manager.fragment_size) * AMDGPU_GPU_PAGE_SIZE;
 		dev_info.gart_page_size = AMDGPU_GPU_PAGE_SIZE;
@@ -786,9 +788,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
  */
 void amdgpu_driver_lastclose_kms(struct drm_device *dev)
 {
-	struct amdgpu_device *adev = dev->dev_private;
-
-	amdgpu_fbdev_restore_mode(adev);
+	drm_fb_helper_lastclose(dev);
 
 	vga_switcheroo_process_delayed_switch();
 }
 
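Editor's note: with the 48-bit VA hole exposed through the info ioctl, userspace can now see up to two usable VA ranges. As a hedged sketch only, this is how a consumer of the fields filled above might compute the total usable VA, assuming the high-range fields are left zero when no high range exists:

/* Illustrative only: total usable GPU VA from the dev_info fields set above. */
static uint64_t example_total_va(const struct drm_amdgpu_info_device *info)
{
	uint64_t total = info->virtual_address_max - info->virtual_address_offset;

	if (info->high_va_max > info->high_va_offset)
		total += info->high_va_max - info->high_va_offset;

	return total;
}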
@@ -89,7 +89,6 @@ enum amdgpu_hpd_id {
 	AMDGPU_HPD_4,
 	AMDGPU_HPD_5,
 	AMDGPU_HPD_6,
-	AMDGPU_HPD_LAST,
 	AMDGPU_HPD_NONE = 0xff,
 };
 
@@ -106,7 +105,6 @@ enum amdgpu_crtc_irq {
 	AMDGPU_CRTC_IRQ_VLINE4,
 	AMDGPU_CRTC_IRQ_VLINE5,
 	AMDGPU_CRTC_IRQ_VLINE6,
-	AMDGPU_CRTC_IRQ_LAST,
 	AMDGPU_CRTC_IRQ_NONE = 0xff
 };
 
@@ -117,7 +115,6 @@ enum amdgpu_pageflip_irq {
 	AMDGPU_PAGEFLIP_IRQ_D4,
 	AMDGPU_PAGEFLIP_IRQ_D5,
 	AMDGPU_PAGEFLIP_IRQ_D6,
-	AMDGPU_PAGEFLIP_IRQ_LAST,
 	AMDGPU_PAGEFLIP_IRQ_NONE = 0xff
 };
 
@@ -661,10 +658,6 @@ void amdgpu_fbdev_fini(struct amdgpu_device *adev);
 void amdgpu_fbdev_set_suspend(struct amdgpu_device *adev, int state);
 int amdgpu_fbdev_total_size(struct amdgpu_device *adev);
 bool amdgpu_fbdev_robj_is_fb(struct amdgpu_device *adev, struct amdgpu_bo *robj);
-void amdgpu_fbdev_restore_mode(struct amdgpu_device *adev);
-
-void amdgpu_fb_output_poll_changed(struct amdgpu_device *adev);
-
 
 int amdgpu_align_pitch(struct amdgpu_device *adev, int width, int bpp, bool tiled);
 
@@ -281,6 +281,44 @@ void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
 		*cpu_addr = NULL;
 }
 
+/* Validate bo size is bit bigger then the request domain */
+static bool amdgpu_bo_validate_size(struct amdgpu_device *adev,
+					  unsigned long size, u32 domain)
+{
+	struct ttm_mem_type_manager *man = NULL;
+
+	/*
+	 * If GTT is part of requested domains the check must succeed to
+	 * allow fall back to GTT
+	 */
+	if (domain & AMDGPU_GEM_DOMAIN_GTT) {
+		man = &adev->mman.bdev.man[TTM_PL_TT];
+
+		if (size < (man->size << PAGE_SHIFT))
+			return true;
+		else
+			goto fail;
+	}
+
+	if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
+		man = &adev->mman.bdev.man[TTM_PL_VRAM];
+
+		if (size < (man->size << PAGE_SHIFT))
+			return true;
+		else
+			goto fail;
+	}
+
+
+	/* TODO add more domains checks, such as AMDGPU_GEM_DOMAIN_CPU */
+	return true;
+
+fail:
+	DRM_DEBUG("BO size %lu > total memory in domain: %llu\n", size,
+		  man->size << PAGE_SHIFT);
+	return false;
+}
+
 static int amdgpu_bo_do_create(struct amdgpu_device *adev,
			       unsigned long size, int byte_align,
			       bool kernel, u32 domain, u64 flags,
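Editor's note: the new size check deliberately returns early for any request that includes GTT, so an oversized VRAM+GTT request can still fall back to GTT. A compact, hedged restatement of that decision (sizes already converted to bytes, names illustrative):

/* Illustrative only: the effective decision amdgpu_bo_validate_size() makes. */
static bool example_size_fits(u64 size, u64 gtt_bytes, u64 vram_bytes, u32 domain)
{
	if (domain & AMDGPU_GEM_DOMAIN_GTT)	/* GTT requested: only GTT capacity matters */
		return size < gtt_bytes;

	if (domain & AMDGPU_GEM_DOMAIN_VRAM)	/* VRAM-only request */
		return size < vram_bytes;

	return true;				/* other domains are not checked (yet) */
}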
@@ -289,16 +327,19 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
			       uint64_t init_value,
			       struct amdgpu_bo **bo_ptr)
 {
+	struct ttm_operation_ctx ctx = { !kernel, false };
 	struct amdgpu_bo *bo;
 	enum ttm_bo_type type;
 	unsigned long page_align;
-	u64 initial_bytes_moved, bytes_moved;
 	size_t acc_size;
 	int r;
 
 	page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
 	size = ALIGN(size, PAGE_SIZE);
 
+	if (!amdgpu_bo_validate_size(adev, size, domain))
+		return -ENOMEM;
+
 	if (kernel) {
 		type = ttm_bo_type_kernel;
 	} else if (sg) {
@@ -364,22 +405,19 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
 	bo->tbo.bdev = &adev->mman.bdev;
 	amdgpu_ttm_placement_from_domain(bo, domain);
 
-	initial_bytes_moved = atomic64_read(&adev->num_bytes_moved);
-	/* Kernel allocation are uninterruptible */
 	r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, type,
-				 &bo->placement, page_align, !kernel, NULL,
+				 &bo->placement, page_align, &ctx, NULL,
				 acc_size, sg, resv, &amdgpu_ttm_bo_destroy);
 	if (unlikely(r != 0))
 		return r;
 
-	bytes_moved = atomic64_read(&adev->num_bytes_moved) -
-		      initial_bytes_moved;
 	if (adev->mc.visible_vram_size < adev->mc.real_vram_size &&
	    bo->tbo.mem.mem_type == TTM_PL_VRAM &&
	    bo->tbo.mem.start < adev->mc.visible_vram_size >> PAGE_SHIFT)
-		amdgpu_cs_report_moved_bytes(adev, bytes_moved, bytes_moved);
+		amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved,
+					     ctx.bytes_moved);
 	else
-		amdgpu_cs_report_moved_bytes(adev, bytes_moved, 0);
+		amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved, 0);
 
 	if (kernel)
 		bo->tbo.priority = 1;
@@ -511,6 +549,7 @@ int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev,
 
 int amdgpu_bo_validate(struct amdgpu_bo *bo)
 {
+	struct ttm_operation_ctx ctx = { false, false };
 	uint32_t domain;
 	int r;
 
@@ -521,7 +560,7 @@ int amdgpu_bo_validate(struct amdgpu_bo *bo)
 
 retry:
 	amdgpu_ttm_placement_from_domain(bo, domain);
-	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
 	if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
 		domain = bo->allowed_domains;
 		goto retry;
@@ -632,6 +671,7 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
			     u64 *gpu_addr)
 {
 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+	struct ttm_operation_ctx ctx = { false, false };
 	int r, i;
 
 	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm))
@@ -647,7 +687,7 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
 	if (bo->pin_count) {
 		uint32_t mem_type = bo->tbo.mem.mem_type;
 
-		if (domain != amdgpu_mem_type_to_domain(mem_type))
+		if (!(domain & amdgpu_mem_type_to_domain(mem_type)))
 			return -EINVAL;
 
 		bo->pin_count++;
@@ -682,21 +722,23 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
 		bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
 	}
 
-	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
 	if (unlikely(r)) {
 		dev_err(adev->dev, "%p pin failed\n", bo);
 		goto error;
 	}
 
-	bo->pin_count = 1;
-	if (gpu_addr != NULL) {
-		r = amdgpu_ttm_bind(&bo->tbo, &bo->tbo.mem);
-		if (unlikely(r)) {
-			dev_err(adev->dev, "%p bind failed\n", bo);
-			goto error;
-		}
-		*gpu_addr = amdgpu_bo_gpu_offset(bo);
+	r = amdgpu_ttm_alloc_gart(&bo->tbo);
+	if (unlikely(r)) {
+		dev_err(adev->dev, "%p bind failed\n", bo);
+		goto error;
 	}
 
+	bo->pin_count = 1;
+	if (gpu_addr != NULL)
+		*gpu_addr = amdgpu_bo_gpu_offset(bo);
+
+	domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
 	if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
 		adev->vram_pin_size += amdgpu_bo_size(bo);
 		if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
@@ -717,6 +759,7 @@ int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain, u64 *gpu_addr)
 int amdgpu_bo_unpin(struct amdgpu_bo *bo)
 {
 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+	struct ttm_operation_ctx ctx = { false, false };
 	int r, i;
 
 	if (!bo->pin_count) {
@@ -730,7 +773,7 @@ int amdgpu_bo_unpin(struct amdgpu_bo *bo)
 		bo->placements[i].lpfn = 0;
 		bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
 	}
-	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
 	if (unlikely(r)) {
 		dev_err(adev->dev, "%p validate failed for unpin\n", bo);
 		goto error;
@@ -779,8 +822,8 @@ int amdgpu_bo_init(struct amdgpu_device *adev)
 	adev->mc.vram_mtrr = arch_phys_wc_add(adev->mc.aper_base,
					      adev->mc.aper_size);
 	DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
		 adev->mc.mc_vram_size >> 20,
		 (unsigned long long)adev->mc.aper_size >> 20);
 	DRM_INFO("RAM width %dbits %s\n",
		 adev->mc.vram_width, amdgpu_vram_names[adev->mc.vram_type]);
 	return amdgpu_ttm_init(adev);
@@ -902,6 +945,7 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
 int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
 {
 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
+	struct ttm_operation_ctx ctx = { false, false };
 	struct amdgpu_bo *abo;
 	unsigned long offset, size;
 	int r;
@@ -935,7 +979,7 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
 	abo->placement.num_busy_placement = 1;
 	abo->placement.busy_placement = &abo->placements[1];
 
-	r = ttm_bo_validate(bo, &abo->placement, false, false);
+	r = ttm_bo_validate(bo, &abo->placement, &ctx);
 	if (unlikely(r != 0))
 		return r;
 
@@ -980,7 +1024,7 @@ u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
 {
 	WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_SYSTEM);
 	WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_TT &&
-		     !amdgpu_ttm_is_bound(bo->tbo.ttm));
+		     !amdgpu_gtt_mgr_has_gart_addr(&bo->tbo.mem));
 	WARN_ON_ONCE(!ww_mutex_is_locked(&bo->tbo.resv->lock) &&
		     !bo->pin_count);
 	WARN_ON_ONCE(bo->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET);
|
|
@@ -187,7 +187,7 @@ static inline u64 amdgpu_bo_mmap_offset(struct amdgpu_bo *bo)
 static inline bool amdgpu_bo_gpu_accessible(struct amdgpu_bo *bo)
 {
 	switch (bo->tbo.mem.mem_type) {
-	case TTM_PL_TT: return amdgpu_ttm_is_bound(bo->tbo.ttm);
+	case TTM_PL_TT: return amdgpu_gtt_mgr_has_gart_addr(&bo->tbo.mem);
 	case TTM_PL_VRAM: return true;
 	default: return false;
 	}

@@ -30,7 +30,6 @@
 #include <linux/hwmon.h>
 #include <linux/hwmon-sysfs.h>

-#include "amd_powerplay.h"

 static int amdgpu_debugfs_pm_init(struct amdgpu_device *adev);

@@ -264,7 +264,7 @@ static int psp_hw_start(struct psp_context *psp)
 	struct amdgpu_device *adev = psp->adev;
 	int ret;

-	if (!amdgpu_sriov_vf(adev) || !adev->in_sriov_reset) {
+	if (!amdgpu_sriov_vf(adev) || !adev->in_gpu_reset) {
 		ret = psp_bootloader_load_sysdrv(psp);
 		if (ret)
 			return ret;
@@ -334,23 +334,26 @@ static int psp_load_fw(struct amdgpu_device *adev)
 	int ret;
 	struct psp_context *psp = &adev->psp;

+	if (amdgpu_sriov_vf(adev) && adev->in_gpu_reset != 0)
+		goto skip_memalloc;

 	psp->cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
 	if (!psp->cmd)
 		return -ENOMEM;

 	ret = amdgpu_bo_create_kernel(adev, PSP_1_MEG, PSP_1_MEG,
 				      AMDGPU_GEM_DOMAIN_GTT,
 				      &psp->fw_pri_bo,
 				      &psp->fw_pri_mc_addr,
 				      &psp->fw_pri_buf);
 	if (ret)
 		goto failed;

 	ret = amdgpu_bo_create_kernel(adev, PSP_FENCE_BUFFER_SIZE, PAGE_SIZE,
 				      AMDGPU_GEM_DOMAIN_VRAM,
 				      &psp->fence_buf_bo,
 				      &psp->fence_buf_mc_addr,
 				      &psp->fence_buf);
 	if (ret)
 		goto failed_mem2;

@@ -375,6 +378,7 @@ static int psp_load_fw(struct amdgpu_device *adev)
 	if (ret)
 		goto failed_mem;

+skip_memalloc:
 	ret = psp_hw_start(psp);
 	if (ret)
 		goto failed_mem;

@@ -225,7 +225,7 @@ int amdgpu_queue_mgr_map(struct amdgpu_device *adev,

 	/* Right now all IPs have only one instance - multiple rings. */
 	if (instance != 0) {
-		DRM_ERROR("invalid ip instance: %d\n", instance);
+		DRM_DEBUG("invalid ip instance: %d\n", instance);
 		return -EINVAL;
 	}

@@ -255,13 +255,13 @@ int amdgpu_queue_mgr_map(struct amdgpu_device *adev,
 		ip_num_rings = adev->vcn.num_enc_rings;
 		break;
 	default:
-		DRM_ERROR("unknown ip type: %d\n", hw_ip);
+		DRM_DEBUG("unknown ip type: %d\n", hw_ip);
 		return -EINVAL;
 	}

 	if (ring >= ip_num_rings) {
-		DRM_ERROR("Ring index:%d exceeds maximum:%d for ip:%d\n",
+		DRM_DEBUG("Ring index:%d exceeds maximum:%d for ip:%d\n",
 			  ring, ip_num_rings, hw_ip);
 		return -EINVAL;
 	}

@@ -292,7 +292,7 @@ int amdgpu_queue_mgr_map(struct amdgpu_device *adev,
 	default:
 		*out_ring = NULL;
 		r = -EINVAL;
-		DRM_ERROR("unknown HW IP type: %d\n", mapper->hw_ip);
+		DRM_DEBUG("unknown HW IP type: %d\n", mapper->hw_ip);
 	}

 out_unlock:

@@ -79,8 +79,7 @@ struct amdgpu_fence_driver {

 int amdgpu_fence_driver_init(struct amdgpu_device *adev);
 void amdgpu_fence_driver_fini(struct amdgpu_device *adev);
-void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev);
-void amdgpu_fence_driver_force_completion_ring(struct amdgpu_ring *ring);
+void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring);

 int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
 				  unsigned num_hw_submission);
@@ -35,6 +35,7 @@
 struct amdgpu_sync_entry {
 	struct hlist_node	node;
 	struct dma_fence	*fence;
+	bool	explicit;
 };

 static struct kmem_cache *amdgpu_sync_slab;
@@ -141,7 +142,7 @@ static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct dma_fence *f)
 *
 */
 int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
-		      struct dma_fence *f)
+		      struct dma_fence *f, bool explicit)
 {
 	struct amdgpu_sync_entry *e;

@@ -159,6 +160,8 @@ int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
 	if (!e)
 		return -ENOMEM;

+	e->explicit = explicit;

 	hash_add(sync->fences, &e->node, f->context);
 	e->fence = dma_fence_get(f);
 	return 0;
@@ -189,10 +192,7 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,

 	/* always sync to the exclusive fence */
 	f = reservation_object_get_excl(resv);
-	r = amdgpu_sync_fence(adev, sync, f);
-
-	if (explicit_sync)
-		return r;
+	r = amdgpu_sync_fence(adev, sync, f, false);

 	flist = reservation_object_get_list(resv);
 	if (!flist || r)
@@ -212,15 +212,15 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
 			    (fence_owner == AMDGPU_FENCE_OWNER_VM)))
 				continue;

-			/* Ignore fence from the same owner as
+			/* Ignore fence from the same owner and explicit one as
 			 * long as it isn't undefined.
 			 */
 			if (owner != AMDGPU_FENCE_OWNER_UNDEFINED &&
-			    fence_owner == owner)
+			    (fence_owner == owner || explicit_sync))
 				continue;
 		}

-		r = amdgpu_sync_fence(adev, sync, f);
+		r = amdgpu_sync_fence(adev, sync, f, false);
 		if (r)
 			break;
 	}
@@ -275,19 +275,21 @@ struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
 * amdgpu_sync_get_fence - get the next fence from the sync object
 *
 * @sync: sync object to use
+ * @explicit: true if the next fence is explicit
 *
 * Get and removes the next fence from the sync object not signaled yet.
 */
-struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync)
+struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync, bool *explicit)
 {
 	struct amdgpu_sync_entry *e;
 	struct hlist_node *tmp;
 	struct dma_fence *f;
 	int i;

 	hash_for_each_safe(sync->fences, i, tmp, e, node) {

 		f = e->fence;
+		if (explicit)
+			*explicit = e->explicit;

 		hash_del(&e->node);
 		kmem_cache_free(amdgpu_sync_slab, e);

@@ -41,7 +41,7 @@ struct amdgpu_sync {

 void amdgpu_sync_create(struct amdgpu_sync *sync);
 int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
-		      struct dma_fence *f);
+		      struct dma_fence *f, bool explicit);
 int amdgpu_sync_resv(struct amdgpu_device *adev,
 		     struct amdgpu_sync *sync,
 		     struct reservation_object *resv,
@@ -49,7 +49,7 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
 		     bool explicit_sync);
 struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
 					 struct amdgpu_ring *ring);
-struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync);
+struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync, bool *explicit);
 int amdgpu_sync_wait(struct amdgpu_sync *sync, bool intr);
 void amdgpu_sync_free(struct amdgpu_sync *sync);
 int amdgpu_sync_init(void);
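
The amdgpu_sync_fence() change above threads an explicit flag through the sync object so fences added for user-requested (explicit) synchronization can later be told apart from implicit reservation-object fences by amdgpu_sync_get_fence(). A minimal sketch of a hypothetical caller, only to illustrate the new parameter (the helper name sync_user_fence is made up and not part of this patch):

	/* Add a fence that came from a user-space sync object; mark it explicit
	 * so amdgpu_sync_get_fence() can report it back through its
	 * bool *explicit out-parameter.
	 */
	static int sync_user_fence(struct amdgpu_device *adev,
				   struct amdgpu_sync *sync,
				   struct dma_fence *fence)
	{
		return amdgpu_sync_fence(adev, sync, fence, true);
	}
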
@@ -110,7 +110,7 @@ static int amdgpu_ttm_global_init(struct amdgpu_device *adev)
 	ring = adev->mman.buffer_funcs_ring;
 	rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_KERNEL];
 	r = amd_sched_entity_init(&ring->sched, &adev->mman.entity,
-				  rq, amdgpu_sched_jobs);
+				  rq, amdgpu_sched_jobs, NULL);
 	if (r) {
 		DRM_ERROR("Failed setting up TTM BO move run queue.\n");
 		goto error_entity;
@@ -282,8 +282,7 @@ static uint64_t amdgpu_mm_node_addr(struct ttm_buffer_object *bo,
 {
 	uint64_t addr = 0;

-	if (mem->mem_type != TTM_PL_TT ||
-	    amdgpu_gtt_mgr_is_allocated(mem)) {
+	if (mem->mem_type != TTM_PL_TT || amdgpu_gtt_mgr_has_gart_addr(mem)) {
 		addr = mm_node->start << PAGE_SHIFT;
 		addr += bo->bdev->man[mem->mem_type].gpu_offset;
 	}
@@ -369,7 +368,7 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
 	 * dst to window 1
 	 */
 	if (src->mem->mem_type == TTM_PL_TT &&
-	    !amdgpu_gtt_mgr_is_allocated(src->mem)) {
+	    !amdgpu_gtt_mgr_has_gart_addr(src->mem)) {
 		r = amdgpu_map_buffer(src->bo, src->mem,
 				      PFN_UP(cur_size + src_page_offset),
 				      src_node_start, 0, ring,
@@ -383,7 +382,7 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
 	}

 	if (dst->mem->mem_type == TTM_PL_TT &&
-	    !amdgpu_gtt_mgr_is_allocated(dst->mem)) {
+	    !amdgpu_gtt_mgr_has_gart_addr(dst->mem)) {
 		r = amdgpu_map_buffer(dst->bo, dst->mem,
 				      PFN_UP(cur_size + dst_page_offset),
 				      dst_node_start, 1, ring,
@@ -467,9 +466,8 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
 	return r;
 }

-static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo,
-				bool evict, bool interruptible,
-				bool no_wait_gpu,
+static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo, bool evict,
+				struct ttm_operation_ctx *ctx,
 				struct ttm_mem_reg *new_mem)
 {
 	struct amdgpu_device *adev;
@@ -489,8 +487,7 @@ static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo,
 	placements.fpfn = 0;
 	placements.lpfn = 0;
 	placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
-	r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
-			     interruptible, no_wait_gpu);
+	r = ttm_bo_mem_space(bo, &placement, &tmp_mem, ctx);
 	if (unlikely(r)) {
 		return r;
 	}
@@ -504,19 +501,18 @@ static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo,
 	if (unlikely(r)) {
 		goto out_cleanup;
 	}
-	r = amdgpu_move_blit(bo, true, no_wait_gpu, &tmp_mem, old_mem);
+	r = amdgpu_move_blit(bo, true, ctx->no_wait_gpu, &tmp_mem, old_mem);
 	if (unlikely(r)) {
 		goto out_cleanup;
 	}
-	r = ttm_bo_move_ttm(bo, interruptible, no_wait_gpu, new_mem);
+	r = ttm_bo_move_ttm(bo, ctx->interruptible, ctx->no_wait_gpu, new_mem);
 out_cleanup:
 	ttm_bo_mem_put(bo, &tmp_mem);
 	return r;
 }

-static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo,
-				bool evict, bool interruptible,
-				bool no_wait_gpu,
+static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo, bool evict,
+				struct ttm_operation_ctx *ctx,
 				struct ttm_mem_reg *new_mem)
 {
 	struct amdgpu_device *adev;
@@ -536,16 +532,15 @@ static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo,
 	placements.fpfn = 0;
 	placements.lpfn = 0;
 	placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
-	r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
-			     interruptible, no_wait_gpu);
+	r = ttm_bo_mem_space(bo, &placement, &tmp_mem, ctx);
 	if (unlikely(r)) {
 		return r;
 	}
-	r = ttm_bo_move_ttm(bo, interruptible, no_wait_gpu, &tmp_mem);
+	r = ttm_bo_move_ttm(bo, ctx->interruptible, ctx->no_wait_gpu, &tmp_mem);
 	if (unlikely(r)) {
 		goto out_cleanup;
 	}
-	r = amdgpu_move_blit(bo, true, no_wait_gpu, new_mem, old_mem);
+	r = amdgpu_move_blit(bo, true, ctx->no_wait_gpu, new_mem, old_mem);
 	if (unlikely(r)) {
 		goto out_cleanup;
 	}
@@ -554,10 +549,9 @@ static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo,
 	return r;
 }

-static int amdgpu_bo_move(struct ttm_buffer_object *bo,
-			  bool evict, bool interruptible,
-			  bool no_wait_gpu,
-			  struct ttm_mem_reg *new_mem)
+static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
+			  struct ttm_operation_ctx *ctx,
+			  struct ttm_mem_reg *new_mem)
 {
 	struct amdgpu_device *adev;
 	struct amdgpu_bo *abo;
@@ -592,19 +586,19 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo,

 	if (old_mem->mem_type == TTM_PL_VRAM &&
 	    new_mem->mem_type == TTM_PL_SYSTEM) {
-		r = amdgpu_move_vram_ram(bo, evict, interruptible,
-					no_wait_gpu, new_mem);
+		r = amdgpu_move_vram_ram(bo, evict, ctx, new_mem);
 	} else if (old_mem->mem_type == TTM_PL_SYSTEM &&
 		   new_mem->mem_type == TTM_PL_VRAM) {
-		r = amdgpu_move_ram_vram(bo, evict, interruptible,
-					no_wait_gpu, new_mem);
+		r = amdgpu_move_ram_vram(bo, evict, ctx, new_mem);
 	} else {
-		r = amdgpu_move_blit(bo, evict, no_wait_gpu, new_mem, old_mem);
+		r = amdgpu_move_blit(bo, evict, ctx->no_wait_gpu,
+				     new_mem, old_mem);
 	}

 	if (r) {
 memcpy:
-		r = ttm_bo_move_memcpy(bo, interruptible, no_wait_gpu, new_mem);
+		r = ttm_bo_move_memcpy(bo, ctx->interruptible,
+				       ctx->no_wait_gpu, new_mem);
 		if (r) {
 			return r;
 		}
@@ -690,7 +684,6 @@ struct amdgpu_ttm_tt {
 	struct list_head	guptasks;
 	atomic_t		mmu_invalidations;
 	uint32_t		last_set_pages;
-	struct list_head	list;
 };

 int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
@@ -861,44 +854,35 @@ static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm,
 	    bo_mem->mem_type == AMDGPU_PL_OA)
 		return -EINVAL;

-	if (!amdgpu_gtt_mgr_is_allocated(bo_mem))
+	if (!amdgpu_gtt_mgr_has_gart_addr(bo_mem)) {
+		gtt->offset = AMDGPU_BO_INVALID_OFFSET;
 		return 0;
+	}

-	spin_lock(&gtt->adev->gtt_list_lock);
 	flags = amdgpu_ttm_tt_pte_flags(gtt->adev, ttm, bo_mem);
 	gtt->offset = (u64)bo_mem->start << PAGE_SHIFT;
 	r = amdgpu_gart_bind(gtt->adev, gtt->offset, ttm->num_pages,
 		ttm->pages, gtt->ttm.dma_address, flags);

-	if (r) {
+	if (r)
 		DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
 			  ttm->num_pages, gtt->offset);
-		goto error_gart_bind;
-	}
-
-	list_add_tail(&gtt->list, &gtt->adev->gtt_list);
-error_gart_bind:
-	spin_unlock(&gtt->adev->gtt_list_lock);
 	return r;
 }

-bool amdgpu_ttm_is_bound(struct ttm_tt *ttm)
-{
-	struct amdgpu_ttm_tt *gtt = (void *)ttm;
-
-	return gtt && !list_empty(&gtt->list);
-}
-
-int amdgpu_ttm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *bo_mem)
+int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
 {
 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
-	struct ttm_tt *ttm = bo->ttm;
+	struct ttm_operation_ctx ctx = { false, false };
+	struct amdgpu_ttm_tt *gtt = (void*)bo->ttm;
 	struct ttm_mem_reg tmp;
 	struct ttm_placement placement;
 	struct ttm_place placements;
+	uint64_t flags;
 	int r;

-	if (!ttm || amdgpu_ttm_is_bound(ttm))
+	if (bo->mem.mem_type != TTM_PL_TT ||
+	    amdgpu_gtt_mgr_has_gart_addr(&bo->mem))
 		return 0;

 	tmp = bo->mem;
@@ -912,43 +896,44 @@ int amdgpu_ttm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *bo_mem)
 	placements.flags = (bo->mem.placement & ~TTM_PL_MASK_MEM) |
 		TTM_PL_FLAG_TT;

-	r = ttm_bo_mem_space(bo, &placement, &tmp, true, false);
+	r = ttm_bo_mem_space(bo, &placement, &tmp, &ctx);
 	if (unlikely(r))
 		return r;

-	r = ttm_bo_move_ttm(bo, true, false, &tmp);
-	if (unlikely(r))
+	flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, &tmp);
+	gtt->offset = (u64)tmp.start << PAGE_SHIFT;
+	r = amdgpu_gart_bind(adev, gtt->offset, bo->ttm->num_pages,
+			     bo->ttm->pages, gtt->ttm.dma_address, flags);
+	if (unlikely(r)) {
 		ttm_bo_mem_put(bo, &tmp);
-	else
-		bo->offset = (bo->mem.start << PAGE_SHIFT) +
-			bo->bdev->man[bo->mem.mem_type].gpu_offset;
+		return r;
+	}

-	return r;
+	ttm_bo_mem_put(bo, &bo->mem);
+	bo->mem = tmp;
+	bo->offset = (bo->mem.start << PAGE_SHIFT) +
+		bo->bdev->man[bo->mem.mem_type].gpu_offset;
+
+	return 0;
 }

-int amdgpu_ttm_recover_gart(struct amdgpu_device *adev)
+int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo)
 {
-	struct amdgpu_ttm_tt *gtt, *tmp;
-	struct ttm_mem_reg bo_mem;
+	struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
+	struct amdgpu_ttm_tt *gtt = (void *)tbo->ttm;
 	uint64_t flags;
 	int r;

-	bo_mem.mem_type = TTM_PL_TT;
-	spin_lock(&adev->gtt_list_lock);
-	list_for_each_entry_safe(gtt, tmp, &adev->gtt_list, list) {
-		flags = amdgpu_ttm_tt_pte_flags(gtt->adev, &gtt->ttm.ttm, &bo_mem);
-		r = amdgpu_gart_bind(adev, gtt->offset, gtt->ttm.ttm.num_pages,
-				     gtt->ttm.ttm.pages, gtt->ttm.dma_address,
-				     flags);
-		if (r) {
-			spin_unlock(&adev->gtt_list_lock);
-			DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
-				  gtt->ttm.ttm.num_pages, gtt->offset);
-			return r;
-		}
-	}
-	spin_unlock(&adev->gtt_list_lock);
-	return 0;
+	if (!gtt)
+		return 0;
+
+	flags = amdgpu_ttm_tt_pte_flags(adev, &gtt->ttm.ttm, &tbo->mem);
+	r = amdgpu_gart_bind(adev, gtt->offset, gtt->ttm.ttm.num_pages,
+			     gtt->ttm.ttm.pages, gtt->ttm.dma_address, flags);
+	if (r)
+		DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
+			  gtt->ttm.ttm.num_pages, gtt->offset);
+	return r;
 }

 static int amdgpu_ttm_backend_unbind(struct ttm_tt *ttm)
@@ -959,20 +944,14 @@ static int amdgpu_ttm_backend_unbind(struct ttm_tt *ttm)
 	if (gtt->userptr)
 		amdgpu_ttm_tt_unpin_userptr(ttm);

-	if (!amdgpu_ttm_is_bound(ttm))
+	if (gtt->offset == AMDGPU_BO_INVALID_OFFSET)
 		return 0;

 	/* unbind shouldn't be done for GDS/GWS/OA in ttm_bo_clean_mm */
-	spin_lock(&gtt->adev->gtt_list_lock);
 	r = amdgpu_gart_unbind(gtt->adev, gtt->offset, ttm->num_pages);
-	if (r) {
+	if (r)
 		DRM_ERROR("failed to unbind %lu pages at 0x%08llX\n",
 			  gtt->ttm.ttm.num_pages, gtt->offset);
-		goto error_unbind;
-	}
-	list_del_init(&gtt->list);
-error_unbind:
-	spin_unlock(&gtt->adev->gtt_list_lock);
 	return r;
 }

@@ -1009,7 +988,6 @@ static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_bo_device *bdev,
 		kfree(gtt);
 		return NULL;
 	}
-	INIT_LIST_HEAD(&gtt->list);
 	return &gtt->ttm.ttm;
 }

@@ -1348,10 +1326,13 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
 	DRM_INFO("amdgpu: %uM of VRAM memory ready\n",
 		 (unsigned) (adev->mc.real_vram_size / (1024 * 1024)));

-	if (amdgpu_gtt_size == -1)
-		gtt_size = max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20),
-			       adev->mc.mc_vram_size);
-	else
+	if (amdgpu_gtt_size == -1) {
+		struct sysinfo si;
+
+		si_meminfo(&si);
+		gtt_size = max(AMDGPU_DEFAULT_GTT_SIZE_MB << 20,
+			       (uint64_t)si.totalram * si.mem_unit * 3/4);
+	} else
 		gtt_size = (uint64_t)amdgpu_gtt_size << 20;
 	r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_TT, gtt_size >> PAGE_SHIFT);
 	if (r) {
@@ -1410,19 +1391,13 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)

 void amdgpu_ttm_fini(struct amdgpu_device *adev)
 {
-	int r;
-
 	if (!adev->mman.initialized)
 		return;

 	amdgpu_ttm_debugfs_fini(adev);
-	if (adev->stolen_vga_memory) {
-		r = amdgpu_bo_reserve(adev->stolen_vga_memory, true);
-		if (r == 0) {
-			amdgpu_bo_unpin(adev->stolen_vga_memory);
-			amdgpu_bo_unreserve(adev->stolen_vga_memory);
-		}
-		amdgpu_bo_unref(&adev->stolen_vga_memory);
-	}
+	amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, NULL);
+	amdgpu_fw_reserve_vram_fini(adev);
 	ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_VRAM);
 	ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_TT);
 	if (adev->gds.mem.total_size)
@@ -1432,7 +1407,6 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev)
 	if (adev->gds.oa.total_size)
 		ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_OA);
 	ttm_bo_device_release(&adev->mman.bdev);
-	amdgpu_gart_fini(adev);
 	amdgpu_ttm_global_fini(adev);
 	adev->mman.initialized = false;
 	DRM_INFO("amdgpu: ttm finalized\n");
@@ -1628,7 +1602,7 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
 	}

 	if (bo->tbo.mem.mem_type == TTM_PL_TT) {
-		r = amdgpu_ttm_bind(&bo->tbo, &bo->tbo.mem);
+		r = amdgpu_ttm_alloc_gart(&bo->tbo);
 		if (r)
 			return r;
 	}
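
The hunks above fold the separate interruptible/no_wait_gpu booleans into a struct ttm_operation_ctx that is passed down to ttm_bo_mem_space() and ttm_bo_validate(). A minimal sketch of the calling pattern the patch converts to, purely illustrative and with error handling trimmed:

	/* Both fields map to the former bool arguments: the first is
	 * interruptible, the second is no_wait_gpu, so { false, false }
	 * matches the old ttm_bo_validate(..., false, false) calls.
	 */
	struct ttm_operation_ctx ctx = { false, false };
	int r;

	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (unlikely(r))
		return r;
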
@@ -67,8 +67,9 @@ struct amdgpu_copy_mem {
 extern const struct ttm_mem_type_manager_func amdgpu_gtt_mgr_func;
 extern const struct ttm_mem_type_manager_func amdgpu_vram_mgr_func;

-bool amdgpu_gtt_mgr_is_allocated(struct ttm_mem_reg *mem);
+bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_mem_reg *mem);
 uint64_t amdgpu_gtt_mgr_usage(struct ttm_mem_type_manager *man);
+int amdgpu_gtt_mgr_recover(struct ttm_mem_type_manager *man);

 uint64_t amdgpu_vram_mgr_usage(struct ttm_mem_type_manager *man);
 uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_mem_type_manager *man);
@@ -90,9 +91,8 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
 		       struct dma_fence **fence);

 int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma);
-bool amdgpu_ttm_is_bound(struct ttm_tt *ttm);
-int amdgpu_ttm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *bo_mem);
-int amdgpu_ttm_recover_gart(struct amdgpu_device *adev);
+int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo);
+int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo);

 int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages);
 void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages);
@@ -359,7 +359,6 @@ static int amdgpu_ucode_patch_jt(struct amdgpu_firmware_info *ucode,

 int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
 {
-	struct amdgpu_bo **bo = &adev->firmware.fw_buf;
 	uint64_t fw_offset = 0;
 	int i, err;
 	struct amdgpu_firmware_info *ucode = NULL;
@@ -370,36 +369,16 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
 		return 0;
 	}

-	if (!amdgpu_sriov_vf(adev) || !adev->in_sriov_reset) {
-		err = amdgpu_bo_create(adev, adev->firmware.fw_size, PAGE_SIZE, true,
-				amdgpu_sriov_vf(adev) ? AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
-				AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
-				NULL, NULL, 0, bo);
+	if (!adev->in_gpu_reset) {
+		err = amdgpu_bo_create_kernel(adev, adev->firmware.fw_size, PAGE_SIZE,
+					amdgpu_sriov_vf(adev) ? AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
+					&adev->firmware.fw_buf,
+					&adev->firmware.fw_buf_mc,
+					&adev->firmware.fw_buf_ptr);
 		if (err) {
-			dev_err(adev->dev, "(%d) Firmware buffer allocate failed\n", err);
+			dev_err(adev->dev, "failed to create kernel buffer for firmware.fw_buf\n");
 			goto failed;
 		}
-
-		err = amdgpu_bo_reserve(*bo, false);
-		if (err) {
-			dev_err(adev->dev, "(%d) Firmware buffer reserve failed\n", err);
-			goto failed_reserve;
-		}
-
-		err = amdgpu_bo_pin(*bo, amdgpu_sriov_vf(adev) ? AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
-				    &adev->firmware.fw_buf_mc);
-		if (err) {
-			dev_err(adev->dev, "(%d) Firmware buffer pin failed\n", err);
-			goto failed_pin;
-		}
-
-		err = amdgpu_bo_kmap(*bo, &adev->firmware.fw_buf_ptr);
-		if (err) {
-			dev_err(adev->dev, "(%d) Firmware buffer kmap failed\n", err);
-			goto failed_kmap;
-		}
-
-		amdgpu_bo_unreserve(*bo);
 	}

 	memset(adev->firmware.fw_buf_ptr, 0, adev->firmware.fw_size);
@@ -436,12 +415,6 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
 	}
 	return 0;

-failed_kmap:
-	amdgpu_bo_unpin(*bo);
-failed_pin:
-	amdgpu_bo_unreserve(*bo);
-failed_reserve:
-	amdgpu_bo_unref(bo);
 failed:
 	if (err)
 		adev->firmware.load_type = AMDGPU_FW_LOAD_DIRECT;
@@ -464,8 +437,10 @@ int amdgpu_ucode_fini_bo(struct amdgpu_device *adev)
 			ucode->kaddr = NULL;
 		}
 	}
-	amdgpu_bo_unref(&adev->firmware.fw_buf);
-	adev->firmware.fw_buf = NULL;
+
+	amdgpu_bo_free_kernel(&adev->firmware.fw_buf,
+			      &adev->firmware.fw_buf_mc,
+			      &adev->firmware.fw_buf_ptr);

 	return 0;
 }
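
The amdgpu_ucode_init_bo() rewrite above relies on amdgpu_bo_create_kernel(), which bundles the old create/reserve/pin/kmap sequence into one call, and amdgpu_bo_free_kernel() undoes it. A rough sketch of the pattern, assuming a caller that only needs a small CPU- and GPU-visible scratch buffer (the buffer names are illustrative, not part of this patch):

	struct amdgpu_bo *scratch_bo;
	u64 scratch_gpu_addr;
	void *scratch_cpu_ptr;
	int r;

	/* One call allocates, pins and kmaps the buffer object. */
	r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT,
				    &scratch_bo, &scratch_gpu_addr,
				    &scratch_cpu_ptr);
	if (r)
		return r;

	/* ... use scratch_cpu_ptr / scratch_gpu_addr ... */

	amdgpu_bo_free_kernel(&scratch_bo, &scratch_gpu_addr, &scratch_cpu_ptr);
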
@@ -232,7 +232,7 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
 	ring = &adev->uvd.ring;
 	rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL];
 	r = amd_sched_entity_init(&ring->sched, &adev->uvd.entity,
-				  rq, amdgpu_sched_jobs);
+				  rq, amdgpu_sched_jobs, NULL);
 	if (r != 0) {
 		DRM_ERROR("Failed setting up UVD run queue.\n");
 		return r;
@@ -408,6 +408,7 @@ static u64 amdgpu_uvd_get_addr_from_ctx(struct amdgpu_uvd_cs_ctx *ctx)
 */
 static int amdgpu_uvd_cs_pass1(struct amdgpu_uvd_cs_ctx *ctx)
 {
+	struct ttm_operation_ctx tctx = { false, false };
 	struct amdgpu_bo_va_mapping *mapping;
 	struct amdgpu_bo *bo;
 	uint32_t cmd;
@@ -430,7 +431,7 @@ static int amdgpu_uvd_cs_pass1(struct amdgpu_uvd_cs_ctx *ctx)
 		}
 		amdgpu_uvd_force_into_uvd_segment(bo);

-		r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+		r = ttm_bo_validate(&bo->tbo, &bo->placement, &tctx);
 	}

 	return r;
@@ -949,6 +950,7 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
 static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
 			       bool direct, struct dma_fence **fence)
 {
+	struct ttm_operation_ctx ctx = { true, false };
 	struct ttm_validate_buffer tv;
 	struct ww_acquire_ctx ticket;
 	struct list_head head;
@@ -975,7 +977,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
 		amdgpu_uvd_force_into_uvd_segment(bo);
 	}

-	r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
+	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
 	if (r)
 		goto err;

@@ -1218,7 +1220,7 @@ int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 	} else if (r < 0) {
 		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
 	} else {
-		DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
+		DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
 		r = 0;
 	}

@@ -31,6 +31,10 @@
 #define AMDGPU_UVD_SESSION_SIZE		(50*1024)
 #define AMDGPU_UVD_FIRMWARE_OFFSET	256

+#define AMDGPU_UVD_FIRMWARE_SIZE(adev)    \
+	(AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(((const struct common_firmware_header *)(adev)->uvd.fw->data)->ucode_size_bytes) + \
+			       8) - AMDGPU_UVD_FIRMWARE_OFFSET)
+
 struct amdgpu_uvd {
 	struct amdgpu_bo	*vcpu_bo;
 	void			*cpu_addr;
@ -176,7 +176,7 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
|
||||||
ring = &adev->vce.ring[0];
|
ring = &adev->vce.ring[0];
|
||||||
rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL];
|
rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL];
|
||||||
r = amd_sched_entity_init(&ring->sched, &adev->vce.entity,
|
r = amd_sched_entity_init(&ring->sched, &adev->vce.entity,
|
||||||
rq, amdgpu_sched_jobs);
|
rq, amdgpu_sched_jobs, NULL);
|
||||||
if (r != 0) {
|
if (r != 0) {
|
||||||
DRM_ERROR("Failed setting up VCE run queue.\n");
|
DRM_ERROR("Failed setting up VCE run queue.\n");
|
||||||
return r;
|
return r;
|
||||||
|
@ -543,6 +543,55 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
|
||||||
return r;
|
return r;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* amdgpu_vce_cs_validate_bo - make sure not to cross 4GB boundary
|
||||||
|
*
|
||||||
|
* @p: parser context
|
||||||
|
* @lo: address of lower dword
|
||||||
|
* @hi: address of higher dword
|
||||||
|
* @size: minimum size
|
||||||
|
* @index: bs/fb index
|
||||||
|
*
|
||||||
|
* Make sure that no BO cross a 4GB boundary.
|
||||||
|
*/
|
||||||
|
static int amdgpu_vce_validate_bo(struct amdgpu_cs_parser *p, uint32_t ib_idx,
|
||||||
|
int lo, int hi, unsigned size, int32_t index)
|
||||||
|
{
|
||||||
|
int64_t offset = ((uint64_t)size) * ((int64_t)index);
|
||||||
|
struct ttm_operation_ctx ctx = { false, false };
|
||||||
|
struct amdgpu_bo_va_mapping *mapping;
|
||||||
|
unsigned i, fpfn, lpfn;
|
||||||
|
struct amdgpu_bo *bo;
|
||||||
|
uint64_t addr;
|
||||||
|
int r;
|
||||||
|
|
||||||
|
addr = ((uint64_t)amdgpu_get_ib_value(p, ib_idx, lo)) |
|
||||||
|
((uint64_t)amdgpu_get_ib_value(p, ib_idx, hi)) << 32;
|
||||||
|
if (index >= 0) {
|
||||||
|
addr += offset;
|
||||||
|
fpfn = PAGE_ALIGN(offset) >> PAGE_SHIFT;
|
||||||
|
lpfn = 0x100000000ULL >> PAGE_SHIFT;
|
||||||
|
} else {
|
||||||
|
fpfn = 0;
|
||||||
|
lpfn = (0x100000000ULL - PAGE_ALIGN(offset)) >> PAGE_SHIFT;
|
||||||
|
}
|
||||||
|
|
||||||
|
r = amdgpu_cs_find_mapping(p, addr, &bo, &mapping);
|
||||||
|
if (r) {
|
||||||
|
DRM_ERROR("Can't find BO for addr 0x%010Lx %d %d %d %d\n",
|
||||||
|
addr, lo, hi, size, index);
|
||||||
|
return r;
|
||||||
|
}
|
||||||
|
|
||||||
|
for (i = 0; i < bo->placement.num_placement; ++i) {
|
||||||
|
bo->placements[i].fpfn = max(bo->placements[i].fpfn, fpfn);
|
||||||
|
bo->placements[i].lpfn = bo->placements[i].fpfn ?
|
||||||
|
min(bo->placements[i].fpfn, lpfn) : lpfn;
|
||||||
|
}
|
||||||
|
return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* amdgpu_vce_cs_reloc - command submission relocation
|
* amdgpu_vce_cs_reloc - command submission relocation
|
||||||
*
|
*
|
||||||
|
@ -648,12 +697,13 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
|
||||||
uint32_t allocated = 0;
|
uint32_t allocated = 0;
|
||||||
uint32_t tmp, handle = 0;
|
uint32_t tmp, handle = 0;
|
||||||
uint32_t *size = &tmp;
|
uint32_t *size = &tmp;
|
||||||
int i, r = 0, idx = 0;
|
unsigned idx;
|
||||||
|
int i, r = 0;
|
||||||
|
|
||||||
p->job->vm = NULL;
|
p->job->vm = NULL;
|
||||||
ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);
|
ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);
|
||||||
|
|
||||||
while (idx < ib->length_dw) {
|
for (idx = 0; idx < ib->length_dw;) {
|
||||||
uint32_t len = amdgpu_get_ib_value(p, ib_idx, idx);
|
uint32_t len = amdgpu_get_ib_value(p, ib_idx, idx);
|
||||||
uint32_t cmd = amdgpu_get_ib_value(p, ib_idx, idx + 1);
|
uint32_t cmd = amdgpu_get_ib_value(p, ib_idx, idx + 1);
|
||||||
|
|
||||||
|
@ -663,6 +713,54 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
|
||||||
goto out;
|
goto out;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
switch (cmd) {
|
||||||
|
case 0x00000002: /* task info */
|
||||||
|
fb_idx = amdgpu_get_ib_value(p, ib_idx, idx + 6);
|
||||||
|
bs_idx = amdgpu_get_ib_value(p, ib_idx, idx + 7);
|
||||||
|
break;
|
||||||
|
|
||||||
|
case 0x03000001: /* encode */
|
||||||
|
r = amdgpu_vce_validate_bo(p, ib_idx, idx + 10,
|
||||||
|
idx + 9, 0, 0);
|
||||||
|
if (r)
|
||||||
|
goto out;
|
||||||
|
|
||||||
|
r = amdgpu_vce_validate_bo(p, ib_idx, idx + 12,
|
||||||
|
idx + 11, 0, 0);
|
||||||
|
if (r)
|
||||||
|
goto out;
|
||||||
|
break;
|
||||||
|
|
||||||
|
case 0x05000001: /* context buffer */
|
||||||
|
r = amdgpu_vce_validate_bo(p, ib_idx, idx + 3,
|
||||||
|
idx + 2, 0, 0);
|
||||||
|
if (r)
|
||||||
|
goto out;
|
||||||
|
break;
|
||||||
|
|
||||||
|
case 0x05000004: /* video bitstream buffer */
|
||||||
|
tmp = amdgpu_get_ib_value(p, ib_idx, idx + 4);
|
||||||
|
r = amdgpu_vce_validate_bo(p, ib_idx, idx + 3, idx + 2,
|
||||||
|
tmp, bs_idx);
|
||||||
|
if (r)
|
||||||
|
goto out;
|
||||||
|
break;
|
||||||
|
|
||||||
|
case 0x05000005: /* feedback buffer */
|
||||||
|
r = amdgpu_vce_validate_bo(p, ib_idx, idx + 3, idx + 2,
|
||||||
|
4096, fb_idx);
|
||||||
|
if (r)
|
||||||
|
goto out;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
idx += len / 4;
|
||||||
|
}
|
||||||
|
|
||||||
|
for (idx = 0; idx < ib->length_dw;) {
|
||||||
|
uint32_t len = amdgpu_get_ib_value(p, ib_idx, idx);
|
||||||
|
uint32_t cmd = amdgpu_get_ib_value(p, ib_idx, idx + 1);
|
||||||
|
|
||||||
switch (cmd) {
|
switch (cmd) {
|
||||||
case 0x00000001: /* session */
|
case 0x00000001: /* session */
|
||||||
handle = amdgpu_get_ib_value(p, ib_idx, idx + 2);
|
handle = amdgpu_get_ib_value(p, ib_idx, idx + 2);
|
||||||
|
@ -954,7 +1052,7 @@ int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
|
||||||
}
|
}
|
||||||
|
|
||||||
if (i < timeout) {
|
if (i < timeout) {
|
||||||
DRM_INFO("ring test on %d succeeded in %d usecs\n",
|
DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
|
||||||
ring->idx, i);
|
ring->idx, i);
|
||||||
} else {
|
} else {
|
||||||
DRM_ERROR("amdgpu: ring %d test failed\n",
|
DRM_ERROR("amdgpu: ring %d test failed\n",
|
||||||
|
@ -999,7 +1097,7 @@ int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
|
||||||
} else if (r < 0) {
|
} else if (r < 0) {
|
||||||
DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
|
DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
|
||||||
} else {
|
} else {
|
||||||
DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
|
DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
|
||||||
r = 0;
|
r = 0;
|
||||||
}
|
}
|
||||||
error:
|
error:
|
||||||
|
|
|
@ -35,8 +35,8 @@
|
||||||
#include "soc15d.h"
|
#include "soc15d.h"
|
||||||
#include "soc15_common.h"
|
#include "soc15_common.h"
|
||||||
|
|
||||||
#include "vega10/soc15ip.h"
|
#include "soc15ip.h"
|
||||||
#include "raven1/VCN/vcn_1_0_offset.h"
|
#include "vcn/vcn_1_0_offset.h"
|
||||||
|
|
||||||
/* 1 second timeout */
|
/* 1 second timeout */
|
||||||
#define VCN_IDLE_TIMEOUT msecs_to_jiffies(1000)
|
#define VCN_IDLE_TIMEOUT msecs_to_jiffies(1000)
|
||||||
|
@ -106,7 +106,7 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
|
||||||
ring = &adev->vcn.ring_dec;
|
ring = &adev->vcn.ring_dec;
|
||||||
rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL];
|
rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL];
|
||||||
r = amd_sched_entity_init(&ring->sched, &adev->vcn.entity_dec,
|
r = amd_sched_entity_init(&ring->sched, &adev->vcn.entity_dec,
|
||||||
rq, amdgpu_sched_jobs);
|
rq, amdgpu_sched_jobs, NULL);
|
||||||
if (r != 0) {
|
if (r != 0) {
|
||||||
DRM_ERROR("Failed setting up VCN dec run queue.\n");
|
DRM_ERROR("Failed setting up VCN dec run queue.\n");
|
||||||
return r;
|
return r;
|
||||||
|
@ -115,7 +115,7 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
|
||||||
ring = &adev->vcn.ring_enc[0];
|
ring = &adev->vcn.ring_enc[0];
|
||||||
rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL];
|
rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL];
|
||||||
r = amd_sched_entity_init(&ring->sched, &adev->vcn.entity_enc,
|
r = amd_sched_entity_init(&ring->sched, &adev->vcn.entity_enc,
|
||||||
rq, amdgpu_sched_jobs);
|
rq, amdgpu_sched_jobs, NULL);
|
||||||
if (r != 0) {
|
if (r != 0) {
|
||||||
DRM_ERROR("Failed setting up VCN enc run queue.\n");
|
DRM_ERROR("Failed setting up VCN enc run queue.\n");
|
||||||
return r;
|
return r;
|
||||||
|
@ -261,7 +261,7 @@ int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring)
|
||||||
}
|
}
|
||||||
|
|
||||||
if (i < adev->usec_timeout) {
|
if (i < adev->usec_timeout) {
|
||||||
DRM_INFO("ring test on %d succeeded in %d usecs\n",
|
DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
|
||||||
ring->idx, i);
|
ring->idx, i);
|
||||||
} else {
|
} else {
|
||||||
DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
|
DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
|
||||||
|
@ -274,6 +274,7 @@ int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring)
|
||||||
static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
|
static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
|
||||||
bool direct, struct dma_fence **fence)
|
bool direct, struct dma_fence **fence)
|
||||||
{
|
{
|
||||||
|
struct ttm_operation_ctx ctx = { true, false };
|
||||||
struct ttm_validate_buffer tv;
|
struct ttm_validate_buffer tv;
|
||||||
struct ww_acquire_ctx ticket;
|
struct ww_acquire_ctx ticket;
|
||||||
struct list_head head;
|
struct list_head head;
|
||||||
|
@ -294,7 +295,7 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *b
|
||||||
if (r)
|
if (r)
|
||||||
return r;
|
return r;
|
||||||
|
|
||||||
r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
|
r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
|
||||||
if (r)
|
if (r)
|
||||||
goto err;
|
goto err;
|
||||||
|
|
||||||
|
@ -467,7 +468,7 @@ int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
|
||||||
} else if (r < 0) {
|
} else if (r < 0) {
|
||||||
DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
|
DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
|
||||||
} else {
|
} else {
|
||||||
DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
|
DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
|
||||||
r = 0;
|
r = 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -500,7 +501,7 @@ int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring)
|
||||||
}
|
}
|
||||||
|
|
||||||
if (i < adev->usec_timeout) {
|
if (i < adev->usec_timeout) {
|
||||||
DRM_INFO("ring test on %d succeeded in %d usecs\n",
|
DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
|
||||||
ring->idx, i);
|
ring->idx, i);
|
||||||
} else {
|
} else {
|
||||||
DRM_ERROR("amdgpu: ring %d test failed\n",
|
DRM_ERROR("amdgpu: ring %d test failed\n",
|
||||||
|
@ -643,7 +644,7 @@ int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
|
||||||
} else if (r < 0) {
|
} else if (r < 0) {
|
||||||
DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
|
DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
|
||||||
} else {
|
} else {
|
||||||
DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
|
DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
|
||||||
r = 0;
|
r = 0;
|
||||||
}
|
}
|
||||||
error:
|
error:
|
||||||
|
|
|
@ -24,6 +24,14 @@
|
||||||
#include "amdgpu.h"
|
#include "amdgpu.h"
|
||||||
#define MAX_KIQ_REG_WAIT 100000000 /* in usecs */
|
#define MAX_KIQ_REG_WAIT 100000000 /* in usecs */
|
||||||
|
|
||||||
|
bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev)
|
||||||
|
{
|
||||||
|
/* By now all MMIO pages except mailbox are blocked */
|
||||||
|
/* if blocking is enabled in hypervisor. Choose the */
|
||||||
|
/* SCRATCH_REG0 to test. */
|
||||||
|
return RREG32_NO_KIQ(0xc040) == 0xffffffff;
|
||||||
|
}
|
||||||
|
|
||||||
int amdgpu_allocate_static_csa(struct amdgpu_device *adev)
|
int amdgpu_allocate_static_csa(struct amdgpu_device *adev)
|
||||||
{
|
{
|
||||||
int r;
|
int r;
|
||||||
|
@ -39,6 +47,12 @@ int amdgpu_allocate_static_csa(struct amdgpu_device *adev)
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
void amdgpu_free_static_csa(struct amdgpu_device *adev) {
|
||||||
|
amdgpu_bo_free_kernel(&adev->virt.csa_obj,
|
||||||
|
&adev->virt.csa_vmid0_addr,
|
||||||
|
NULL);
|
||||||
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* amdgpu_map_static_csa should be called during amdgpu_vm_init
|
* amdgpu_map_static_csa should be called during amdgpu_vm_init
|
||||||
* it maps virtual address "AMDGPU_VA_RESERVED_SIZE - AMDGPU_CSA_SIZE"
|
* it maps virtual address "AMDGPU_VA_RESERVED_SIZE - AMDGPU_CSA_SIZE"
|
||||||
|
@ -107,8 +121,6 @@ void amdgpu_virt_init_setting(struct amdgpu_device *adev)
|
||||||
adev->enable_virtual_display = true;
|
adev->enable_virtual_display = true;
|
||||||
adev->cg_flags = 0;
|
adev->cg_flags = 0;
|
||||||
adev->pg_flags = 0;
|
adev->pg_flags = 0;
|
||||||
|
|
||||||
mutex_init(&adev->virt.lock_reset);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
|
uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
|
||||||
|
@ -227,6 +239,22 @@ int amdgpu_virt_reset_gpu(struct amdgpu_device *adev)
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* amdgpu_virt_wait_reset() - wait for reset gpu completed
|
||||||
|
* @amdgpu: amdgpu device.
|
||||||
|
* Wait for GPU reset completed.
|
||||||
|
* Return: Zero if reset success, otherwise will return error.
|
||||||
|
*/
|
||||||
|
int amdgpu_virt_wait_reset(struct amdgpu_device *adev)
|
||||||
|
{
|
||||||
|
struct amdgpu_virt *virt = &adev->virt;
|
||||||
|
|
||||||
|
if (!virt->ops || !virt->ops->wait_reset)
|
||||||
|
return -EINVAL;
|
||||||
|
|
||||||
|
return virt->ops->wait_reset(adev);
|
||||||
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* amdgpu_virt_alloc_mm_table() - alloc memory for mm table
|
* amdgpu_virt_alloc_mm_table() - alloc memory for mm table
|
||||||
* @amdgpu: amdgpu device.
|
* @amdgpu: amdgpu device.
|
||||||
|
@ -296,7 +324,6 @@ int amdgpu_virt_fw_reserve_get_checksum(void *obj,
|
||||||
|
|
||||||
void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
|
void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
|
||||||
{
|
{
|
||||||
uint32_t pf2vf_ver = 0;
|
|
||||||
uint32_t pf2vf_size = 0;
|
uint32_t pf2vf_size = 0;
|
||||||
uint32_t checksum = 0;
|
uint32_t checksum = 0;
|
||||||
uint32_t checkval;
|
uint32_t checkval;
|
||||||
|
@ -309,9 +336,9 @@ void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
|
||||||
adev->virt.fw_reserve.p_pf2vf =
|
adev->virt.fw_reserve.p_pf2vf =
|
||||||
(struct amdgim_pf2vf_info_header *)(
|
(struct amdgim_pf2vf_info_header *)(
|
||||||
adev->fw_vram_usage.va + AMDGIM_DATAEXCHANGE_OFFSET);
|
adev->fw_vram_usage.va + AMDGIM_DATAEXCHANGE_OFFSET);
|
||||||
pf2vf_ver = adev->virt.fw_reserve.p_pf2vf->version;
|
|
||||||
AMDGPU_FW_VRAM_PF2VF_READ(adev, header.size, &pf2vf_size);
|
AMDGPU_FW_VRAM_PF2VF_READ(adev, header.size, &pf2vf_size);
|
||||||
AMDGPU_FW_VRAM_PF2VF_READ(adev, checksum, &checksum);
|
AMDGPU_FW_VRAM_PF2VF_READ(adev, checksum, &checksum);
|
||||||
|
AMDGPU_FW_VRAM_PF2VF_READ(adev, feature_flags, &adev->virt.gim_feature);
|
||||||
|
|
||||||
/* pf2vf message must be in 4K */
|
/* pf2vf message must be in 4K */
|
||||||
if (pf2vf_size > 0 && pf2vf_size < 4096) {
|
if (pf2vf_size > 0 && pf2vf_size < 4096) {
|
||||||
|
|
|
@@ -55,6 +55,7 @@ struct amdgpu_virt_ops {
 	int (*req_full_gpu)(struct amdgpu_device *adev, bool init);
 	int (*rel_full_gpu)(struct amdgpu_device *adev, bool init);
 	int (*reset_gpu)(struct amdgpu_device *adev);
+	int (*wait_reset)(struct amdgpu_device *adev);
 	void (*trans_msg)(struct amdgpu_device *adev, u32 req, u32 data1, u32 data2, u32 data3);
 };
 
@@ -80,6 +81,8 @@ enum AMDGIM_FEATURE_FLAG {
 	AMDGIM_FEATURE_ERROR_LOG_COLLECT = 0x1,
 	/* GIM supports feature of loading uCodes */
 	AMDGIM_FEATURE_GIM_LOAD_UCODES = 0x2,
+	/* VRAM LOST by GIM */
+	AMDGIM_FEATURE_GIM_FLR_VRAMLOST = 0x4,
 };
 
 struct amdgim_pf2vf_info_header {
@@ -238,7 +241,6 @@ struct amdgpu_virt {
 	uint64_t csa_vmid0_addr;
 	bool chained_ib_support;
 	uint32_t reg_val_offs;
-	struct mutex lock_reset;
 	struct amdgpu_irq_src ack_irq;
 	struct amdgpu_irq_src rcv_irq;
 	struct work_struct flr_work;
@@ -246,6 +248,7 @@ struct amdgpu_virt {
 	const struct amdgpu_virt_ops *ops;
 	struct amdgpu_vf_error_buffer vf_errors;
 	struct amdgpu_virt_fw_reserve fw_reserve;
+	uint32_t gim_feature;
 };
 
 #define AMDGPU_CSA_SIZE (8 * 1024)
@@ -276,16 +279,18 @@ static inline bool is_virtual_machine(void)
 }
 
 struct amdgpu_vm;
+bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev);
 int amdgpu_allocate_static_csa(struct amdgpu_device *adev);
 int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 			  struct amdgpu_bo_va **bo_va);
+void amdgpu_free_static_csa(struct amdgpu_device *adev);
 void amdgpu_virt_init_setting(struct amdgpu_device *adev);
 uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg);
 void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v);
 int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init);
 int amdgpu_virt_release_full_gpu(struct amdgpu_device *adev, bool init);
 int amdgpu_virt_reset_gpu(struct amdgpu_device *adev);
-int amdgpu_sriov_gpu_reset(struct amdgpu_device *adev, struct amdgpu_job *job);
+int amdgpu_virt_wait_reset(struct amdgpu_device *adev);
 int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev);
 void amdgpu_virt_free_mm_table(struct amdgpu_device *adev);
 int amdgpu_virt_fw_reserve_get_checksum(void *obj, unsigned long obj_size,

@@ -138,6 +138,24 @@ struct amdgpu_prt_cb {
 	struct dma_fence_cb cb;
 };
 
+/**
+ * amdgpu_vm_level_shift - return the addr shift for each level
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Returns the number of bits the pfn needs to be right shifted for a level.
+ */
+static unsigned amdgpu_vm_level_shift(struct amdgpu_device *adev,
+				      unsigned level)
+{
+	if (level != adev->vm_manager.num_level)
+		return 9 * (adev->vm_manager.num_level - level - 1) +
+			adev->vm_manager.block_size;
+	else
+		/* For the page tables on the leaves */
+		return 0;
+}
+
 /**
  * amdgpu_vm_num_entries - return the number of entries in a PD/PT
  *
@@ -148,17 +166,17 @@ struct amdgpu_prt_cb {
 static unsigned amdgpu_vm_num_entries(struct amdgpu_device *adev,
 				      unsigned level)
 {
+	unsigned shift = amdgpu_vm_level_shift(adev, 0);
+
 	if (level == 0)
 		/* For the root directory */
-		return adev->vm_manager.max_pfn >>
-			(adev->vm_manager.block_size *
-			 adev->vm_manager.num_level);
-	else if (level == adev->vm_manager.num_level)
+		return round_up(adev->vm_manager.max_pfn, 1 << shift) >> shift;
+	else if (level != adev->vm_manager.num_level)
+		/* Everything in between */
+		return 512;
+	else
 		/* For the page tables on the leaves */
 		return AMDGPU_VM_PTE_COUNT(adev);
-	else
-		/* Everything in between */
-		return 1 << adev->vm_manager.block_size;
 }
 
 /**
@@ -288,8 +306,7 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
 				  uint64_t saddr, uint64_t eaddr,
 				  unsigned level)
 {
-	unsigned shift = (adev->vm_manager.num_level - level) *
-		adev->vm_manager.block_size;
+	unsigned shift = amdgpu_vm_level_shift(adev, level);
 	unsigned pt_idx, from, to;
 	int r;
 	u64 flags;
@@ -471,7 +488,7 @@ static int amdgpu_vm_grab_reserved_vmid_locked(struct amdgpu_vm *vm,
 		id->pd_gpu_addr = 0;
 		tmp = amdgpu_sync_peek_fence(&id->active, ring);
 		if (tmp) {
-			r = amdgpu_sync_fence(adev, sync, tmp);
+			r = amdgpu_sync_fence(adev, sync, tmp, false);
 			return r;
 		}
 	}
@@ -479,7 +496,7 @@ static int amdgpu_vm_grab_reserved_vmid_locked(struct amdgpu_vm *vm,
 	/* Good we can use this VMID. Remember this submission as
 	 * user of the VMID.
 	 */
-	r = amdgpu_sync_fence(ring->adev, &id->active, fence);
+	r = amdgpu_sync_fence(ring->adev, &id->active, fence, false);
 	if (r)
 		goto out;
 
@@ -566,7 +583,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 	}
 
 
-	r = amdgpu_sync_fence(ring->adev, sync, &array->base);
+	r = amdgpu_sync_fence(ring->adev, sync, &array->base, false);
 	dma_fence_put(&array->base);
 	if (r)
 		goto error;
@@ -609,7 +626,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 		/* Good we can use this VMID. Remember this submission as
 		 * user of the VMID.
 		 */
-		r = amdgpu_sync_fence(ring->adev, &id->active, fence);
+		r = amdgpu_sync_fence(ring->adev, &id->active, fence, false);
 		if (r)
 			goto error;
 
@@ -629,7 +646,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 	id = idle;
 
 	/* Remember this submission as user of the VMID */
-	r = amdgpu_sync_fence(ring->adev, &id->active, fence);
+	r = amdgpu_sync_fence(ring->adev, &id->active, fence, false);
 	if (r)
 		goto error;
 
@@ -1302,18 +1319,19 @@ void amdgpu_vm_get_entry(struct amdgpu_pte_update_params *p, uint64_t addr,
 			 struct amdgpu_vm_pt **entry,
 			 struct amdgpu_vm_pt **parent)
 {
-	unsigned idx, level = p->adev->vm_manager.num_level;
+	unsigned level = 0;
 
 	*parent = NULL;
 	*entry = &p->vm->root;
 	while ((*entry)->entries) {
-		idx = addr >> (p->adev->vm_manager.block_size * level--);
+		unsigned idx = addr >> amdgpu_vm_level_shift(p->adev, level++);
+
 		idx %= amdgpu_bo_size((*entry)->base.bo) / 8;
 		*parent = *entry;
 		*entry = &(*entry)->entries[idx];
 	}
 
-	if (level)
+	if (level != p->adev->vm_manager.num_level)
 		*entry = NULL;
 }
 
@@ -1639,7 +1657,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 		addr = 0;
 	}
 
-	r = amdgpu_sync_fence(adev, &job->sync, exclusive);
+	r = amdgpu_sync_fence(adev, &job->sync, exclusive, false);
 	if (r)
 		goto error_free;
 
@@ -2555,48 +2573,58 @@ static uint32_t amdgpu_vm_get_block_size(uint64_t vm_size)
 	return ((bits + 3) / 2);
 }
 
-/**
- * amdgpu_vm_set_fragment_size - adjust fragment size in PTE
- *
- * @adev: amdgpu_device pointer
- * @fragment_size_default: the default fragment size if it's set auto
- */
-void amdgpu_vm_set_fragment_size(struct amdgpu_device *adev,
-				 uint32_t fragment_size_default)
-{
-	if (amdgpu_vm_fragment_size == -1)
-		adev->vm_manager.fragment_size = fragment_size_default;
-	else
-		adev->vm_manager.fragment_size = amdgpu_vm_fragment_size;
-}
-
 /**
  * amdgpu_vm_adjust_size - adjust vm size, block size and fragment size
  *
  * @adev: amdgpu_device pointer
  * @vm_size: the default vm size if it's set auto
  */
-void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint64_t vm_size,
-			   uint32_t fragment_size_default)
+void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t vm_size,
+			   uint32_t fragment_size_default, unsigned max_level,
+			   unsigned max_bits)
 {
-	/* adjust vm size firstly */
-	if (amdgpu_vm_size == -1)
-		adev->vm_manager.vm_size = vm_size;
-	else
-		adev->vm_manager.vm_size = amdgpu_vm_size;
+	uint64_t tmp;
 
-	/* block size depends on vm size */
-	if (amdgpu_vm_block_size == -1)
+	/* adjust vm size first */
+	if (amdgpu_vm_size != -1) {
+		unsigned max_size = 1 << (max_bits - 30);
+
+		vm_size = amdgpu_vm_size;
+		if (vm_size > max_size) {
+			dev_warn(adev->dev, "VM size (%d) too large, max is %u GB\n",
+				 amdgpu_vm_size, max_size);
+			vm_size = max_size;
+		}
+	}
+
+	adev->vm_manager.max_pfn = (uint64_t)vm_size << 18;
+
+	tmp = roundup_pow_of_two(adev->vm_manager.max_pfn);
+	if (amdgpu_vm_block_size != -1)
+		tmp >>= amdgpu_vm_block_size - 9;
+	tmp = DIV_ROUND_UP(fls64(tmp) - 1, 9) - 1;
+	adev->vm_manager.num_level = min(max_level, (unsigned)tmp);
+
+	/* block size depends on vm size and hw setup*/
+	if (amdgpu_vm_block_size != -1)
 		adev->vm_manager.block_size =
-			amdgpu_vm_get_block_size(adev->vm_manager.vm_size);
+			min((unsigned)amdgpu_vm_block_size, max_bits
+			    - AMDGPU_GPU_PAGE_SHIFT
+			    - 9 * adev->vm_manager.num_level);
+	else if (adev->vm_manager.num_level > 1)
+		adev->vm_manager.block_size = 9;
 	else
-		adev->vm_manager.block_size = amdgpu_vm_block_size;
+		adev->vm_manager.block_size = amdgpu_vm_get_block_size(tmp);
 
-	amdgpu_vm_set_fragment_size(adev, fragment_size_default);
+	if (amdgpu_vm_fragment_size == -1)
+		adev->vm_manager.fragment_size = fragment_size_default;
+	else
+		adev->vm_manager.fragment_size = amdgpu_vm_fragment_size;
 
-	DRM_INFO("vm size is %llu GB, block size is %u-bit, fragment size is %u-bit\n",
-		 adev->vm_manager.vm_size, adev->vm_manager.block_size,
-		 adev->vm_manager.fragment_size);
+	DRM_INFO("vm size is %u GB, %u levels, block size is %u-bit, fragment size is %u-bit\n",
+		 vm_size, adev->vm_manager.num_level + 1,
+		 adev->vm_manager.block_size,
+		 adev->vm_manager.fragment_size);
 }
 
 /**
@@ -2637,7 +2665,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 	ring = adev->vm_manager.vm_pte_rings[ring_instance];
 	rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_KERNEL];
 	r = amd_sched_entity_init(&ring->sched, &vm->entity,
-				  rq, amdgpu_sched_jobs);
+				  rq, amdgpu_sched_jobs, NULL);
 	if (r)
 		return r;
 

@@ -96,6 +96,19 @@ struct amdgpu_bo_list_entry;
 /* hardcode that limit for now */
 #define AMDGPU_VA_RESERVED_SIZE (8ULL << 20)
 
+/* VA hole for 48bit addresses on Vega10 */
+#define AMDGPU_VA_HOLE_START 0x0000800000000000ULL
+#define AMDGPU_VA_HOLE_END 0xffff800000000000ULL
+
+/*
+ * Hardware is programmed as if the hole doesn't exists with start and end
+ * address values.
+ *
+ * This mask is used to remove the upper 16bits of the VA and so come up with
+ * the linear addr value.
+ */
+#define AMDGPU_VA_HOLE_MASK 0x0000ffffffffffffULL
+
 /* max vmids dedicated for process */
 #define AMDGPU_VM_MAX_RESERVED_VMID 1
 
@@ -221,7 +234,6 @@ struct amdgpu_vm_manager {
 
 	uint64_t max_pfn;
 	uint32_t num_level;
-	uint64_t vm_size;
 	uint32_t block_size;
 	uint32_t fragment_size;
 	/* vram base address for page table entry */
@@ -312,10 +324,9 @@ struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
 							 uint64_t addr);
 void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
 		      struct amdgpu_bo_va *bo_va);
-void amdgpu_vm_set_fragment_size(struct amdgpu_device *adev,
-				 uint32_t fragment_size_default);
-void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint64_t vm_size,
-			   uint32_t fragment_size_default);
+void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t vm_size,
+			   uint32_t fragment_size_default, unsigned max_level,
+			   unsigned max_bits);
 int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
 bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
 				  struct amdgpu_job *job);

@@ -65,8 +65,15 @@ static int amdgpu_atombios_i2c_process_i2c_ch(struct amdgpu_i2c_chan *chan,
 		args.ucRegIndex = buf[0];
 		if (num)
 			num--;
-		if (num)
-			memcpy(&out, &buf[1], num);
+		if (num) {
+			if (buf) {
+				memcpy(&out, &buf[1], num);
+			} else {
+				DRM_ERROR("hw i2c: missing buf with num > 1\n");
+				r = -EINVAL;
+				goto done;
+			}
+		}
 		args.lpI2CDataOut = cpu_to_le16(out);
 	} else {
 		if (num > ATOM_MAX_HW_I2C_READ) {

@@ -4540,9 +4540,9 @@ static int ci_set_mc_special_registers(struct amdgpu_device *adev,
 					((temp_reg & 0xffff0000)) | ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
 			}
 			j++;
+
 			if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
 				return -EINVAL;
-
 			temp_reg = RREG32(mmMC_PMG_CMD_MRS);
 			table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS;
 			table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS_LP;
@@ -4553,10 +4553,10 @@ static int ci_set_mc_special_registers(struct amdgpu_device *adev,
 					table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
 			}
 			j++;
-			if (j > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
-				return -EINVAL;
 
 			if (adev->mc.vram_type != AMDGPU_VRAM_TYPE_GDDR5) {
+				if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
+					return -EINVAL;
 				table->mc_reg_address[j].s1 = mmMC_PMG_AUTO_CMD;
 				table->mc_reg_address[j].s0 = mmMC_PMG_AUTO_CMD;
 				for (k = 0; k < table->num_entries; k++) {
@@ -4564,8 +4564,6 @@ static int ci_set_mc_special_registers(struct amdgpu_device *adev,
 						(table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
 				}
 				j++;
-				if (j > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
-					return -EINVAL;
 			}
 			break;
 		case mmMC_SEQ_RESERVE_M:
@@ -4577,8 +4575,6 @@ static int ci_set_mc_special_registers(struct amdgpu_device *adev,
 					(temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
 			}
 			j++;
-			if (j > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
-				return -EINVAL;
 			break;
 		default:
 			break;
@@ -6625,9 +6621,9 @@ static int ci_dpm_print_clock_levels(void *handle,
 
 		for (i = 0; i < pcie_table->count; i++)
 			size += sprintf(buf + size, "%d: %s %s\n", i,
-					(pcie_table->dpm_levels[i].value == 0) ? "2.5GB, x1" :
-					(pcie_table->dpm_levels[i].value == 1) ? "5.0GB, x16" :
-					(pcie_table->dpm_levels[i].value == 2) ? "8.0GB, x16" : "",
+					(pcie_table->dpm_levels[i].value == 0) ? "2.5GT/s, x1" :
+					(pcie_table->dpm_levels[i].value == 1) ? "5.0GT/s, x16" :
+					(pcie_table->dpm_levels[i].value == 2) ? "8.0GT/s, x16" : "",
 					(i == now) ? "*" : "");
 		break;
 	default:

@@ -757,72 +757,72 @@ static void cik_init_golden_registers(struct amdgpu_device *adev)
 	case CHIP_BONAIRE:
 		amdgpu_program_register_sequence(adev,
 						 bonaire_mgcg_cgcg_init,
-						 (const u32)ARRAY_SIZE(bonaire_mgcg_cgcg_init));
+						 ARRAY_SIZE(bonaire_mgcg_cgcg_init));
 		amdgpu_program_register_sequence(adev,
 						 bonaire_golden_registers,
-						 (const u32)ARRAY_SIZE(bonaire_golden_registers));
+						 ARRAY_SIZE(bonaire_golden_registers));
 		amdgpu_program_register_sequence(adev,
 						 bonaire_golden_common_registers,
-						 (const u32)ARRAY_SIZE(bonaire_golden_common_registers));
+						 ARRAY_SIZE(bonaire_golden_common_registers));
 		amdgpu_program_register_sequence(adev,
 						 bonaire_golden_spm_registers,
-						 (const u32)ARRAY_SIZE(bonaire_golden_spm_registers));
+						 ARRAY_SIZE(bonaire_golden_spm_registers));
 		break;
 	case CHIP_KABINI:
 		amdgpu_program_register_sequence(adev,
 						 kalindi_mgcg_cgcg_init,
-						 (const u32)ARRAY_SIZE(kalindi_mgcg_cgcg_init));
+						 ARRAY_SIZE(kalindi_mgcg_cgcg_init));
 		amdgpu_program_register_sequence(adev,
 						 kalindi_golden_registers,
-						 (const u32)ARRAY_SIZE(kalindi_golden_registers));
+						 ARRAY_SIZE(kalindi_golden_registers));
 		amdgpu_program_register_sequence(adev,
 						 kalindi_golden_common_registers,
-						 (const u32)ARRAY_SIZE(kalindi_golden_common_registers));
+						 ARRAY_SIZE(kalindi_golden_common_registers));
 		amdgpu_program_register_sequence(adev,
 						 kalindi_golden_spm_registers,
-						 (const u32)ARRAY_SIZE(kalindi_golden_spm_registers));
+						 ARRAY_SIZE(kalindi_golden_spm_registers));
 		break;
 	case CHIP_MULLINS:
 		amdgpu_program_register_sequence(adev,
 						 kalindi_mgcg_cgcg_init,
-						 (const u32)ARRAY_SIZE(kalindi_mgcg_cgcg_init));
+						 ARRAY_SIZE(kalindi_mgcg_cgcg_init));
 		amdgpu_program_register_sequence(adev,
 						 godavari_golden_registers,
-						 (const u32)ARRAY_SIZE(godavari_golden_registers));
+						 ARRAY_SIZE(godavari_golden_registers));
 		amdgpu_program_register_sequence(adev,
 						 kalindi_golden_common_registers,
-						 (const u32)ARRAY_SIZE(kalindi_golden_common_registers));
+						 ARRAY_SIZE(kalindi_golden_common_registers));
 		amdgpu_program_register_sequence(adev,
 						 kalindi_golden_spm_registers,
-						 (const u32)ARRAY_SIZE(kalindi_golden_spm_registers));
+						 ARRAY_SIZE(kalindi_golden_spm_registers));
 		break;
 	case CHIP_KAVERI:
 		amdgpu_program_register_sequence(adev,
 						 spectre_mgcg_cgcg_init,
-						 (const u32)ARRAY_SIZE(spectre_mgcg_cgcg_init));
+						 ARRAY_SIZE(spectre_mgcg_cgcg_init));
 		amdgpu_program_register_sequence(adev,
 						 spectre_golden_registers,
-						 (const u32)ARRAY_SIZE(spectre_golden_registers));
+						 ARRAY_SIZE(spectre_golden_registers));
 		amdgpu_program_register_sequence(adev,
 						 spectre_golden_common_registers,
-						 (const u32)ARRAY_SIZE(spectre_golden_common_registers));
+						 ARRAY_SIZE(spectre_golden_common_registers));
 		amdgpu_program_register_sequence(adev,
 						 spectre_golden_spm_registers,
-						 (const u32)ARRAY_SIZE(spectre_golden_spm_registers));
+						 ARRAY_SIZE(spectre_golden_spm_registers));
 		break;
 	case CHIP_HAWAII:
 		amdgpu_program_register_sequence(adev,
 						 hawaii_mgcg_cgcg_init,
-						 (const u32)ARRAY_SIZE(hawaii_mgcg_cgcg_init));
+						 ARRAY_SIZE(hawaii_mgcg_cgcg_init));
 		amdgpu_program_register_sequence(adev,
 						 hawaii_golden_registers,
-						 (const u32)ARRAY_SIZE(hawaii_golden_registers));
+						 ARRAY_SIZE(hawaii_golden_registers));
 		amdgpu_program_register_sequence(adev,
 						 hawaii_golden_common_registers,
-						 (const u32)ARRAY_SIZE(hawaii_golden_common_registers));
+						 ARRAY_SIZE(hawaii_golden_common_registers));
 		amdgpu_program_register_sequence(adev,
 						 hawaii_golden_spm_registers,
-						 (const u32)ARRAY_SIZE(hawaii_golden_spm_registers));
+						 ARRAY_SIZE(hawaii_golden_spm_registers));
 		break;
 	default:
 		break;

@@ -657,7 +657,7 @@ static int cik_sdma_ring_test_ring(struct amdgpu_ring *ring)
 	}
 
 	if (i < adev->usec_timeout) {
-		DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
+		DRM_DEBUG("ring test on %d succeeded in %d usecs\n", ring->idx, i);
 	} else {
 		DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
 			  ring->idx, tmp);
@@ -724,7 +724,7 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 	}
 	tmp = le32_to_cpu(adev->wb.wb[index]);
 	if (tmp == 0xDEADBEEF) {
-		DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
+		DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
 		r = 0;
 	} else {
 		DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);

@@ -147,18 +147,18 @@ static void dce_v10_0_init_golden_registers(struct amdgpu_device *adev)
 	case CHIP_FIJI:
 		amdgpu_program_register_sequence(adev,
 						 fiji_mgcg_cgcg_init,
-						 (const u32)ARRAY_SIZE(fiji_mgcg_cgcg_init));
+						 ARRAY_SIZE(fiji_mgcg_cgcg_init));
 		amdgpu_program_register_sequence(adev,
 						 golden_settings_fiji_a10,
-						 (const u32)ARRAY_SIZE(golden_settings_fiji_a10));
+						 ARRAY_SIZE(golden_settings_fiji_a10));
 		break;
 	case CHIP_TONGA:
 		amdgpu_program_register_sequence(adev,
 						 tonga_mgcg_cgcg_init,
-						 (const u32)ARRAY_SIZE(tonga_mgcg_cgcg_init));
+						 ARRAY_SIZE(tonga_mgcg_cgcg_init));
 		amdgpu_program_register_sequence(adev,
 						 golden_settings_tonga_a11,
-						 (const u32)ARRAY_SIZE(golden_settings_tonga_a11));
+						 ARRAY_SIZE(golden_settings_tonga_a11));
 		break;
 	default:
 		break;
@@ -2773,7 +2773,6 @@ static int dce_v10_0_early_init(void *handle)
 	adev->audio_endpt_wreg = &dce_v10_0_audio_endpt_wreg;
 
 	dce_v10_0_set_display_funcs(adev);
-	dce_v10_0_set_irq_funcs(adev);
 
 	adev->mode_info.num_crtc = dce_v10_0_get_num_crtc(adev);
 
@@ -2788,6 +2787,8 @@ static int dce_v10_0_early_init(void *handle)
 		return -EINVAL;
 	}
 
+	dce_v10_0_set_irq_funcs(adev);
+
 	return 0;
 }
 
@@ -3635,13 +3636,16 @@ static const struct amdgpu_irq_src_funcs dce_v10_0_hpd_irq_funcs = {
 
 static void dce_v10_0_set_irq_funcs(struct amdgpu_device *adev)
 {
-	adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_LAST;
+	if (adev->mode_info.num_crtc > 0)
+		adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_VLINE1 + adev->mode_info.num_crtc;
+	else
+		adev->crtc_irq.num_types = 0;
 	adev->crtc_irq.funcs = &dce_v10_0_crtc_irq_funcs;
 
-	adev->pageflip_irq.num_types = AMDGPU_PAGEFLIP_IRQ_LAST;
+	adev->pageflip_irq.num_types = adev->mode_info.num_crtc;
 	adev->pageflip_irq.funcs = &dce_v10_0_pageflip_irq_funcs;
 
-	adev->hpd_irq.num_types = AMDGPU_HPD_LAST;
+	adev->hpd_irq.num_types = adev->mode_info.num_hpd;
 	adev->hpd_irq.funcs = &dce_v10_0_hpd_irq_funcs;
 }
 

@@ -156,26 +156,26 @@ static void dce_v11_0_init_golden_registers(struct amdgpu_device *adev)
 	case CHIP_CARRIZO:
 		amdgpu_program_register_sequence(adev,
 						 cz_mgcg_cgcg_init,
-						 (const u32)ARRAY_SIZE(cz_mgcg_cgcg_init));
+						 ARRAY_SIZE(cz_mgcg_cgcg_init));
 		amdgpu_program_register_sequence(adev,
 						 cz_golden_settings_a11,
-						 (const u32)ARRAY_SIZE(cz_golden_settings_a11));
+						 ARRAY_SIZE(cz_golden_settings_a11));
 		break;
 	case CHIP_STONEY:
 		amdgpu_program_register_sequence(adev,
 						 stoney_golden_settings_a11,
-						 (const u32)ARRAY_SIZE(stoney_golden_settings_a11));
+						 ARRAY_SIZE(stoney_golden_settings_a11));
 		break;
 	case CHIP_POLARIS11:
 	case CHIP_POLARIS12:
 		amdgpu_program_register_sequence(adev,
 						 polaris11_golden_settings_a11,
-						 (const u32)ARRAY_SIZE(polaris11_golden_settings_a11));
+						 ARRAY_SIZE(polaris11_golden_settings_a11));
 		break;
 	case CHIP_POLARIS10:
 		amdgpu_program_register_sequence(adev,
 						 polaris10_golden_settings_a11,
-						 (const u32)ARRAY_SIZE(polaris10_golden_settings_a11));
+						 ARRAY_SIZE(polaris10_golden_settings_a11));
 		break;
 	default:
 		break;
@@ -2876,7 +2876,6 @@ static int dce_v11_0_early_init(void *handle)
 	adev->audio_endpt_wreg = &dce_v11_0_audio_endpt_wreg;
 
 	dce_v11_0_set_display_funcs(adev);
-	dce_v11_0_set_irq_funcs(adev);
 
 	adev->mode_info.num_crtc = dce_v11_0_get_num_crtc(adev);
 
@@ -2903,6 +2902,8 @@ static int dce_v11_0_early_init(void *handle)
 		return -EINVAL;
 	}
 
+	dce_v11_0_set_irq_funcs(adev);
+
 	return 0;
 }
 
@@ -3759,13 +3760,16 @@ static const struct amdgpu_irq_src_funcs dce_v11_0_hpd_irq_funcs = {
 
 static void dce_v11_0_set_irq_funcs(struct amdgpu_device *adev)
 {
-	adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_LAST;
+	if (adev->mode_info.num_crtc > 0)
+		adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_VLINE1 + adev->mode_info.num_crtc;
+	else
+		adev->crtc_irq.num_types = 0;
 	adev->crtc_irq.funcs = &dce_v11_0_crtc_irq_funcs;
 
-	adev->pageflip_irq.num_types = AMDGPU_PAGEFLIP_IRQ_LAST;
+	adev->pageflip_irq.num_types = adev->mode_info.num_crtc;
 	adev->pageflip_irq.funcs = &dce_v11_0_pageflip_irq_funcs;
 
-	adev->hpd_irq.num_types = AMDGPU_HPD_LAST;
+	adev->hpd_irq.num_types = adev->mode_info.num_hpd;
 	adev->hpd_irq.funcs = &dce_v11_0_hpd_irq_funcs;
 }
 

@@ -2639,7 +2639,6 @@ static int dce_v6_0_early_init(void *handle)
 	adev->audio_endpt_wreg = &dce_v6_0_audio_endpt_wreg;
 
 	dce_v6_0_set_display_funcs(adev);
-	dce_v6_0_set_irq_funcs(adev);
 
 	adev->mode_info.num_crtc = dce_v6_0_get_num_crtc(adev);
 
@@ -2658,6 +2657,8 @@ static int dce_v6_0_early_init(void *handle)
 		return -EINVAL;
 	}
 
+	dce_v6_0_set_irq_funcs(adev);
+
 	return 0;
 }
 
@@ -3441,13 +3442,16 @@ static const struct amdgpu_irq_src_funcs dce_v6_0_hpd_irq_funcs = {
 
 static void dce_v6_0_set_irq_funcs(struct amdgpu_device *adev)
 {
-	adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_LAST;
+	if (adev->mode_info.num_crtc > 0)
+		adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_VLINE1 + adev->mode_info.num_crtc;
+	else
+		adev->crtc_irq.num_types = 0;
 	adev->crtc_irq.funcs = &dce_v6_0_crtc_irq_funcs;
 
-	adev->pageflip_irq.num_types = AMDGPU_PAGEFLIP_IRQ_LAST;
+	adev->pageflip_irq.num_types = adev->mode_info.num_crtc;
 	adev->pageflip_irq.funcs = &dce_v6_0_pageflip_irq_funcs;
 
-	adev->hpd_irq.num_types = AMDGPU_HPD_LAST;
+	adev->hpd_irq.num_types = adev->mode_info.num_hpd;
 	adev->hpd_irq.funcs = &dce_v6_0_hpd_irq_funcs;
 }
 

@@ -2664,7 +2664,6 @@ static int dce_v8_0_early_init(void *handle)
 	adev->audio_endpt_wreg = &dce_v8_0_audio_endpt_wreg;
 
 	dce_v8_0_set_display_funcs(adev);
-	dce_v8_0_set_irq_funcs(adev);
 
 	adev->mode_info.num_crtc = dce_v8_0_get_num_crtc(adev);
 
@@ -2688,6 +2687,8 @@ static int dce_v8_0_early_init(void *handle)
 		return -EINVAL;
 	}
 
+	dce_v8_0_set_irq_funcs(adev);
+
 	return 0;
 }
 
@@ -3525,13 +3526,16 @@ static const struct amdgpu_irq_src_funcs dce_v8_0_hpd_irq_funcs = {
 
 static void dce_v8_0_set_irq_funcs(struct amdgpu_device *adev)
 {
-	adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_LAST;
+	if (adev->mode_info.num_crtc > 0)
+		adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_VLINE1 + adev->mode_info.num_crtc;
+	else
+		adev->crtc_irq.num_types = 0;
 	adev->crtc_irq.funcs = &dce_v8_0_crtc_irq_funcs;
 
-	adev->pageflip_irq.num_types = AMDGPU_PAGEFLIP_IRQ_LAST;
+	adev->pageflip_irq.num_types = adev->mode_info.num_crtc;
 	adev->pageflip_irq.funcs = &dce_v8_0_pageflip_irq_funcs;
 
-	adev->hpd_irq.num_types = AMDGPU_HPD_LAST;
+	adev->hpd_irq.num_types = adev->mode_info.num_hpd;
 	adev->hpd_irq.funcs = &dce_v8_0_hpd_irq_funcs;
 }
 

@@ -44,6 +44,9 @@ static void dce_virtual_set_display_funcs(struct amdgpu_device *adev);
 static void dce_virtual_set_irq_funcs(struct amdgpu_device *adev);
 static int dce_virtual_connector_encoder_init(struct amdgpu_device *adev,
 					      int index);
+static void dce_virtual_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev,
+							 int crtc,
+							 enum amdgpu_interrupt_state state);
 
 /**
  * dce_virtual_vblank_wait - vblank wait asic callback.
@@ -437,6 +440,8 @@ static int dce_virtual_sw_fini(void *handle)
 	drm_kms_helper_poll_fini(adev->ddev);
 
 	drm_mode_config_cleanup(adev->ddev);
+	/* clear crtcs pointer to avoid dce irq finish routine access freed data */
+	memset(adev->mode_info.crtcs, 0, sizeof(adev->mode_info.crtcs[0]) * AMDGPU_MAX_CRTCS);
 	adev->mode_info.mode_config_initialized = false;
 	return 0;
 }
@@ -489,6 +494,13 @@ static int dce_virtual_hw_init(void *handle)
 
 static int dce_virtual_hw_fini(void *handle)
 {
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	int i = 0;
+
+	for (i = 0; i<adev->mode_info.num_crtc; i++)
+		if (adev->mode_info.crtcs[i])
+			dce_virtual_set_crtc_vblank_interrupt_state(adev, i, AMDGPU_IRQ_STATE_DISABLE);
+
 	return 0;
 }
 
@@ -723,7 +735,7 @@ static void dce_virtual_set_crtc_vblank_interrupt_state(struct amdgpu_device *ad
 							 int crtc,
 							 enum amdgpu_interrupt_state state)
 {
-	if (crtc >= adev->mode_info.num_crtc) {
+	if (crtc >= adev->mode_info.num_crtc || !adev->mode_info.crtcs[crtc]) {
 		DRM_DEBUG("invalid crtc %d\n", crtc);
 		return;
 	}

@@ -1798,7 +1798,7 @@ static int gfx_v6_0_ring_test_ring(struct amdgpu_ring *ring)
 		DRM_UDELAY(1);
 	}
 	if (i < adev->usec_timeout) {
-		DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
+		DRM_DEBUG("ring test on %d succeeded in %d usecs\n", ring->idx, i);
 	} else {
 		DRM_ERROR("amdgpu: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
 			  ring->idx, scratch, tmp);
@@ -1951,7 +1951,7 @@ static int gfx_v6_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 	}
 	tmp = RREG32(scratch);
 	if (tmp == 0xDEADBEEF) {
-		DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
+		DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
 		r = 0;
 	} else {
 		DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n",
@@ -2962,25 +2962,7 @@ static void gfx_v6_0_get_csb_buffer(struct amdgpu_device *adev,
 
 	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 1));
 	buffer[count++] = cpu_to_le32(mmPA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START);
-	switch (adev->asic_type) {
-	case CHIP_TAHITI:
-	case CHIP_PITCAIRN:
-		buffer[count++] = cpu_to_le32(0x2a00126a);
-		break;
-	case CHIP_VERDE:
-		buffer[count++] = cpu_to_le32(0x0000124a);
-		break;
-	case CHIP_OLAND:
-		buffer[count++] = cpu_to_le32(0x00000082);
-		break;
-	case CHIP_HAINAN:
-		buffer[count++] = cpu_to_le32(0x00000000);
-		break;
-	default:
-		buffer[count++] = cpu_to_le32(0x00000000);
-		break;
-	}
+	buffer[count++] = cpu_to_le32(adev->gfx.config.rb_config[0][0].raster_config);
 
 	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
 	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);

@@ -2085,7 +2085,7 @@ static int gfx_v7_0_ring_test_ring(struct amdgpu_ring *ring)
 		DRM_UDELAY(1);
 	}
 	if (i < adev->usec_timeout) {
-		DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
+		DRM_DEBUG("ring test on %d succeeded in %d usecs\n", ring->idx, i);
 	} else {
 		DRM_ERROR("amdgpu: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
 			  ring->idx, scratch, tmp);
@@ -2365,7 +2365,7 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 	}
 	tmp = RREG32(scratch);
 	if (tmp == 0xDEADBEEF) {
-		DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
+		DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
 		r = 0;
 	} else {
 		DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n",
@@ -2551,29 +2551,8 @@ static int gfx_v7_0_cp_gfx_start(struct amdgpu_device *adev)
 
 	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
 	amdgpu_ring_write(ring, mmPA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START);
-	switch (adev->asic_type) {
-	case CHIP_BONAIRE:
-		amdgpu_ring_write(ring, 0x16000012);
-		amdgpu_ring_write(ring, 0x00000000);
-		break;
-	case CHIP_KAVERI:
-		amdgpu_ring_write(ring, 0x00000000); /* XXX */
-		amdgpu_ring_write(ring, 0x00000000);
-		break;
-	case CHIP_KABINI:
-	case CHIP_MULLINS:
-		amdgpu_ring_write(ring, 0x00000000); /* XXX */
-		amdgpu_ring_write(ring, 0x00000000);
-		break;
-	case CHIP_HAWAII:
-		amdgpu_ring_write(ring, 0x3a00161a);
-		amdgpu_ring_write(ring, 0x0000002e);
-		break;
-	default:
-		amdgpu_ring_write(ring, 0x00000000);
-		amdgpu_ring_write(ring, 0x00000000);
-		break;
-	}
+	amdgpu_ring_write(ring, adev->gfx.config.rb_config[0][0].raster_config);
+	amdgpu_ring_write(ring, adev->gfx.config.rb_config[0][0].raster_config_1);
 
 	amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
 	amdgpu_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);

@@ -681,53 +681,53 @@ static void gfx_v8_0_init_golden_registers(struct amdgpu_device *adev)
 	case CHIP_TOPAZ:
 		amdgpu_program_register_sequence(adev,
 						 iceland_mgcg_cgcg_init,
-						 (const u32)ARRAY_SIZE(iceland_mgcg_cgcg_init));
+						 ARRAY_SIZE(iceland_mgcg_cgcg_init));
 		amdgpu_program_register_sequence(adev,
 						 golden_settings_iceland_a11,
-						 (const u32)ARRAY_SIZE(golden_settings_iceland_a11));
+						 ARRAY_SIZE(golden_settings_iceland_a11));
 		amdgpu_program_register_sequence(adev,
 						 iceland_golden_common_all,
-						 (const u32)ARRAY_SIZE(iceland_golden_common_all));
+						 ARRAY_SIZE(iceland_golden_common_all));
 		break;
 	case CHIP_FIJI:
 		amdgpu_program_register_sequence(adev,
 						 fiji_mgcg_cgcg_init,
-						 (const u32)ARRAY_SIZE(fiji_mgcg_cgcg_init));
+						 ARRAY_SIZE(fiji_mgcg_cgcg_init));
 		amdgpu_program_register_sequence(adev,
 						 golden_settings_fiji_a10,
-						 (const u32)ARRAY_SIZE(golden_settings_fiji_a10));
+						 ARRAY_SIZE(golden_settings_fiji_a10));
 		amdgpu_program_register_sequence(adev,
 						 fiji_golden_common_all,
-						 (const u32)ARRAY_SIZE(fiji_golden_common_all));
+						 ARRAY_SIZE(fiji_golden_common_all));
 		break;
 
 	case CHIP_TONGA:
 		amdgpu_program_register_sequence(adev,
 						 tonga_mgcg_cgcg_init,
-						 (const u32)ARRAY_SIZE(tonga_mgcg_cgcg_init));
+						 ARRAY_SIZE(tonga_mgcg_cgcg_init));
 		amdgpu_program_register_sequence(adev,
 						 golden_settings_tonga_a11,
-						 (const u32)ARRAY_SIZE(golden_settings_tonga_a11));
+						 ARRAY_SIZE(golden_settings_tonga_a11));
 		amdgpu_program_register_sequence(adev,
 						 tonga_golden_common_all,
-						 (const u32)ARRAY_SIZE(tonga_golden_common_all));
+						 ARRAY_SIZE(tonga_golden_common_all));
 		break;
 	case CHIP_POLARIS11:
 	case CHIP_POLARIS12:
 		amdgpu_program_register_sequence(adev,
 						 golden_settings_polaris11_a11,
-						 (const u32)ARRAY_SIZE(golden_settings_polaris11_a11));
+						 ARRAY_SIZE(golden_settings_polaris11_a11));
 		amdgpu_program_register_sequence(adev,
 						 polaris11_golden_common_all,
-						 (const u32)ARRAY_SIZE(polaris11_golden_common_all));
+						 ARRAY_SIZE(polaris11_golden_common_all));
 		break;
 	case CHIP_POLARIS10:
 		amdgpu_program_register_sequence(adev,
 						 golden_settings_polaris10_a11,
-						 (const u32)ARRAY_SIZE(golden_settings_polaris10_a11));
+						 ARRAY_SIZE(golden_settings_polaris10_a11));
 		amdgpu_program_register_sequence(adev,
 						 polaris10_golden_common_all,
-						 (const u32)ARRAY_SIZE(polaris10_golden_common_all));
+						 ARRAY_SIZE(polaris10_golden_common_all));
 		WREG32_SMC(ixCG_ACLK_CNTL, 0x0000001C);
 		if (adev->pdev->revision == 0xc7 &&
 		    ((adev->pdev->subsystem_device == 0xb37 && adev->pdev->subsystem_vendor == 0x1002) ||
@@ -740,24 +740,24 @@ static void gfx_v8_0_init_golden_registers(struct amdgpu_device *adev)
 	case CHIP_CARRIZO:
 		amdgpu_program_register_sequence(adev,
 						 cz_mgcg_cgcg_init,
-						 (const u32)ARRAY_SIZE(cz_mgcg_cgcg_init));
+						 ARRAY_SIZE(cz_mgcg_cgcg_init));
 		amdgpu_program_register_sequence(adev,
 						 cz_golden_settings_a11,
-						 (const u32)ARRAY_SIZE(cz_golden_settings_a11));
+						 ARRAY_SIZE(cz_golden_settings_a11));
 		amdgpu_program_register_sequence(adev,
 						 cz_golden_common_all,
-						 (const u32)ARRAY_SIZE(cz_golden_common_all));
+						 ARRAY_SIZE(cz_golden_common_all));
 		break;
 	case CHIP_STONEY:
 		amdgpu_program_register_sequence(adev,
 						 stoney_mgcg_cgcg_init,
-						 (const u32)ARRAY_SIZE(stoney_mgcg_cgcg_init));
+						 ARRAY_SIZE(stoney_mgcg_cgcg_init));
 		amdgpu_program_register_sequence(adev,
 						 stoney_golden_settings_a11,
-						 (const u32)ARRAY_SIZE(stoney_golden_settings_a11));
+						 ARRAY_SIZE(stoney_golden_settings_a11));
 		amdgpu_program_register_sequence(adev,
 						 stoney_golden_common_all,
-						 (const u32)ARRAY_SIZE(stoney_golden_common_all));
+						 ARRAY_SIZE(stoney_golden_common_all));
 		break;
 	default:
 		break;
@@ -804,7 +804,7 @@ static int gfx_v8_0_ring_test_ring(struct amdgpu_ring *ring)
 		DRM_UDELAY(1);
 	}
 	if (i < adev->usec_timeout) {
-		DRM_INFO("ring test on %d succeeded in %d usecs\n",
+		DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
 			 ring->idx, i);
 	} else {
 		DRM_ERROR("amdgpu: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
@@ -856,7 +856,7 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 	}
 	tmp = RREG32(scratch);
 	if (tmp == 0xDEADBEEF) {
-		DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
+		DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
 		r = 0;
 	} else {
 		DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n",
@@ -2114,7 +2114,6 @@ static int gfx_v8_0_sw_fini(void *handle)
 	amdgpu_gfx_compute_mqd_sw_fini(adev);
 	amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring, &adev->gfx.kiq.irq);
 	amdgpu_gfx_kiq_fini(adev);
-	amdgpu_bo_free_kernel(&adev->virt.csa_obj, &adev->virt.csa_vmid0_addr, NULL);
 
 	gfx_v8_0_mec_fini(adev);
 	gfx_v8_0_rlc_fini(adev);
@@ -3851,6 +3850,14 @@ static void gfx_v8_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
 					break;
 				udelay(1);
 			}
+			if (k == adev->usec_timeout) {
+				gfx_v8_0_select_se_sh(adev, 0xffffffff,
+						      0xffffffff, 0xffffffff);
+				mutex_unlock(&adev->grbm_idx_mutex);
+				DRM_INFO("Timeout wait for RLC serdes %u,%u\n",
+					 i, j);
+				return;
+			}
 		}
 	}
 	gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
@@ -4305,37 +4312,8 @@ static int gfx_v8_0_cp_gfx_start(struct amdgpu_device *adev)
 
 	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
 	amdgpu_ring_write(ring, mmPA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START);
-	switch (adev->asic_type) {
-	case CHIP_TONGA:
-	case CHIP_POLARIS10:
-		amdgpu_ring_write(ring, 0x16000012);
-		amdgpu_ring_write(ring, 0x0000002A);
-		break;
-	case CHIP_POLARIS11:
-	case CHIP_POLARIS12:
-		amdgpu_ring_write(ring, 0x16000012);
-		amdgpu_ring_write(ring, 0x00000000);
-		break;
-	case CHIP_FIJI:
-		amdgpu_ring_write(ring, 0x3a00161a);
-		amdgpu_ring_write(ring, 0x0000002e);
-		break;
-	case CHIP_CARRIZO:
-		amdgpu_ring_write(ring, 0x00000002);
-		amdgpu_ring_write(ring, 0x00000000);
-		break;
-	case CHIP_TOPAZ:
-		amdgpu_ring_write(ring, adev->gfx.config.num_rbs == 1 ?
-				0x00000000 : 0x00000002);
-		amdgpu_ring_write(ring, 0x00000000);
-		break;
-	case CHIP_STONEY:
-		amdgpu_ring_write(ring, 0x00000000);
-		amdgpu_ring_write(ring, 0x00000000);
-		break;
-	default:
-		BUG();
-	}
+	amdgpu_ring_write(ring, adev->gfx.config.rb_config[0][0].raster_config);
+	amdgpu_ring_write(ring, adev->gfx.config.rb_config[0][0].raster_config_1);
 
 	amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
 	amdgpu_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
@@ -4816,7 +4794,7 @@ static int gfx_v8_0_kiq_init_queue(struct amdgpu_ring *ring)
 
 	gfx_v8_0_kiq_setting(ring);
 
-	if (adev->in_sriov_reset) { /* for GPU_RESET case */
+	if (adev->in_gpu_reset) { /* for GPU_RESET case */
 		/* reset MQD to a clean status */
 		if (adev->gfx.mec.mqd_backup[mqd_idx])
 			memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct vi_mqd_allocation));
@@ -4853,7 +4831,7 @@ static int gfx_v8_0_kcq_init_queue(struct amdgpu_ring *ring)
 	struct vi_mqd *mqd = ring->mqd_ptr;
 	int mqd_idx = ring - &adev->gfx.compute_ring[0];
 
-	if (!adev->in_sriov_reset && !adev->gfx.in_suspend) {
+	if (!adev->in_gpu_reset && !adev->gfx.in_suspend) {
 		memset((void *)mqd, 0, sizeof(struct vi_mqd_allocation));
 		((struct vi_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
 		((struct vi_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
@@ -4865,13 +4843,10 @@ static int gfx_v8_0_kcq_init_queue(struct amdgpu_ring *ring)
 
 		if (adev->gfx.mec.mqd_backup[mqd_idx])
 			memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct vi_mqd_allocation));
-	} else if (adev->in_sriov_reset) { /* for GPU_RESET case */
+	} else if (adev->in_gpu_reset) { /* for GPU_RESET case */
 		/* reset MQD to a clean status */
 		if (adev->gfx.mec.mqd_backup[mqd_idx])
 			memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct vi_mqd_allocation));
-		/* reset ring buffer */
-		ring->wptr = 0;
-		amdgpu_ring_clear_ring(ring);
 	} else {
 		amdgpu_ring_clear_ring(ring);
 	}
@@ -4946,6 +4921,13 @@ static int gfx_v8_0_kiq_resume(struct amdgpu_device *adev)
 	/* Test KCQs */
 	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
 		ring = &adev->gfx.compute_ring[i];
+		if (adev->in_gpu_reset) {
+			/* move reset ring buffer to here to workaround
+			 * compute ring test failed
+			 */
+			ring->wptr = 0;
+			amdgpu_ring_clear_ring(ring);
+		}
 		ring->ready = true;
 		r = amdgpu_ring_test_ring(ring);
 		if (r)

@@ -28,11 +28,11 @@
 #include "soc15.h"
 #include "soc15d.h"
 
-#include "vega10/soc15ip.h"
-#include "vega10/GC/gc_9_0_offset.h"
-#include "vega10/GC/gc_9_0_sh_mask.h"
-#include "vega10/vega10_enum.h"
-#include "vega10/HDP/hdp_4_0_offset.h"
+#include "soc15ip.h"
+#include "gc/gc_9_0_offset.h"
+#include "gc/gc_9_0_sh_mask.h"
+#include "vega10_enum.h"
+#include "hdp/hdp_4_0_offset.h"
 
 #include "soc15_common.h"
 #include "clearstate_gfx9.h"
@@ -232,18 +232,18 @@ static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev)
 	case CHIP_VEGA10:
 		amdgpu_program_register_sequence(adev,
 						 golden_settings_gc_9_0,
-						 (const u32)ARRAY_SIZE(golden_settings_gc_9_0));
+						 ARRAY_SIZE(golden_settings_gc_9_0));
 		amdgpu_program_register_sequence(adev,
 						 golden_settings_gc_9_0_vg10,
-						 (const u32)ARRAY_SIZE(golden_settings_gc_9_0_vg10));
+						 ARRAY_SIZE(golden_settings_gc_9_0_vg10));
 		break;
 	case CHIP_RAVEN:
 		amdgpu_program_register_sequence(adev,
 						 golden_settings_gc_9_1,
-						 (const u32)ARRAY_SIZE(golden_settings_gc_9_1));
+						 ARRAY_SIZE(golden_settings_gc_9_1));
 		amdgpu_program_register_sequence(adev,
 						 golden_settings_gc_9_1_rv1,
-						 (const u32)ARRAY_SIZE(golden_settings_gc_9_1_rv1));
+						 ARRAY_SIZE(golden_settings_gc_9_1_rv1));
 		break;
 	default:
 		break;
@@ -327,7 +327,7 @@ static int gfx_v9_0_ring_test_ring(struct amdgpu_ring *ring)
 		DRM_UDELAY(1);
 	}
 	if (i < adev->usec_timeout) {
-		DRM_INFO("ring test on %d succeeded in %d usecs\n",
+		DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
 			 ring->idx, i);
 	} else {
 		DRM_ERROR("amdgpu: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
@@ -379,7 +379,7 @@ static int gfx_v9_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 	}
 	tmp = RREG32(scratch);
 	if (tmp == 0xDEADBEEF) {
-		DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
+		DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
 		r = 0;
 	} else {
 		DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n",
@@ -1464,7 +1464,6 @@ static int gfx_v9_0_sw_fini(void *handle)
 	amdgpu_gfx_compute_mqd_sw_fini(adev);
 	amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring, &adev->gfx.kiq.irq);
 	amdgpu_gfx_kiq_fini(adev);
-	amdgpu_bo_free_kernel(&adev->virt.csa_obj, &adev->virt.csa_vmid0_addr, NULL);
 
 	gfx_v9_0_mec_fini(adev);
 	gfx_v9_0_ngg_fini(adev);
|
||||||
|
@ -1645,6 +1644,14 @@ static void gfx_v9_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
|
||||||
break;
|
break;
|
||||||
udelay(1);
|
udelay(1);
|
||||||
}
|
}
|
||||||
|
if (k == adev->usec_timeout) {
|
||||||
|
gfx_v9_0_select_se_sh(adev, 0xffffffff,
|
||||||
|
0xffffffff, 0xffffffff);
|
||||||
|
mutex_unlock(&adev->grbm_idx_mutex);
|
||||||
|
DRM_INFO("Timeout wait for RLC serdes %u,%u\n",
|
||||||
|
i, j);
|
||||||
|
return;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
|
gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
|
||||||
|
@ -2749,7 +2756,7 @@ static int gfx_v9_0_kiq_init_queue(struct amdgpu_ring *ring)
|
||||||
|
|
||||||
gfx_v9_0_kiq_setting(ring);
|
gfx_v9_0_kiq_setting(ring);
|
||||||
|
|
||||||
if (adev->in_sriov_reset) { /* for GPU_RESET case */
|
if (adev->in_gpu_reset) { /* for GPU_RESET case */
|
||||||
/* reset MQD to a clean status */
|
/* reset MQD to a clean status */
|
||||||
if (adev->gfx.mec.mqd_backup[mqd_idx])
|
if (adev->gfx.mec.mqd_backup[mqd_idx])
|
||||||
memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct v9_mqd_allocation));
|
memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct v9_mqd_allocation));
|
||||||
|
@ -2787,7 +2794,7 @@ static int gfx_v9_0_kcq_init_queue(struct amdgpu_ring *ring)
|
||||||
struct v9_mqd *mqd = ring->mqd_ptr;
|
struct v9_mqd *mqd = ring->mqd_ptr;
|
||||||
int mqd_idx = ring - &adev->gfx.compute_ring[0];
|
int mqd_idx = ring - &adev->gfx.compute_ring[0];
|
||||||
|
|
||||||
if (!adev->in_sriov_reset && !adev->gfx.in_suspend) {
|
if (!adev->in_gpu_reset && !adev->gfx.in_suspend) {
|
||||||
memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
|
memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
|
||||||
((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
|
((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
|
||||||
((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
|
((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
|
||||||
|
@ -2799,7 +2806,7 @@ static int gfx_v9_0_kcq_init_queue(struct amdgpu_ring *ring)
|
||||||
|
|
||||||
if (adev->gfx.mec.mqd_backup[mqd_idx])
|
if (adev->gfx.mec.mqd_backup[mqd_idx])
|
||||||
memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct v9_mqd_allocation));
|
memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct v9_mqd_allocation));
|
||||||
} else if (adev->in_sriov_reset) { /* for GPU_RESET case */
|
} else if (adev->in_gpu_reset) { /* for GPU_RESET case */
|
||||||
/* reset MQD to a clean status */
|
/* reset MQD to a clean status */
|
||||||
if (adev->gfx.mec.mqd_backup[mqd_idx])
|
if (adev->gfx.mec.mqd_backup[mqd_idx])
|
||||||
memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct v9_mqd_allocation));
|
memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct v9_mqd_allocation));
|
||||||
|
|
|
@ -23,11 +23,11 @@
|
||||||
#include "amdgpu.h"
|
#include "amdgpu.h"
|
||||||
#include "gfxhub_v1_0.h"
|
#include "gfxhub_v1_0.h"
|
||||||
|
|
||||||
#include "vega10/soc15ip.h"
|
#include "soc15ip.h"
|
||||||
#include "vega10/GC/gc_9_0_offset.h"
|
#include "gc/gc_9_0_offset.h"
|
||||||
#include "vega10/GC/gc_9_0_sh_mask.h"
|
#include "gc/gc_9_0_sh_mask.h"
|
||||||
#include "vega10/GC/gc_9_0_default.h"
|
#include "gc/gc_9_0_default.h"
|
||||||
#include "vega10/vega10_enum.h"
|
#include "vega10_enum.h"
|
||||||
|
|
||||||
#include "soc15_common.h"
|
#include "soc15_common.h"
|
||||||
|
|
||||||
|
|
|
@ -222,11 +222,6 @@ static void gmc_v6_0_vram_gtt_location(struct amdgpu_device *adev,
|
||||||
u64 base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF;
|
u64 base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF;
|
||||||
base <<= 24;
|
base <<= 24;
|
||||||
|
|
||||||
if (mc->mc_vram_size > 0xFFC0000000ULL) {
|
|
||||||
dev_warn(adev->dev, "limiting VRAM\n");
|
|
||||||
mc->real_vram_size = 0xFFC0000000ULL;
|
|
||||||
mc->mc_vram_size = 0xFFC0000000ULL;
|
|
||||||
}
|
|
||||||
amdgpu_vram_location(adev, &adev->mc, base);
|
amdgpu_vram_location(adev, &adev->mc, base);
|
||||||
amdgpu_gart_location(adev, mc);
|
amdgpu_gart_location(adev, mc);
|
||||||
}
|
}
|
||||||
|
@ -283,6 +278,7 @@ static int gmc_v6_0_mc_init(struct amdgpu_device *adev)
|
||||||
|
|
||||||
u32 tmp;
|
u32 tmp;
|
||||||
int chansize, numchan;
|
int chansize, numchan;
|
||||||
|
int r;
|
||||||
|
|
||||||
tmp = RREG32(mmMC_ARB_RAMCFG);
|
tmp = RREG32(mmMC_ARB_RAMCFG);
|
||||||
if (tmp & (1 << 11)) {
|
if (tmp & (1 << 11)) {
|
||||||
|
@ -324,12 +320,17 @@ static int gmc_v6_0_mc_init(struct amdgpu_device *adev)
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
adev->mc.vram_width = numchan * chansize;
|
adev->mc.vram_width = numchan * chansize;
|
||||||
/* Could aper size report 0 ? */
|
|
||||||
adev->mc.aper_base = pci_resource_start(adev->pdev, 0);
|
|
||||||
adev->mc.aper_size = pci_resource_len(adev->pdev, 0);
|
|
||||||
/* size in MB on si */
|
/* size in MB on si */
|
||||||
adev->mc.mc_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
|
adev->mc.mc_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
|
||||||
adev->mc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
|
adev->mc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
|
||||||
|
|
||||||
|
if (!(adev->flags & AMD_IS_APU)) {
|
||||||
|
r = amdgpu_device_resize_fb_bar(adev);
|
||||||
|
if (r)
|
||||||
|
return r;
|
||||||
|
}
|
||||||
|
adev->mc.aper_base = pci_resource_start(adev->pdev, 0);
|
||||||
|
adev->mc.aper_size = pci_resource_len(adev->pdev, 0);
|
||||||
adev->mc.visible_vram_size = adev->mc.aper_size;
|
adev->mc.visible_vram_size = adev->mc.aper_size;
|
||||||
|
|
||||||
/* set the gart size */
|
/* set the gart size */
|
||||||
|
@ -831,8 +832,7 @@ static int gmc_v6_0_sw_init(void *handle)
|
||||||
if (r)
|
if (r)
|
||||||
return r;
|
return r;
|
||||||
|
|
||||||
amdgpu_vm_adjust_size(adev, 64, 9);
|
amdgpu_vm_adjust_size(adev, 64, 9, 1, 40);
|
||||||
adev->vm_manager.max_pfn = adev->vm_manager.vm_size << 18;
|
|
||||||
|
|
||||||
adev->mc.mc_mask = 0xffffffffffULL;
|
adev->mc.mc_mask = 0xffffffffffULL;
|
||||||
|
|
||||||
|
@ -877,7 +877,6 @@ static int gmc_v6_0_sw_init(void *handle)
|
||||||
* amdkfd will use VMIDs 8-15
|
* amdkfd will use VMIDs 8-15
|
||||||
*/
|
*/
|
||||||
adev->vm_manager.id_mgr[0].num_ids = AMDGPU_NUM_OF_VMIDS;
|
adev->vm_manager.id_mgr[0].num_ids = AMDGPU_NUM_OF_VMIDS;
|
||||||
adev->vm_manager.num_level = 1;
|
|
||||||
amdgpu_vm_manager_init(adev);
|
amdgpu_vm_manager_init(adev);
|
||||||
|
|
||||||
/* base offset of vram pages */
|
/* base offset of vram pages */
|
||||||
|
@ -897,9 +896,9 @@ static int gmc_v6_0_sw_fini(void *handle)
|
||||||
{
|
{
|
||||||
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
|
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
|
||||||
|
|
||||||
|
amdgpu_gem_force_release(adev);
|
||||||
amdgpu_vm_manager_fini(adev);
|
amdgpu_vm_manager_fini(adev);
|
||||||
gmc_v6_0_gart_fini(adev);
|
gmc_v6_0_gart_fini(adev);
|
||||||
amdgpu_gem_force_release(adev);
|
|
||||||
amdgpu_bo_fini(adev);
|
amdgpu_bo_fini(adev);
|
||||||
release_firmware(adev->mc.fw);
|
release_firmware(adev->mc.fw);
|
||||||
adev->mc.fw = NULL;
|
adev->mc.fw = NULL;
|
||||||
|
|
|
@ -69,10 +69,10 @@ static void gmc_v7_0_init_golden_registers(struct amdgpu_device *adev)
|
||||||
case CHIP_TOPAZ:
|
case CHIP_TOPAZ:
|
||||||
amdgpu_program_register_sequence(adev,
|
amdgpu_program_register_sequence(adev,
|
||||||
iceland_mgcg_cgcg_init,
|
iceland_mgcg_cgcg_init,
|
||||||
(const u32)ARRAY_SIZE(iceland_mgcg_cgcg_init));
|
ARRAY_SIZE(iceland_mgcg_cgcg_init));
|
||||||
amdgpu_program_register_sequence(adev,
|
amdgpu_program_register_sequence(adev,
|
||||||
golden_settings_iceland_a11,
|
golden_settings_iceland_a11,
|
||||||
(const u32)ARRAY_SIZE(golden_settings_iceland_a11));
|
ARRAY_SIZE(golden_settings_iceland_a11));
|
||||||
break;
|
break;
|
||||||
default:
|
default:
|
||||||
break;
|
break;
|
||||||
|
@ -240,12 +240,6 @@ static void gmc_v7_0_vram_gtt_location(struct amdgpu_device *adev,
|
||||||
u64 base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF;
|
u64 base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF;
|
||||||
base <<= 24;
|
base <<= 24;
|
||||||
|
|
||||||
if (mc->mc_vram_size > 0xFFC0000000ULL) {
|
|
||||||
/* leave room for at least 1024M GTT */
|
|
||||||
dev_warn(adev->dev, "limiting VRAM\n");
|
|
||||||
mc->real_vram_size = 0xFFC0000000ULL;
|
|
||||||
mc->mc_vram_size = 0xFFC0000000ULL;
|
|
||||||
}
|
|
||||||
amdgpu_vram_location(adev, &adev->mc, base);
|
amdgpu_vram_location(adev, &adev->mc, base);
|
||||||
amdgpu_gart_location(adev, mc);
|
amdgpu_gart_location(adev, mc);
|
||||||
}
|
}
|
||||||
|
@ -322,6 +316,8 @@ static void gmc_v7_0_mc_program(struct amdgpu_device *adev)
|
||||||
*/
|
*/
|
||||||
static int gmc_v7_0_mc_init(struct amdgpu_device *adev)
|
static int gmc_v7_0_mc_init(struct amdgpu_device *adev)
|
||||||
{
|
{
|
||||||
|
int r;
|
||||||
|
|
||||||
adev->mc.vram_width = amdgpu_atombios_get_vram_width(adev);
|
adev->mc.vram_width = amdgpu_atombios_get_vram_width(adev);
|
||||||
if (!adev->mc.vram_width) {
|
if (!adev->mc.vram_width) {
|
||||||
u32 tmp;
|
u32 tmp;
|
||||||
|
@ -367,13 +363,18 @@ static int gmc_v7_0_mc_init(struct amdgpu_device *adev)
|
||||||
}
|
}
|
||||||
adev->mc.vram_width = numchan * chansize;
|
adev->mc.vram_width = numchan * chansize;
|
||||||
}
|
}
|
||||||
/* Could aper size report 0 ? */
|
|
||||||
adev->mc.aper_base = pci_resource_start(adev->pdev, 0);
|
|
||||||
adev->mc.aper_size = pci_resource_len(adev->pdev, 0);
|
|
||||||
/* size in MB on si */
|
/* size in MB on si */
|
||||||
adev->mc.mc_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
|
adev->mc.mc_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
|
||||||
adev->mc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
|
adev->mc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
|
||||||
|
|
||||||
|
if (!(adev->flags & AMD_IS_APU)) {
|
||||||
|
r = amdgpu_device_resize_fb_bar(adev);
|
||||||
|
if (r)
|
||||||
|
return r;
|
||||||
|
}
|
||||||
|
adev->mc.aper_base = pci_resource_start(adev->pdev, 0);
|
||||||
|
adev->mc.aper_size = pci_resource_len(adev->pdev, 0);
|
||||||
|
|
||||||
#ifdef CONFIG_X86_64
|
#ifdef CONFIG_X86_64
|
||||||
if (adev->flags & AMD_IS_APU) {
|
if (adev->flags & AMD_IS_APU) {
|
||||||
adev->mc.aper_base = ((u64)RREG32(mmMC_VM_FB_OFFSET)) << 22;
|
adev->mc.aper_base = ((u64)RREG32(mmMC_VM_FB_OFFSET)) << 22;
|
||||||
|
@ -970,8 +971,7 @@ static int gmc_v7_0_sw_init(void *handle)
|
||||||
* Currently set to 4GB ((1 << 20) 4k pages).
|
* Currently set to 4GB ((1 << 20) 4k pages).
|
||||||
* Max GPUVM size for cayman and SI is 40 bits.
|
* Max GPUVM size for cayman and SI is 40 bits.
|
||||||
*/
|
*/
|
||||||
amdgpu_vm_adjust_size(adev, 64, 9);
|
amdgpu_vm_adjust_size(adev, 64, 9, 1, 40);
|
||||||
adev->vm_manager.max_pfn = adev->vm_manager.vm_size << 18;
|
|
||||||
|
|
||||||
/* Set the internal MC address mask
|
/* Set the internal MC address mask
|
||||||
* This is the max address of the GPU's
|
* This is the max address of the GPU's
|
||||||
|
@ -1026,7 +1026,6 @@ static int gmc_v7_0_sw_init(void *handle)
|
||||||
* amdkfd will use VMIDs 8-15
|
* amdkfd will use VMIDs 8-15
|
||||||
*/
|
*/
|
||||||
adev->vm_manager.id_mgr[0].num_ids = AMDGPU_NUM_OF_VMIDS;
|
adev->vm_manager.id_mgr[0].num_ids = AMDGPU_NUM_OF_VMIDS;
|
||||||
adev->vm_manager.num_level = 1;
|
|
||||||
amdgpu_vm_manager_init(adev);
|
amdgpu_vm_manager_init(adev);
|
||||||
|
|
||||||
/* base offset of vram pages */
|
/* base offset of vram pages */
|
||||||
|
@ -1046,9 +1045,9 @@ static int gmc_v7_0_sw_fini(void *handle)
|
||||||
{
|
{
|
||||||
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
|
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
|
||||||
|
|
||||||
|
amdgpu_gem_force_release(adev);
|
||||||
amdgpu_vm_manager_fini(adev);
|
amdgpu_vm_manager_fini(adev);
|
||||||
gmc_v7_0_gart_fini(adev);
|
gmc_v7_0_gart_fini(adev);
|
||||||
amdgpu_gem_force_release(adev);
|
|
||||||
amdgpu_bo_fini(adev);
|
amdgpu_bo_fini(adev);
|
||||||
release_firmware(adev->mc.fw);
|
release_firmware(adev->mc.fw);
|
||||||
adev->mc.fw = NULL;
|
adev->mc.fw = NULL;
|
||||||
|
|
|
@ -122,42 +122,42 @@ static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev)
|
||||||
case CHIP_FIJI:
|
case CHIP_FIJI:
|
||||||
amdgpu_program_register_sequence(adev,
|
amdgpu_program_register_sequence(adev,
|
||||||
fiji_mgcg_cgcg_init,
|
fiji_mgcg_cgcg_init,
|
||||||
(const u32)ARRAY_SIZE(fiji_mgcg_cgcg_init));
|
ARRAY_SIZE(fiji_mgcg_cgcg_init));
|
||||||
amdgpu_program_register_sequence(adev,
|
amdgpu_program_register_sequence(adev,
|
||||||
golden_settings_fiji_a10,
|
golden_settings_fiji_a10,
|
||||||
(const u32)ARRAY_SIZE(golden_settings_fiji_a10));
|
ARRAY_SIZE(golden_settings_fiji_a10));
|
||||||
break;
|
break;
|
||||||
case CHIP_TONGA:
|
case CHIP_TONGA:
|
||||||
amdgpu_program_register_sequence(adev,
|
amdgpu_program_register_sequence(adev,
|
||||||
tonga_mgcg_cgcg_init,
|
tonga_mgcg_cgcg_init,
|
||||||
(const u32)ARRAY_SIZE(tonga_mgcg_cgcg_init));
|
ARRAY_SIZE(tonga_mgcg_cgcg_init));
|
||||||
amdgpu_program_register_sequence(adev,
|
amdgpu_program_register_sequence(adev,
|
||||||
golden_settings_tonga_a11,
|
golden_settings_tonga_a11,
|
||||||
(const u32)ARRAY_SIZE(golden_settings_tonga_a11));
|
ARRAY_SIZE(golden_settings_tonga_a11));
|
||||||
break;
|
break;
|
||||||
case CHIP_POLARIS11:
|
case CHIP_POLARIS11:
|
||||||
case CHIP_POLARIS12:
|
case CHIP_POLARIS12:
|
||||||
amdgpu_program_register_sequence(adev,
|
amdgpu_program_register_sequence(adev,
|
||||||
golden_settings_polaris11_a11,
|
golden_settings_polaris11_a11,
|
||||||
(const u32)ARRAY_SIZE(golden_settings_polaris11_a11));
|
ARRAY_SIZE(golden_settings_polaris11_a11));
|
||||||
break;
|
break;
|
||||||
case CHIP_POLARIS10:
|
case CHIP_POLARIS10:
|
||||||
amdgpu_program_register_sequence(adev,
|
amdgpu_program_register_sequence(adev,
|
||||||
golden_settings_polaris10_a11,
|
golden_settings_polaris10_a11,
|
||||||
(const u32)ARRAY_SIZE(golden_settings_polaris10_a11));
|
ARRAY_SIZE(golden_settings_polaris10_a11));
|
||||||
break;
|
break;
|
||||||
case CHIP_CARRIZO:
|
case CHIP_CARRIZO:
|
||||||
amdgpu_program_register_sequence(adev,
|
amdgpu_program_register_sequence(adev,
|
||||||
cz_mgcg_cgcg_init,
|
cz_mgcg_cgcg_init,
|
||||||
(const u32)ARRAY_SIZE(cz_mgcg_cgcg_init));
|
ARRAY_SIZE(cz_mgcg_cgcg_init));
|
||||||
break;
|
break;
|
||||||
case CHIP_STONEY:
|
case CHIP_STONEY:
|
||||||
amdgpu_program_register_sequence(adev,
|
amdgpu_program_register_sequence(adev,
|
||||||
stoney_mgcg_cgcg_init,
|
stoney_mgcg_cgcg_init,
|
||||||
(const u32)ARRAY_SIZE(stoney_mgcg_cgcg_init));
|
ARRAY_SIZE(stoney_mgcg_cgcg_init));
|
||||||
amdgpu_program_register_sequence(adev,
|
amdgpu_program_register_sequence(adev,
|
||||||
golden_settings_stoney_common,
|
golden_settings_stoney_common,
|
||||||
(const u32)ARRAY_SIZE(golden_settings_stoney_common));
|
ARRAY_SIZE(golden_settings_stoney_common));
|
||||||
break;
|
break;
|
||||||
default:
|
default:
|
||||||
break;
|
break;
|
||||||
|
@ -405,12 +405,6 @@ static void gmc_v8_0_vram_gtt_location(struct amdgpu_device *adev,
|
||||||
base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF;
|
base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF;
|
||||||
base <<= 24;
|
base <<= 24;
|
||||||
|
|
||||||
if (mc->mc_vram_size > 0xFFC0000000ULL) {
|
|
||||||
/* leave room for at least 1024M GTT */
|
|
||||||
dev_warn(adev->dev, "limiting VRAM\n");
|
|
||||||
mc->real_vram_size = 0xFFC0000000ULL;
|
|
||||||
mc->mc_vram_size = 0xFFC0000000ULL;
|
|
||||||
}
|
|
||||||
amdgpu_vram_location(adev, &adev->mc, base);
|
amdgpu_vram_location(adev, &adev->mc, base);
|
||||||
amdgpu_gart_location(adev, mc);
|
amdgpu_gart_location(adev, mc);
|
||||||
}
|
}
|
||||||
|
@ -498,6 +492,8 @@ static void gmc_v8_0_mc_program(struct amdgpu_device *adev)
|
||||||
*/
|
*/
|
||||||
static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
|
static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
|
||||||
{
|
{
|
||||||
|
int r;
|
||||||
|
|
||||||
adev->mc.vram_width = amdgpu_atombios_get_vram_width(adev);
|
adev->mc.vram_width = amdgpu_atombios_get_vram_width(adev);
|
||||||
if (!adev->mc.vram_width) {
|
if (!adev->mc.vram_width) {
|
||||||
u32 tmp;
|
u32 tmp;
|
||||||
|
@ -543,13 +539,18 @@ static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
|
||||||
}
|
}
|
||||||
adev->mc.vram_width = numchan * chansize;
|
adev->mc.vram_width = numchan * chansize;
|
||||||
}
|
}
|
||||||
/* Could aper size report 0 ? */
|
|
||||||
adev->mc.aper_base = pci_resource_start(adev->pdev, 0);
|
|
||||||
adev->mc.aper_size = pci_resource_len(adev->pdev, 0);
|
|
||||||
/* size in MB on si */
|
/* size in MB on si */
|
||||||
adev->mc.mc_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
|
adev->mc.mc_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
|
||||||
adev->mc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
|
adev->mc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
|
||||||
|
|
||||||
|
if (!(adev->flags & AMD_IS_APU)) {
|
||||||
|
r = amdgpu_device_resize_fb_bar(adev);
|
||||||
|
if (r)
|
||||||
|
return r;
|
||||||
|
}
|
||||||
|
adev->mc.aper_base = pci_resource_start(adev->pdev, 0);
|
||||||
|
adev->mc.aper_size = pci_resource_len(adev->pdev, 0);
|
||||||
|
|
||||||
#ifdef CONFIG_X86_64
|
#ifdef CONFIG_X86_64
|
||||||
if (adev->flags & AMD_IS_APU) {
|
if (adev->flags & AMD_IS_APU) {
|
||||||
adev->mc.aper_base = ((u64)RREG32(mmMC_VM_FB_OFFSET)) << 22;
|
adev->mc.aper_base = ((u64)RREG32(mmMC_VM_FB_OFFSET)) << 22;
|
||||||
|
@ -1067,8 +1068,7 @@ static int gmc_v8_0_sw_init(void *handle)
|
||||||
* Currently set to 4GB ((1 << 20) 4k pages).
|
* Currently set to 4GB ((1 << 20) 4k pages).
|
||||||
* Max GPUVM size for cayman and SI is 40 bits.
|
* Max GPUVM size for cayman and SI is 40 bits.
|
||||||
*/
|
*/
|
||||||
amdgpu_vm_adjust_size(adev, 64, 9);
|
amdgpu_vm_adjust_size(adev, 64, 9, 1, 40);
|
||||||
adev->vm_manager.max_pfn = adev->vm_manager.vm_size << 18;
|
|
||||||
|
|
||||||
/* Set the internal MC address mask
|
/* Set the internal MC address mask
|
||||||
* This is the max address of the GPU's
|
* This is the max address of the GPU's
|
||||||
|
@ -1123,7 +1123,6 @@ static int gmc_v8_0_sw_init(void *handle)
|
||||||
* amdkfd will use VMIDs 8-15
|
* amdkfd will use VMIDs 8-15
|
||||||
*/
|
*/
|
||||||
adev->vm_manager.id_mgr[0].num_ids = AMDGPU_NUM_OF_VMIDS;
|
adev->vm_manager.id_mgr[0].num_ids = AMDGPU_NUM_OF_VMIDS;
|
||||||
adev->vm_manager.num_level = 1;
|
|
||||||
amdgpu_vm_manager_init(adev);
|
amdgpu_vm_manager_init(adev);
|
||||||
|
|
||||||
/* base offset of vram pages */
|
/* base offset of vram pages */
|
||||||
|
@ -1143,9 +1142,9 @@ static int gmc_v8_0_sw_fini(void *handle)
|
||||||
{
|
{
|
||||||
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
|
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
|
||||||
|
|
||||||
|
amdgpu_gem_force_release(adev);
|
||||||
amdgpu_vm_manager_fini(adev);
|
amdgpu_vm_manager_fini(adev);
|
||||||
gmc_v8_0_gart_fini(adev);
|
gmc_v8_0_gart_fini(adev);
|
||||||
amdgpu_gem_force_release(adev);
|
|
||||||
amdgpu_bo_fini(adev);
|
amdgpu_bo_fini(adev);
|
||||||
release_firmware(adev->mc.fw);
|
release_firmware(adev->mc.fw);
|
||||||
adev->mc.fw = NULL;
|
adev->mc.fw = NULL;
|
||||||
|
|
|
@ -25,17 +25,18 @@
|
||||||
#include "gmc_v9_0.h"
|
#include "gmc_v9_0.h"
|
||||||
#include "amdgpu_atomfirmware.h"
|
#include "amdgpu_atomfirmware.h"
|
||||||
|
|
||||||
#include "vega10/soc15ip.h"
|
#include "soc15ip.h"
|
||||||
#include "vega10/HDP/hdp_4_0_offset.h"
|
#include "hdp/hdp_4_0_offset.h"
|
||||||
#include "vega10/HDP/hdp_4_0_sh_mask.h"
|
#include "hdp/hdp_4_0_sh_mask.h"
|
||||||
#include "vega10/GC/gc_9_0_sh_mask.h"
|
#include "gc/gc_9_0_sh_mask.h"
|
||||||
#include "vega10/DC/dce_12_0_offset.h"
|
#include "dce/dce_12_0_offset.h"
|
||||||
#include "vega10/DC/dce_12_0_sh_mask.h"
|
#include "dce/dce_12_0_sh_mask.h"
|
||||||
#include "vega10/vega10_enum.h"
|
#include "vega10_enum.h"
|
||||||
#include "vega10/MMHUB/mmhub_1_0_offset.h"
|
#include "mmhub/mmhub_1_0_offset.h"
|
||||||
#include "vega10/ATHUB/athub_1_0_offset.h"
|
#include "athub/athub_1_0_offset.h"
|
||||||
|
|
||||||
#include "soc15_common.h"
|
#include "soc15_common.h"
|
||||||
|
#include "umc/umc_6_0_sh_mask.h"
|
||||||
|
|
||||||
#include "nbio_v6_1.h"
|
#include "nbio_v6_1.h"
|
||||||
#include "nbio_v7_0.h"
|
#include "nbio_v7_0.h"
|
||||||
|
@ -85,6 +86,121 @@ static const u32 golden_settings_athub_1_0_0[] =
|
||||||
SOC15_REG_OFFSET(ATHUB, 0, mmRPB_ARB_CNTL2), 0x00ff00ff, 0x00080008
|
SOC15_REG_OFFSET(ATHUB, 0, mmRPB_ARB_CNTL2), 0x00ff00ff, 0x00080008
|
||||||
};
|
};
|
||||||
|
|
||||||
|
/* Ecc related register addresses, (BASE + reg offset) */
|
||||||
|
/* Universal Memory Controller caps (may be fused). */
|
||||||
|
/* UMCCH:UmcLocalCap */
|
||||||
|
#define UMCLOCALCAPS_ADDR0 (0x00014306 + 0x00000000)
|
||||||
|
#define UMCLOCALCAPS_ADDR1 (0x00014306 + 0x00000800)
|
||||||
|
#define UMCLOCALCAPS_ADDR2 (0x00014306 + 0x00001000)
|
||||||
|
#define UMCLOCALCAPS_ADDR3 (0x00014306 + 0x00001800)
|
||||||
|
#define UMCLOCALCAPS_ADDR4 (0x00054306 + 0x00000000)
|
||||||
|
#define UMCLOCALCAPS_ADDR5 (0x00054306 + 0x00000800)
|
||||||
|
#define UMCLOCALCAPS_ADDR6 (0x00054306 + 0x00001000)
|
||||||
|
#define UMCLOCALCAPS_ADDR7 (0x00054306 + 0x00001800)
|
||||||
|
#define UMCLOCALCAPS_ADDR8 (0x00094306 + 0x00000000)
|
||||||
|
#define UMCLOCALCAPS_ADDR9 (0x00094306 + 0x00000800)
|
||||||
|
#define UMCLOCALCAPS_ADDR10 (0x00094306 + 0x00001000)
|
||||||
|
#define UMCLOCALCAPS_ADDR11 (0x00094306 + 0x00001800)
|
||||||
|
#define UMCLOCALCAPS_ADDR12 (0x000d4306 + 0x00000000)
|
||||||
|
#define UMCLOCALCAPS_ADDR13 (0x000d4306 + 0x00000800)
|
||||||
|
#define UMCLOCALCAPS_ADDR14 (0x000d4306 + 0x00001000)
|
||||||
|
#define UMCLOCALCAPS_ADDR15 (0x000d4306 + 0x00001800)
|
||||||
|
|
||||||
|
/* Universal Memory Controller Channel config. */
|
||||||
|
/* UMCCH:UMC_CONFIG */
|
||||||
|
#define UMCCH_UMC_CONFIG_ADDR0 (0x00014040 + 0x00000000)
|
||||||
|
#define UMCCH_UMC_CONFIG_ADDR1 (0x00014040 + 0x00000800)
|
||||||
|
#define UMCCH_UMC_CONFIG_ADDR2 (0x00014040 + 0x00001000)
|
||||||
|
#define UMCCH_UMC_CONFIG_ADDR3 (0x00014040 + 0x00001800)
|
||||||
|
#define UMCCH_UMC_CONFIG_ADDR4 (0x00054040 + 0x00000000)
|
||||||
|
#define UMCCH_UMC_CONFIG_ADDR5 (0x00054040 + 0x00000800)
|
||||||
|
#define UMCCH_UMC_CONFIG_ADDR6 (0x00054040 + 0x00001000)
|
||||||
|
#define UMCCH_UMC_CONFIG_ADDR7 (0x00054040 + 0x00001800)
|
||||||
|
#define UMCCH_UMC_CONFIG_ADDR8 (0x00094040 + 0x00000000)
|
||||||
|
#define UMCCH_UMC_CONFIG_ADDR9 (0x00094040 + 0x00000800)
|
||||||
|
#define UMCCH_UMC_CONFIG_ADDR10 (0x00094040 + 0x00001000)
|
||||||
|
#define UMCCH_UMC_CONFIG_ADDR11 (0x00094040 + 0x00001800)
|
||||||
|
#define UMCCH_UMC_CONFIG_ADDR12 (0x000d4040 + 0x00000000)
|
||||||
|
#define UMCCH_UMC_CONFIG_ADDR13 (0x000d4040 + 0x00000800)
|
||||||
|
#define UMCCH_UMC_CONFIG_ADDR14 (0x000d4040 + 0x00001000)
|
||||||
|
#define UMCCH_UMC_CONFIG_ADDR15 (0x000d4040 + 0x00001800)
|
||||||
|
|
||||||
|
/* Universal Memory Controller Channel Ecc config. */
|
||||||
|
/* UMCCH:EccCtrl */
|
||||||
|
#define UMCCH_ECCCTRL_ADDR0 (0x00014053 + 0x00000000)
|
||||||
|
#define UMCCH_ECCCTRL_ADDR1 (0x00014053 + 0x00000800)
|
||||||
|
#define UMCCH_ECCCTRL_ADDR2 (0x00014053 + 0x00001000)
|
||||||
|
#define UMCCH_ECCCTRL_ADDR3 (0x00014053 + 0x00001800)
|
||||||
|
#define UMCCH_ECCCTRL_ADDR4 (0x00054053 + 0x00000000)
|
||||||
|
#define UMCCH_ECCCTRL_ADDR5 (0x00054053 + 0x00000800)
|
||||||
|
#define UMCCH_ECCCTRL_ADDR6 (0x00054053 + 0x00001000)
|
||||||
|
#define UMCCH_ECCCTRL_ADDR7 (0x00054053 + 0x00001800)
|
||||||
|
#define UMCCH_ECCCTRL_ADDR8 (0x00094053 + 0x00000000)
|
||||||
|
#define UMCCH_ECCCTRL_ADDR9 (0x00094053 + 0x00000800)
|
||||||
|
#define UMCCH_ECCCTRL_ADDR10 (0x00094053 + 0x00001000)
|
||||||
|
#define UMCCH_ECCCTRL_ADDR11 (0x00094053 + 0x00001800)
|
||||||
|
#define UMCCH_ECCCTRL_ADDR12 (0x000d4053 + 0x00000000)
|
||||||
|
#define UMCCH_ECCCTRL_ADDR13 (0x000d4053 + 0x00000800)
|
||||||
|
#define UMCCH_ECCCTRL_ADDR14 (0x000d4053 + 0x00001000)
|
||||||
|
#define UMCCH_ECCCTRL_ADDR15 (0x000d4053 + 0x00001800)
|
||||||
|
|
||||||
|
static const uint32_t ecc_umclocalcap_addrs[] = {
|
||||||
|
UMCLOCALCAPS_ADDR0,
|
||||||
|
UMCLOCALCAPS_ADDR1,
|
||||||
|
UMCLOCALCAPS_ADDR2,
|
||||||
|
UMCLOCALCAPS_ADDR3,
|
||||||
|
UMCLOCALCAPS_ADDR4,
|
||||||
|
UMCLOCALCAPS_ADDR5,
|
||||||
|
UMCLOCALCAPS_ADDR6,
|
||||||
|
UMCLOCALCAPS_ADDR7,
|
||||||
|
UMCLOCALCAPS_ADDR8,
|
||||||
|
UMCLOCALCAPS_ADDR9,
|
||||||
|
UMCLOCALCAPS_ADDR10,
|
||||||
|
UMCLOCALCAPS_ADDR11,
|
||||||
|
UMCLOCALCAPS_ADDR12,
|
||||||
|
UMCLOCALCAPS_ADDR13,
|
||||||
|
UMCLOCALCAPS_ADDR14,
|
||||||
|
UMCLOCALCAPS_ADDR15,
|
||||||
|
};
|
||||||
|
|
||||||
|
static const uint32_t ecc_umcch_umc_config_addrs[] = {
|
||||||
|
UMCCH_UMC_CONFIG_ADDR0,
|
||||||
|
UMCCH_UMC_CONFIG_ADDR1,
|
||||||
|
UMCCH_UMC_CONFIG_ADDR2,
|
||||||
|
UMCCH_UMC_CONFIG_ADDR3,
|
||||||
|
UMCCH_UMC_CONFIG_ADDR4,
|
||||||
|
UMCCH_UMC_CONFIG_ADDR5,
|
||||||
|
UMCCH_UMC_CONFIG_ADDR6,
|
||||||
|
UMCCH_UMC_CONFIG_ADDR7,
|
||||||
|
UMCCH_UMC_CONFIG_ADDR8,
|
||||||
|
UMCCH_UMC_CONFIG_ADDR9,
|
||||||
|
UMCCH_UMC_CONFIG_ADDR10,
|
||||||
|
UMCCH_UMC_CONFIG_ADDR11,
|
||||||
|
UMCCH_UMC_CONFIG_ADDR12,
|
||||||
|
UMCCH_UMC_CONFIG_ADDR13,
|
||||||
|
UMCCH_UMC_CONFIG_ADDR14,
|
||||||
|
UMCCH_UMC_CONFIG_ADDR15,
|
||||||
|
};
|
||||||
|
|
||||||
|
static const uint32_t ecc_umcch_eccctrl_addrs[] = {
|
||||||
|
UMCCH_ECCCTRL_ADDR0,
|
||||||
|
UMCCH_ECCCTRL_ADDR1,
|
||||||
|
UMCCH_ECCCTRL_ADDR2,
|
||||||
|
UMCCH_ECCCTRL_ADDR3,
|
||||||
|
UMCCH_ECCCTRL_ADDR4,
|
||||||
|
UMCCH_ECCCTRL_ADDR5,
|
||||||
|
UMCCH_ECCCTRL_ADDR6,
|
||||||
|
UMCCH_ECCCTRL_ADDR7,
|
||||||
|
UMCCH_ECCCTRL_ADDR8,
|
||||||
|
UMCCH_ECCCTRL_ADDR9,
|
||||||
|
UMCCH_ECCCTRL_ADDR10,
|
||||||
|
UMCCH_ECCCTRL_ADDR11,
|
||||||
|
UMCCH_ECCCTRL_ADDR12,
|
||||||
|
UMCCH_ECCCTRL_ADDR13,
|
||||||
|
UMCCH_ECCCTRL_ADDR14,
|
||||||
|
UMCCH_ECCCTRL_ADDR15,
|
||||||
|
};
|
||||||
|
|
||||||
static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
|
static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
|
||||||
struct amdgpu_irq_src *src,
|
struct amdgpu_irq_src *src,
|
||||||
unsigned type,
|
unsigned type,
|
||||||
|
@ -389,6 +505,85 @@ static int gmc_v9_0_early_init(void *handle)
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static int gmc_v9_0_ecc_available(struct amdgpu_device *adev)
|
||||||
|
{
|
||||||
|
uint32_t reg_val;
|
||||||
|
uint32_t reg_addr;
|
||||||
|
uint32_t field_val;
|
||||||
|
size_t i;
|
||||||
|
uint32_t fv2;
|
||||||
|
size_t lost_sheep;
|
||||||
|
|
||||||
|
DRM_DEBUG("ecc: gmc_v9_0_ecc_available()\n");
|
||||||
|
|
||||||
|
lost_sheep = 0;
|
||||||
|
for (i = 0; i < ARRAY_SIZE(ecc_umclocalcap_addrs); ++i) {
|
||||||
|
reg_addr = ecc_umclocalcap_addrs[i];
|
||||||
|
DRM_DEBUG("ecc: "
|
||||||
|
"UMCCH_UmcLocalCap[%zu]: reg_addr: 0x%08x\n",
|
||||||
|
i, reg_addr);
|
||||||
|
reg_val = RREG32(reg_addr);
|
||||||
|
field_val = REG_GET_FIELD(reg_val, UMCCH0_0_UmcLocalCap,
|
||||||
|
EccDis);
|
||||||
|
DRM_DEBUG("ecc: "
|
||||||
|
"reg_val: 0x%08x, "
|
||||||
|
"EccDis: 0x%08x, ",
|
||||||
|
reg_val, field_val);
|
||||||
|
if (field_val) {
|
||||||
|
DRM_ERROR("ecc: UmcLocalCap:EccDis is set.\n");
|
||||||
|
++lost_sheep;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for (i = 0; i < ARRAY_SIZE(ecc_umcch_umc_config_addrs); ++i) {
|
||||||
|
reg_addr = ecc_umcch_umc_config_addrs[i];
|
||||||
|
DRM_DEBUG("ecc: "
|
||||||
|
"UMCCH0_0_UMC_CONFIG[%zu]: reg_addr: 0x%08x",
|
||||||
|
i, reg_addr);
|
||||||
|
reg_val = RREG32(reg_addr);
|
||||||
|
field_val = REG_GET_FIELD(reg_val, UMCCH0_0_UMC_CONFIG,
|
||||||
|
DramReady);
|
||||||
|
DRM_DEBUG("ecc: "
|
||||||
|
"reg_val: 0x%08x, "
|
||||||
|
"DramReady: 0x%08x\n",
|
||||||
|
reg_val, field_val);
|
||||||
|
|
||||||
|
if (!field_val) {
|
||||||
|
DRM_ERROR("ecc: UMC_CONFIG:DramReady is not set.\n");
|
||||||
|
++lost_sheep;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for (i = 0; i < ARRAY_SIZE(ecc_umcch_eccctrl_addrs); ++i) {
|
||||||
|
reg_addr = ecc_umcch_eccctrl_addrs[i];
|
||||||
|
DRM_DEBUG("ecc: "
|
||||||
|
"UMCCH_EccCtrl[%zu]: reg_addr: 0x%08x, ",
|
||||||
|
i, reg_addr);
|
||||||
|
reg_val = RREG32(reg_addr);
|
||||||
|
field_val = REG_GET_FIELD(reg_val, UMCCH0_0_EccCtrl,
|
||||||
|
WrEccEn);
|
||||||
|
fv2 = REG_GET_FIELD(reg_val, UMCCH0_0_EccCtrl,
|
||||||
|
RdEccEn);
|
||||||
|
DRM_DEBUG("ecc: "
|
||||||
|
"reg_val: 0x%08x, "
|
||||||
|
"WrEccEn: 0x%08x, "
|
||||||
|
"RdEccEn: 0x%08x\n",
|
||||||
|
reg_val, field_val, fv2);
|
||||||
|
|
||||||
|
if (!field_val) {
|
||||||
|
DRM_DEBUG("ecc: WrEccEn is not set\n");
|
||||||
|
++lost_sheep;
|
||||||
|
}
|
||||||
|
if (!fv2) {
|
||||||
|
DRM_DEBUG("ecc: RdEccEn is not set\n");
|
||||||
|
++lost_sheep;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
DRM_DEBUG("ecc: lost_sheep: %zu\n", lost_sheep);
|
||||||
|
return lost_sheep == 0;
|
||||||
|
}
|
||||||
|
|
||||||
static int gmc_v9_0_late_init(void *handle)
|
static int gmc_v9_0_late_init(void *handle)
|
||||||
{
|
{
|
||||||
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
|
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
|
||||||
|
@ -403,6 +598,7 @@ static int gmc_v9_0_late_init(void *handle)
|
||||||
*/
|
*/
|
||||||
unsigned vm_inv_eng[AMDGPU_MAX_VMHUBS] = { 4, 4 };
|
unsigned vm_inv_eng[AMDGPU_MAX_VMHUBS] = { 4, 4 };
|
||||||
unsigned i;
|
unsigned i;
|
||||||
|
int r;
|
||||||
|
|
||||||
for(i = 0; i < adev->num_rings; ++i) {
|
for(i = 0; i < adev->num_rings; ++i) {
|
||||||
struct amdgpu_ring *ring = adev->rings[i];
|
struct amdgpu_ring *ring = adev->rings[i];
|
||||||
|
@ -418,6 +614,16 @@ static int gmc_v9_0_late_init(void *handle)
|
||||||
for(i = 0; i < AMDGPU_MAX_VMHUBS; ++i)
|
for(i = 0; i < AMDGPU_MAX_VMHUBS; ++i)
|
||||||
BUG_ON(vm_inv_eng[i] > 16);
|
BUG_ON(vm_inv_eng[i] > 16);
|
||||||
|
|
||||||
|
r = gmc_v9_0_ecc_available(adev);
|
||||||
|
if (r == 1) {
|
||||||
|
DRM_INFO("ECC is active.\n");
|
||||||
|
} else if (r == 0) {
|
||||||
|
DRM_INFO("ECC is not present.\n");
|
||||||
|
} else {
|
||||||
|
DRM_ERROR("gmc_v9_0_ecc_available() failed. r: %d\n", r);
|
||||||
|
return r;
|
||||||
|
}
|
||||||
|
|
||||||
return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
|
return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -449,6 +655,7 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
|
||||||
{
|
{
|
||||||
u32 tmp;
|
u32 tmp;
|
||||||
int chansize, numchan;
|
int chansize, numchan;
|
||||||
|
int r;
|
||||||
|
|
||||||
adev->mc.vram_width = amdgpu_atomfirmware_get_vram_width(adev);
|
adev->mc.vram_width = amdgpu_atomfirmware_get_vram_width(adev);
|
||||||
if (!adev->mc.vram_width) {
|
if (!adev->mc.vram_width) {
|
||||||
|
@ -491,17 +698,22 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
|
||||||
adev->mc.vram_width = numchan * chansize;
|
adev->mc.vram_width = numchan * chansize;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Could aper size report 0 ? */
|
|
||||||
adev->mc.aper_base = pci_resource_start(adev->pdev, 0);
|
|
||||||
adev->mc.aper_size = pci_resource_len(adev->pdev, 0);
|
|
||||||
/* size in MB on si */
|
/* size in MB on si */
|
||||||
adev->mc.mc_vram_size =
|
adev->mc.mc_vram_size =
|
||||||
((adev->flags & AMD_IS_APU) ? nbio_v7_0_get_memsize(adev) :
|
((adev->flags & AMD_IS_APU) ? nbio_v7_0_get_memsize(adev) :
|
||||||
nbio_v6_1_get_memsize(adev)) * 1024ULL * 1024ULL;
|
nbio_v6_1_get_memsize(adev)) * 1024ULL * 1024ULL;
|
||||||
adev->mc.real_vram_size = adev->mc.mc_vram_size;
|
adev->mc.real_vram_size = adev->mc.mc_vram_size;
|
||||||
adev->mc.visible_vram_size = adev->mc.aper_size;
|
|
||||||
|
if (!(adev->flags & AMD_IS_APU)) {
|
||||||
|
r = amdgpu_device_resize_fb_bar(adev);
|
||||||
|
if (r)
|
||||||
|
return r;
|
||||||
|
}
|
||||||
|
adev->mc.aper_base = pci_resource_start(adev->pdev, 0);
|
||||||
|
adev->mc.aper_size = pci_resource_len(adev->pdev, 0);
|
||||||
|
|
||||||
/* In case the PCI BAR is larger than the actual amount of vram */
|
/* In case the PCI BAR is larger than the actual amount of vram */
|
||||||
|
adev->mc.visible_vram_size = adev->mc.aper_size;
|
||||||
if (adev->mc.visible_vram_size > adev->mc.real_vram_size)
|
if (adev->mc.visible_vram_size > adev->mc.real_vram_size)
|
||||||
adev->mc.visible_vram_size = adev->mc.real_vram_size;
|
adev->mc.visible_vram_size = adev->mc.real_vram_size;
|
||||||
|
|
||||||
|
@ -557,16 +769,11 @@ static int gmc_v9_0_sw_init(void *handle)
|
||||||
switch (adev->asic_type) {
|
switch (adev->asic_type) {
|
||||||
case CHIP_RAVEN:
|
case CHIP_RAVEN:
|
||||||
adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
|
adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
|
||||||
if (adev->rev_id == 0x0 || adev->rev_id == 0x1) {
|
if (adev->rev_id == 0x0 || adev->rev_id == 0x1)
|
||||||
adev->vm_manager.vm_size = 1U << 18;
|
amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
|
||||||
adev->vm_manager.block_size = 9;
|
else
|
||||||
adev->vm_manager.num_level = 3;
|
|
||||||
amdgpu_vm_set_fragment_size(adev, 9);
|
|
||||||
} else {
|
|
||||||
/* vm_size is 64GB for legacy 2-level page support */
|
/* vm_size is 64GB for legacy 2-level page support */
|
||||||
amdgpu_vm_adjust_size(adev, 64, 9);
|
amdgpu_vm_adjust_size(adev, 64, 9, 1, 48);
|
||||||
adev->vm_manager.num_level = 1;
|
|
||||||
}
|
|
||||||
break;
|
break;
|
||||||
case CHIP_VEGA10:
|
case CHIP_VEGA10:
|
||||||
/* XXX Don't know how to get VRAM type yet. */
|
/* XXX Don't know how to get VRAM type yet. */
|
||||||
|
@ -576,20 +783,12 @@ static int gmc_v9_0_sw_init(void *handle)
|
||||||
* vm size is 256TB (48bit), maximum size of Vega10,
|
* vm size is 256TB (48bit), maximum size of Vega10,
|
||||||
* block size 512 (9bit)
|
* block size 512 (9bit)
|
||||||
*/
|
*/
|
||||||
adev->vm_manager.vm_size = 1U << 18;
|
amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
|
||||||
adev->vm_manager.block_size = 9;
|
|
||||||
adev->vm_manager.num_level = 3;
|
|
||||||
amdgpu_vm_set_fragment_size(adev, 9);
|
|
||||||
break;
|
break;
|
||||||
default:
|
default:
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
|
||||||
DRM_INFO("vm size is %llu GB, block size is %u-bit,fragment size is %u-bit\n",
|
|
||||||
adev->vm_manager.vm_size,
|
|
||||||
adev->vm_manager.block_size,
|
|
||||||
adev->vm_manager.fragment_size);
|
|
||||||
|
|
||||||
/* This interrupt is VMC page fault.*/
|
/* This interrupt is VMC page fault.*/
|
||||||
r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_VMC, 0,
|
r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_VMC, 0,
|
||||||
&adev->mc.vm_fault);
|
&adev->mc.vm_fault);
|
||||||
|
@ -599,8 +798,6 @@ static int gmc_v9_0_sw_init(void *handle)
|
||||||
if (r)
|
if (r)
|
||||||
return r;
|
return r;
|
||||||
|
|
||||||
adev->vm_manager.max_pfn = adev->vm_manager.vm_size << 18;
|
|
||||||
|
|
||||||
/* Set the internal MC address mask
|
/* Set the internal MC address mask
|
||||||
* This is the max address of the GPU's
|
* This is the max address of the GPU's
|
||||||
* internal address space.
|
* internal address space.
|
||||||
|
@ -660,7 +857,7 @@ static int gmc_v9_0_sw_init(void *handle)
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* gmc_v8_0_gart_fini - vm fini callback
|
* gmc_v9_0_gart_fini - vm fini callback
|
||||||
*
|
*
|
||||||
* @adev: amdgpu_device pointer
|
* @adev: amdgpu_device pointer
|
||||||
*
|
*
|
||||||
|
@ -676,9 +873,9 @@ static int gmc_v9_0_sw_fini(void *handle)
|
||||||
{
|
{
|
||||||
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
|
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
|
||||||
|
|
||||||
|
amdgpu_gem_force_release(adev);
|
||||||
amdgpu_vm_manager_fini(adev);
|
amdgpu_vm_manager_fini(adev);
|
||||||
gmc_v9_0_gart_fini(adev);
|
gmc_v9_0_gart_fini(adev);
|
||||||
amdgpu_gem_force_release(adev);
|
|
||||||
amdgpu_bo_fini(adev);
|
amdgpu_bo_fini(adev);
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
|
@ -690,15 +887,15 @@ static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
|
||||||
case CHIP_VEGA10:
|
case CHIP_VEGA10:
|
||||||
amdgpu_program_register_sequence(adev,
|
amdgpu_program_register_sequence(adev,
|
||||||
golden_settings_mmhub_1_0_0,
|
golden_settings_mmhub_1_0_0,
|
||||||
(const u32)ARRAY_SIZE(golden_settings_mmhub_1_0_0));
|
ARRAY_SIZE(golden_settings_mmhub_1_0_0));
|
||||||
amdgpu_program_register_sequence(adev,
|
amdgpu_program_register_sequence(adev,
|
||||||
golden_settings_athub_1_0_0,
|
golden_settings_athub_1_0_0,
|
||||||
(const u32)ARRAY_SIZE(golden_settings_athub_1_0_0));
|
ARRAY_SIZE(golden_settings_athub_1_0_0));
|
||||||
break;
|
break;
|
||||||
case CHIP_RAVEN:
|
case CHIP_RAVEN:
|
||||||
amdgpu_program_register_sequence(adev,
|
amdgpu_program_register_sequence(adev,
|
||||||
golden_settings_athub_1_0_0,
|
golden_settings_athub_1_0_0,
|
||||||
(const u32)ARRAY_SIZE(golden_settings_athub_1_0_0));
|
ARRAY_SIZE(golden_settings_athub_1_0_0));
|
||||||
break;
|
break;
|
||||||
default:
|
default:
|
||||||
break;
|
break;
|
||||||
|
@ -718,7 +915,7 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
|
||||||
|
|
||||||
amdgpu_program_register_sequence(adev,
|
amdgpu_program_register_sequence(adev,
|
||||||
golden_settings_vega10_hdp,
|
golden_settings_vega10_hdp,
|
||||||
(const u32)ARRAY_SIZE(golden_settings_vega10_hdp));
|
ARRAY_SIZE(golden_settings_vega10_hdp));
|
||||||
|
|
||||||
if (adev->gart.robj == NULL) {
|
if (adev->gart.robj == NULL) {
|
||||||
dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
|
dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
|
||||||
|
|
|
@ -23,14 +23,13 @@
|
||||||
#include "amdgpu.h"
|
#include "amdgpu.h"
|
||||||
#include "mmhub_v1_0.h"
|
#include "mmhub_v1_0.h"
|
||||||
|
|
||||||
#include "vega10/soc15ip.h"
|
#include "soc15ip.h"
|
||||||
#include "vega10/MMHUB/mmhub_1_0_offset.h"
|
#include "mmhub/mmhub_1_0_offset.h"
|
||||||
#include "vega10/MMHUB/mmhub_1_0_sh_mask.h"
|
#include "mmhub/mmhub_1_0_sh_mask.h"
|
||||||
#include "vega10/MMHUB/mmhub_1_0_default.h"
|
#include "mmhub/mmhub_1_0_default.h"
|
||||||
#include "vega10/ATHUB/athub_1_0_offset.h"
|
#include "athub/athub_1_0_offset.h"
|
||||||
#include "vega10/ATHUB/athub_1_0_sh_mask.h"
|
#include "athub/athub_1_0_sh_mask.h"
|
||||||
#include "vega10/ATHUB/athub_1_0_default.h"
|
#include "vega10_enum.h"
|
||||||
#include "vega10/vega10_enum.h"
|
|
||||||
|
|
||||||
#include "soc15_common.h"
|
#include "soc15_common.h"
|
||||||
|
|
||||||
|
|
|
@ -22,11 +22,11 @@
|
||||||
*/
|
*/
|
||||||
|
|
||||||
#include "amdgpu.h"
|
#include "amdgpu.h"
|
||||||
#include "vega10/soc15ip.h"
|
#include "soc15ip.h"
|
||||||
#include "vega10/NBIO/nbio_6_1_offset.h"
|
#include "nbio/nbio_6_1_offset.h"
|
||||||
#include "vega10/NBIO/nbio_6_1_sh_mask.h"
|
#include "nbio/nbio_6_1_sh_mask.h"
|
||||||
#include "vega10/GC/gc_9_0_offset.h"
|
#include "gc/gc_9_0_offset.h"
|
||||||
#include "vega10/GC/gc_9_0_sh_mask.h"
|
#include "gc/gc_9_0_sh_mask.h"
|
||||||
#include "soc15.h"
|
#include "soc15.h"
|
||||||
#include "vega10_ih.h"
|
#include "vega10_ih.h"
|
||||||
#include "soc15_common.h"
|
#include "soc15_common.h"
|
||||||
|
@ -254,7 +254,7 @@ static void xgpu_ai_mailbox_flr_work(struct work_struct *work)
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Trigger recovery due to world switch failure */
|
/* Trigger recovery due to world switch failure */
|
||||||
amdgpu_sriov_gpu_reset(adev, NULL);
|
amdgpu_gpu_recover(adev, NULL);
|
||||||
}
|
}
|
||||||
|
|
||||||
static int xgpu_ai_set_mailbox_rcv_irq(struct amdgpu_device *adev,
|
static int xgpu_ai_set_mailbox_rcv_irq(struct amdgpu_device *adev,
|
||||||
|
@ -282,9 +282,17 @@ static int xgpu_ai_mailbox_rcv_irq(struct amdgpu_device *adev,
|
||||||
/* see what event we get */
|
/* see what event we get */
|
||||||
r = xgpu_ai_mailbox_rcv_msg(adev, IDH_FLR_NOTIFICATION);
|
r = xgpu_ai_mailbox_rcv_msg(adev, IDH_FLR_NOTIFICATION);
|
||||||
|
|
||||||
/* only handle FLR_NOTIFY now */
|
/* sometimes the interrupt is delayed to inject to VM, so under such case
|
||||||
if (!r)
|
* the IDH_FLR_NOTIFICATION is overwritten by VF FLR from GIM side, thus
|
||||||
schedule_work(&adev->virt.flr_work);
|
* above recieve message could be failed, we should schedule the flr_work
|
||||||
|
* anyway
|
||||||
|
*/
|
||||||
|
if (r) {
|
||||||
|
DRM_ERROR("FLR_NOTIFICATION is missed\n");
|
||||||
|
xgpu_ai_mailbox_send_ack(adev);
|
||||||
|
}
|
||||||
|
|
||||||
|
schedule_work(&adev->virt.flr_work);
|
||||||
}
|
}
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
|
@ -353,5 +361,6 @@ const struct amdgpu_virt_ops xgpu_ai_virt_ops = {
|
||||||
.req_full_gpu = xgpu_ai_request_full_gpu_access,
|
.req_full_gpu = xgpu_ai_request_full_gpu_access,
|
||||||
.rel_full_gpu = xgpu_ai_release_full_gpu_access,
|
.rel_full_gpu = xgpu_ai_release_full_gpu_access,
|
||||||
.reset_gpu = xgpu_ai_request_reset,
|
.reset_gpu = xgpu_ai_request_reset,
|
||||||
|
.wait_reset = NULL,
|
||||||
.trans_msg = xgpu_ai_mailbox_trans_msg,
|
.trans_msg = xgpu_ai_mailbox_trans_msg,
|
||||||
};
|
};
|
||||||
|
|
|
@ -281,29 +281,29 @@ void xgpu_vi_init_golden_registers(struct amdgpu_device *adev)
|
||||||
case CHIP_FIJI:
|
case CHIP_FIJI:
|
||||||
amdgpu_program_register_sequence(adev,
|
amdgpu_program_register_sequence(adev,
|
||||||
xgpu_fiji_mgcg_cgcg_init,
|
xgpu_fiji_mgcg_cgcg_init,
|
||||||
(const u32)ARRAY_SIZE(
|
ARRAY_SIZE(
|
||||||
xgpu_fiji_mgcg_cgcg_init));
|
xgpu_fiji_mgcg_cgcg_init));
|
||||||
amdgpu_program_register_sequence(adev,
|
amdgpu_program_register_sequence(adev,
|
||||||
xgpu_fiji_golden_settings_a10,
|
xgpu_fiji_golden_settings_a10,
|
||||||
(const u32)ARRAY_SIZE(
|
ARRAY_SIZE(
|
||||||
xgpu_fiji_golden_settings_a10));
|
xgpu_fiji_golden_settings_a10));
|
||||||
amdgpu_program_register_sequence(adev,
|
amdgpu_program_register_sequence(adev,
|
||||||
xgpu_fiji_golden_common_all,
|
xgpu_fiji_golden_common_all,
|
||||||
(const u32)ARRAY_SIZE(
|
ARRAY_SIZE(
|
||||||
xgpu_fiji_golden_common_all));
|
xgpu_fiji_golden_common_all));
|
||||||
break;
|
break;
|
||||||
case CHIP_TONGA:
|
case CHIP_TONGA:
|
||||||
amdgpu_program_register_sequence(adev,
|
amdgpu_program_register_sequence(adev,
|
||||||
xgpu_tonga_mgcg_cgcg_init,
|
xgpu_tonga_mgcg_cgcg_init,
|
||||||
(const u32)ARRAY_SIZE(
|
ARRAY_SIZE(
|
||||||
xgpu_tonga_mgcg_cgcg_init));
|
xgpu_tonga_mgcg_cgcg_init));
|
||||||
amdgpu_program_register_sequence(adev,
|
amdgpu_program_register_sequence(adev,
|
||||||
xgpu_tonga_golden_settings_a11,
|
xgpu_tonga_golden_settings_a11,
|
||||||
(const u32)ARRAY_SIZE(
|
ARRAY_SIZE(
|
||||||
xgpu_tonga_golden_settings_a11));
|
xgpu_tonga_golden_settings_a11));
|
||||||
amdgpu_program_register_sequence(adev,
|
amdgpu_program_register_sequence(adev,
|
||||||
xgpu_tonga_golden_common_all,
|
xgpu_tonga_golden_common_all,
|
||||||
(const u32)ARRAY_SIZE(
|
ARRAY_SIZE(
|
||||||
xgpu_tonga_golden_common_all));
|
xgpu_tonga_golden_common_all));
|
||||||
break;
|
break;
|
||||||
default:
|
default:
|
||||||
|
@ -446,8 +446,10 @@ static int xgpu_vi_send_access_requests(struct amdgpu_device *adev,
|
||||||
request == IDH_REQ_GPU_FINI_ACCESS ||
|
request == IDH_REQ_GPU_FINI_ACCESS ||
|
||||||
request == IDH_REQ_GPU_RESET_ACCESS) {
|
request == IDH_REQ_GPU_RESET_ACCESS) {
|
||||||
r = xgpu_vi_poll_msg(adev, IDH_READY_TO_ACCESS_GPU);
|
r = xgpu_vi_poll_msg(adev, IDH_READY_TO_ACCESS_GPU);
|
||||||
if (r)
|
if (r) {
|
||||||
pr_err("Doesn't get ack from pf, continue\n");
|
pr_err("Doesn't get ack from pf, give up\n");
|
||||||
|
return r;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
|
@ -458,6 +460,11 @@ static int xgpu_vi_request_reset(struct amdgpu_device *adev)
|
||||||
return xgpu_vi_send_access_requests(adev, IDH_REQ_GPU_RESET_ACCESS);
|
return xgpu_vi_send_access_requests(adev, IDH_REQ_GPU_RESET_ACCESS);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static int xgpu_vi_wait_reset_cmpl(struct amdgpu_device *adev)
|
||||||
|
{
|
||||||
|
return xgpu_vi_poll_msg(adev, IDH_FLR_NOTIFICATION_CMPL);
|
||||||
|
}
|
||||||
|
|
||||||
static int xgpu_vi_request_full_gpu_access(struct amdgpu_device *adev,
|
static int xgpu_vi_request_full_gpu_access(struct amdgpu_device *adev,
|
||||||
bool init)
|
bool init)
|
||||||
{
|
{
|
||||||
|
@ -514,7 +521,7 @@ static void xgpu_vi_mailbox_flr_work(struct work_struct *work)
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Trigger recovery due to world switch failure */
|
/* Trigger recovery due to world switch failure */
|
||||||
amdgpu_sriov_gpu_reset(adev, NULL);
|
amdgpu_gpu_recover(adev, NULL);
|
||||||
}
|
}
|
||||||
|
|
||||||
static int xgpu_vi_set_mailbox_rcv_irq(struct amdgpu_device *adev,
|
static int xgpu_vi_set_mailbox_rcv_irq(struct amdgpu_device *adev,
|
||||||
|
@ -613,5 +620,6 @@ const struct amdgpu_virt_ops xgpu_vi_virt_ops = {
|
||||||
.req_full_gpu = xgpu_vi_request_full_gpu_access,
|
.req_full_gpu = xgpu_vi_request_full_gpu_access,
|
||||||
.rel_full_gpu = xgpu_vi_release_full_gpu_access,
|
.rel_full_gpu = xgpu_vi_release_full_gpu_access,
|
||||||
.reset_gpu = xgpu_vi_request_reset,
|
.reset_gpu = xgpu_vi_request_reset,
|
||||||
|
.wait_reset = xgpu_vi_wait_reset_cmpl,
|
||||||
.trans_msg = NULL, /* Does not need to trans VF errors to host. */
|
.trans_msg = NULL, /* Does not need to trans VF errors to host. */
|
||||||
};
|
};
|
||||||
|
|
|
@ -24,11 +24,11 @@
|
||||||
#include "amdgpu_atombios.h"
|
#include "amdgpu_atombios.h"
|
||||||
#include "nbio_v6_1.h"
|
#include "nbio_v6_1.h"
|
||||||
|
|
||||||
#include "vega10/soc15ip.h"
|
#include "soc15ip.h"
|
||||||
#include "vega10/NBIO/nbio_6_1_default.h"
|
#include "nbio/nbio_6_1_default.h"
|
||||||
#include "vega10/NBIO/nbio_6_1_offset.h"
|
#include "nbio/nbio_6_1_offset.h"
|
||||||
#include "vega10/NBIO/nbio_6_1_sh_mask.h"
|
#include "nbio/nbio_6_1_sh_mask.h"
|
||||||
#include "vega10/vega10_enum.h"
|
#include "vega10_enum.h"
|
||||||
|
|
||||||
#define smnCPM_CONTROL 0x11180460
|
#define smnCPM_CONTROL 0x11180460
|
||||||
#define smnPCIE_CNTL2 0x11180070
|
#define smnPCIE_CNTL2 0x11180070
|
||||||
|
|
|
@ -24,11 +24,11 @@
|
||||||
#include "amdgpu_atombios.h"
|
#include "amdgpu_atombios.h"
|
||||||
#include "nbio_v7_0.h"
|
#include "nbio_v7_0.h"
|
||||||
|
|
||||||
#include "vega10/soc15ip.h"
|
#include "soc15ip.h"
|
||||||
#include "raven1/NBIO/nbio_7_0_default.h"
|
#include "nbio/nbio_7_0_default.h"
|
||||||
#include "raven1/NBIO/nbio_7_0_offset.h"
|
#include "nbio/nbio_7_0_offset.h"
|
||||||
#include "raven1/NBIO/nbio_7_0_sh_mask.h"
|
#include "nbio/nbio_7_0_sh_mask.h"
|
||||||
#include "vega10/vega10_enum.h"
|
#include "vega10_enum.h"
|
||||||
|
|
||||||
#define smnNBIF_MGCG_CTRL_LCLK 0x1013a05c
|
#define smnNBIF_MGCG_CTRL_LCLK 0x1013a05c
|
||||||
|
|
||||||
|
|
|
@ -30,10 +30,10 @@
|
||||||
#include "soc15_common.h"
|
#include "soc15_common.h"
|
||||||
#include "psp_v10_0.h"
|
#include "psp_v10_0.h"
|
||||||
|
|
||||||
#include "vega10/soc15ip.h"
|
#include "soc15ip.h"
|
||||||
#include "raven1/MP/mp_10_0_offset.h"
|
#include "mp/mp_10_0_offset.h"
|
||||||
#include "raven1/GC/gc_9_1_offset.h"
|
#include "gc/gc_9_1_offset.h"
|
||||||
#include "raven1/SDMA0/sdma0_4_1_offset.h"
|
#include "sdma0/sdma0_4_1_offset.h"
|
||||||
|
|
||||||
MODULE_FIRMWARE("amdgpu/raven_asd.bin");
|
MODULE_FIRMWARE("amdgpu/raven_asd.bin");
|
||||||
|
|
||||||
|
|
|
@ -31,12 +31,12 @@
|
||||||
#include "soc15_common.h"
|
#include "soc15_common.h"
|
||||||
#include "psp_v3_1.h"
|
#include "psp_v3_1.h"
|
||||||
|
|
||||||
#include "vega10/soc15ip.h"
|
#include "soc15ip.h"
|
||||||
#include "vega10/MP/mp_9_0_offset.h"
|
#include "mp/mp_9_0_offset.h"
|
||||||
#include "vega10/MP/mp_9_0_sh_mask.h"
|
#include "mp/mp_9_0_sh_mask.h"
|
||||||
#include "vega10/GC/gc_9_0_offset.h"
|
#include "gc/gc_9_0_offset.h"
|
||||||
#include "vega10/SDMA0/sdma0_4_0_offset.h"
|
#include "sdma0/sdma0_4_0_offset.h"
|
||||||
#include "vega10/NBIO/nbio_6_1_offset.h"
|
#include "nbio/nbio_6_1_offset.h"
|
||||||
|
|
||||||
MODULE_FIRMWARE("amdgpu/vega10_sos.bin");
|
MODULE_FIRMWARE("amdgpu/vega10_sos.bin");
|
||||||
MODULE_FIRMWARE("amdgpu/vega10_asd.bin");
|
MODULE_FIRMWARE("amdgpu/vega10_asd.bin");
|
||||||
|
|
|
@ -95,10 +95,10 @@ static void sdma_v2_4_init_golden_registers(struct amdgpu_device *adev)
|
||||||
case CHIP_TOPAZ:
|
case CHIP_TOPAZ:
|
||||||
amdgpu_program_register_sequence(adev,
|
amdgpu_program_register_sequence(adev,
|
||||||
iceland_mgcg_cgcg_init,
|
iceland_mgcg_cgcg_init,
|
||||||
(const u32)ARRAY_SIZE(iceland_mgcg_cgcg_init));
|
ARRAY_SIZE(iceland_mgcg_cgcg_init));
|
||||||
amdgpu_program_register_sequence(adev,
|
amdgpu_program_register_sequence(adev,
|
||||||
golden_settings_iceland_a11,
|
golden_settings_iceland_a11,
|
||||||
(const u32)ARRAY_SIZE(golden_settings_iceland_a11));
|
ARRAY_SIZE(golden_settings_iceland_a11));
|
||||||
break;
|
break;
|
||||||
default:
|
default:
|
||||||
break;
|
break;
|
||||||
|
@ -633,7 +633,7 @@ static int sdma_v2_4_ring_test_ring(struct amdgpu_ring *ring)
|
||||||
}
|
}
|
||||||
|
|
||||||
if (i < adev->usec_timeout) {
|
if (i < adev->usec_timeout) {
|
||||||
DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
|
DRM_DEBUG("ring test on %d succeeded in %d usecs\n", ring->idx, i);
|
||||||
} else {
|
} else {
|
||||||
DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
|
DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
|
||||||
ring->idx, tmp);
|
ring->idx, tmp);
|
||||||
|
@ -704,7 +704,7 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring, long timeout)
|
||||||
}
|
}
|
||||||
tmp = le32_to_cpu(adev->wb.wb[index]);
|
tmp = le32_to_cpu(adev->wb.wb[index]);
|
||||||
if (tmp == 0xDEADBEEF) {
|
if (tmp == 0xDEADBEEF) {
|
||||||
DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
|
DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
|
||||||
r = 0;
|
r = 0;
|
||||||
} else {
|
} else {
|
||||||
DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
|
DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
|
||||||
|
|
|
@@ -194,45 +194,45 @@ static void sdma_v3_0_init_golden_registers(struct amdgpu_device *adev)
 	case CHIP_FIJI:
 		amdgpu_program_register_sequence(adev,
 						 fiji_mgcg_cgcg_init,
-						 (const u32)ARRAY_SIZE(fiji_mgcg_cgcg_init));
+						 ARRAY_SIZE(fiji_mgcg_cgcg_init));
 		amdgpu_program_register_sequence(adev,
 						 golden_settings_fiji_a10,
-						 (const u32)ARRAY_SIZE(golden_settings_fiji_a10));
+						 ARRAY_SIZE(golden_settings_fiji_a10));
 		break;
 	case CHIP_TONGA:
 		amdgpu_program_register_sequence(adev,
 						 tonga_mgcg_cgcg_init,
-						 (const u32)ARRAY_SIZE(tonga_mgcg_cgcg_init));
+						 ARRAY_SIZE(tonga_mgcg_cgcg_init));
 		amdgpu_program_register_sequence(adev,
 						 golden_settings_tonga_a11,
-						 (const u32)ARRAY_SIZE(golden_settings_tonga_a11));
+						 ARRAY_SIZE(golden_settings_tonga_a11));
 		break;
 	case CHIP_POLARIS11:
 	case CHIP_POLARIS12:
 		amdgpu_program_register_sequence(adev,
 						 golden_settings_polaris11_a11,
-						 (const u32)ARRAY_SIZE(golden_settings_polaris11_a11));
+						 ARRAY_SIZE(golden_settings_polaris11_a11));
 		break;
 	case CHIP_POLARIS10:
 		amdgpu_program_register_sequence(adev,
 						 golden_settings_polaris10_a11,
-						 (const u32)ARRAY_SIZE(golden_settings_polaris10_a11));
+						 ARRAY_SIZE(golden_settings_polaris10_a11));
 		break;
 	case CHIP_CARRIZO:
 		amdgpu_program_register_sequence(adev,
 						 cz_mgcg_cgcg_init,
-						 (const u32)ARRAY_SIZE(cz_mgcg_cgcg_init));
+						 ARRAY_SIZE(cz_mgcg_cgcg_init));
 		amdgpu_program_register_sequence(adev,
 						 cz_golden_settings_a11,
-						 (const u32)ARRAY_SIZE(cz_golden_settings_a11));
+						 ARRAY_SIZE(cz_golden_settings_a11));
 		break;
 	case CHIP_STONEY:
 		amdgpu_program_register_sequence(adev,
 						 stoney_mgcg_cgcg_init,
-						 (const u32)ARRAY_SIZE(stoney_mgcg_cgcg_init));
+						 ARRAY_SIZE(stoney_mgcg_cgcg_init));
 		amdgpu_program_register_sequence(adev,
 						 stoney_golden_settings_a11,
-						 (const u32)ARRAY_SIZE(stoney_golden_settings_a11));
+						 ARRAY_SIZE(stoney_golden_settings_a11));
 		break;
 	default:
 		break;
@@ -893,7 +893,7 @@ static int sdma_v3_0_ring_test_ring(struct amdgpu_ring *ring)
 	}
 
 	if (i < adev->usec_timeout) {
-		DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
+		DRM_DEBUG("ring test on %d succeeded in %d usecs\n", ring->idx, i);
 	} else {
 		DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
 			  ring->idx, tmp);
@@ -964,7 +964,7 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 	}
 	tmp = le32_to_cpu(adev->wb.wb[index]);
 	if (tmp == 0xDEADBEEF) {
-		DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
+		DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
 		r = 0;
 	} else {
 		DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
@@ -27,15 +27,15 @@
 #include "amdgpu_ucode.h"
 #include "amdgpu_trace.h"
 
-#include "vega10/soc15ip.h"
-#include "vega10/SDMA0/sdma0_4_0_offset.h"
-#include "vega10/SDMA0/sdma0_4_0_sh_mask.h"
-#include "vega10/SDMA1/sdma1_4_0_offset.h"
-#include "vega10/SDMA1/sdma1_4_0_sh_mask.h"
-#include "vega10/MMHUB/mmhub_1_0_offset.h"
-#include "vega10/MMHUB/mmhub_1_0_sh_mask.h"
-#include "vega10/HDP/hdp_4_0_offset.h"
-#include "raven1/SDMA0/sdma0_4_1_default.h"
+#include "soc15ip.h"
+#include "sdma0/sdma0_4_0_offset.h"
+#include "sdma0/sdma0_4_0_sh_mask.h"
+#include "sdma1/sdma1_4_0_offset.h"
+#include "sdma1/sdma1_4_0_sh_mask.h"
+#include "mmhub/mmhub_1_0_offset.h"
+#include "mmhub/mmhub_1_0_sh_mask.h"
+#include "hdp/hdp_4_0_offset.h"
+#include "sdma0/sdma0_4_1_default.h"
 
 #include "soc15_common.h"
 #include "soc15.h"
@@ -132,18 +132,18 @@ static void sdma_v4_0_init_golden_registers(struct amdgpu_device *adev)
 	case CHIP_VEGA10:
 		amdgpu_program_register_sequence(adev,
 						 golden_settings_sdma_4,
-						 (const u32)ARRAY_SIZE(golden_settings_sdma_4));
+						 ARRAY_SIZE(golden_settings_sdma_4));
 		amdgpu_program_register_sequence(adev,
 						 golden_settings_sdma_vg10,
-						 (const u32)ARRAY_SIZE(golden_settings_sdma_vg10));
+						 ARRAY_SIZE(golden_settings_sdma_vg10));
 		break;
 	case CHIP_RAVEN:
 		amdgpu_program_register_sequence(adev,
 						 golden_settings_sdma_4_1,
-						 (const u32)ARRAY_SIZE(golden_settings_sdma_4_1));
+						 ARRAY_SIZE(golden_settings_sdma_4_1));
 		amdgpu_program_register_sequence(adev,
 						 golden_settings_sdma_rv1,
-						 (const u32)ARRAY_SIZE(golden_settings_sdma_rv1));
+						 ARRAY_SIZE(golden_settings_sdma_rv1));
 		break;
 	default:
 		break;
@@ -919,7 +919,7 @@ static int sdma_v4_0_ring_test_ring(struct amdgpu_ring *ring)
 	}
 
 	if (i < adev->usec_timeout) {
-		DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
+		DRM_DEBUG("ring test on %d succeeded in %d usecs\n", ring->idx, i);
 	} else {
 		DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
 			  ring->idx, tmp);
@@ -990,7 +990,7 @@ static int sdma_v4_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 	}
 	tmp = le32_to_cpu(adev->wb.wb[index]);
 	if (tmp == 0xDEADBEEF) {
-		DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
+		DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
 		r = 0;
 	} else {
 		DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
@@ -1392,63 +1392,63 @@ static void si_init_golden_registers(struct amdgpu_device *adev)
 	case CHIP_TAHITI:
 		amdgpu_program_register_sequence(adev,
 						 tahiti_golden_registers,
-						 (const u32)ARRAY_SIZE(tahiti_golden_registers));
+						 ARRAY_SIZE(tahiti_golden_registers));
 		amdgpu_program_register_sequence(adev,
 						 tahiti_golden_rlc_registers,
-						 (const u32)ARRAY_SIZE(tahiti_golden_rlc_registers));
+						 ARRAY_SIZE(tahiti_golden_rlc_registers));
 		amdgpu_program_register_sequence(adev,
 						 tahiti_mgcg_cgcg_init,
-						 (const u32)ARRAY_SIZE(tahiti_mgcg_cgcg_init));
+						 ARRAY_SIZE(tahiti_mgcg_cgcg_init));
 		amdgpu_program_register_sequence(adev,
 						 tahiti_golden_registers2,
-						 (const u32)ARRAY_SIZE(tahiti_golden_registers2));
+						 ARRAY_SIZE(tahiti_golden_registers2));
 		break;
 	case CHIP_PITCAIRN:
 		amdgpu_program_register_sequence(adev,
 						 pitcairn_golden_registers,
-						 (const u32)ARRAY_SIZE(pitcairn_golden_registers));
+						 ARRAY_SIZE(pitcairn_golden_registers));
 		amdgpu_program_register_sequence(adev,
 						 pitcairn_golden_rlc_registers,
-						 (const u32)ARRAY_SIZE(pitcairn_golden_rlc_registers));
+						 ARRAY_SIZE(pitcairn_golden_rlc_registers));
 		amdgpu_program_register_sequence(adev,
 						 pitcairn_mgcg_cgcg_init,
-						 (const u32)ARRAY_SIZE(pitcairn_mgcg_cgcg_init));
+						 ARRAY_SIZE(pitcairn_mgcg_cgcg_init));
 		break;
 	case CHIP_VERDE:
 		amdgpu_program_register_sequence(adev,
 						 verde_golden_registers,
-						 (const u32)ARRAY_SIZE(verde_golden_registers));
+						 ARRAY_SIZE(verde_golden_registers));
 		amdgpu_program_register_sequence(adev,
 						 verde_golden_rlc_registers,
-						 (const u32)ARRAY_SIZE(verde_golden_rlc_registers));
+						 ARRAY_SIZE(verde_golden_rlc_registers));
 		amdgpu_program_register_sequence(adev,
 						 verde_mgcg_cgcg_init,
-						 (const u32)ARRAY_SIZE(verde_mgcg_cgcg_init));
+						 ARRAY_SIZE(verde_mgcg_cgcg_init));
 		amdgpu_program_register_sequence(adev,
 						 verde_pg_init,
-						 (const u32)ARRAY_SIZE(verde_pg_init));
+						 ARRAY_SIZE(verde_pg_init));
 		break;
 	case CHIP_OLAND:
 		amdgpu_program_register_sequence(adev,
 						 oland_golden_registers,
-						 (const u32)ARRAY_SIZE(oland_golden_registers));
+						 ARRAY_SIZE(oland_golden_registers));
 		amdgpu_program_register_sequence(adev,
 						 oland_golden_rlc_registers,
-						 (const u32)ARRAY_SIZE(oland_golden_rlc_registers));
+						 ARRAY_SIZE(oland_golden_rlc_registers));
 		amdgpu_program_register_sequence(adev,
 						 oland_mgcg_cgcg_init,
-						 (const u32)ARRAY_SIZE(oland_mgcg_cgcg_init));
+						 ARRAY_SIZE(oland_mgcg_cgcg_init));
 		break;
 	case CHIP_HAINAN:
 		amdgpu_program_register_sequence(adev,
 						 hainan_golden_registers,
-						 (const u32)ARRAY_SIZE(hainan_golden_registers));
+						 ARRAY_SIZE(hainan_golden_registers));
 		amdgpu_program_register_sequence(adev,
 						 hainan_golden_registers2,
-						 (const u32)ARRAY_SIZE(hainan_golden_registers2));
+						 ARRAY_SIZE(hainan_golden_registers2));
 		amdgpu_program_register_sequence(adev,
 						 hainan_mgcg_cgcg_init,
-						 (const u32)ARRAY_SIZE(hainan_mgcg_cgcg_init));
+						 ARRAY_SIZE(hainan_mgcg_cgcg_init));
 		break;
@@ -252,7 +252,7 @@ static int si_dma_ring_test_ring(struct amdgpu_ring *ring)
 	}
 
 	if (i < adev->usec_timeout) {
-		DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
+		DRM_DEBUG("ring test on %d succeeded in %d usecs\n", ring->idx, i);
 	} else {
 		DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
 			  ring->idx, tmp);
@@ -317,7 +317,7 @@ static int si_dma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 	}
 	tmp = le32_to_cpu(adev->wb.wb[index]);
 	if (tmp == 0xDEADBEEF) {
-		DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
+		DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
 		r = 0;
 	} else {
 		DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
@@ -5845,9 +5845,9 @@ static int si_set_mc_special_registers(struct amdgpu_device *adev,
 					((temp_reg & 0xffff0000)) |
 					((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
 			j++;
 
 			if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
 				return -EINVAL;
 
 			temp_reg = RREG32(MC_PMG_CMD_MRS);
 			table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS;
 			table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS_LP;
@@ -5859,18 +5859,16 @@ static int si_set_mc_special_registers(struct amdgpu_device *adev,
 					table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
 			}
 			j++;
-			if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
-				return -EINVAL;
 
 			if (adev->mc.vram_type != AMDGPU_VRAM_TYPE_GDDR5) {
+				if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
+					return -EINVAL;
 				table->mc_reg_address[j].s1 = MC_PMG_AUTO_CMD;
 				table->mc_reg_address[j].s0 = MC_PMG_AUTO_CMD;
 				for (k = 0; k < table->num_entries; k++)
 					table->mc_reg_table_entry[k].mc_data[j] =
 						(table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
 				j++;
-				if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
-					return -EINVAL;
 			}
 			break;
 		case MC_SEQ_RESERVE_M:
@@ -5882,8 +5880,6 @@ static int si_set_mc_special_registers(struct amdgpu_device *adev,
 					(temp_reg & 0xffff0000) |
 					(table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
 			j++;
-			if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
-				return -EINVAL;
 			break;
 		default:
 			break;
@@ -34,18 +34,18 @@
 #include "atom.h"
 #include "amd_pcie.h"
 
-#include "vega10/soc15ip.h"
-#include "vega10/UVD/uvd_7_0_offset.h"
-#include "vega10/GC/gc_9_0_offset.h"
-#include "vega10/GC/gc_9_0_sh_mask.h"
-#include "vega10/SDMA0/sdma0_4_0_offset.h"
-#include "vega10/SDMA1/sdma1_4_0_offset.h"
-#include "vega10/HDP/hdp_4_0_offset.h"
-#include "vega10/HDP/hdp_4_0_sh_mask.h"
-#include "vega10/MP/mp_9_0_offset.h"
-#include "vega10/MP/mp_9_0_sh_mask.h"
-#include "vega10/SMUIO/smuio_9_0_offset.h"
-#include "vega10/SMUIO/smuio_9_0_sh_mask.h"
+#include "soc15ip.h"
+#include "uvd/uvd_7_0_offset.h"
+#include "gc/gc_9_0_offset.h"
+#include "gc/gc_9_0_sh_mask.h"
+#include "sdma0/sdma0_4_0_offset.h"
+#include "sdma1/sdma1_4_0_offset.h"
+#include "hdp/hdp_4_0_offset.h"
+#include "hdp/hdp_4_0_sh_mask.h"
+#include "mp/mp_9_0_offset.h"
+#include "mp/mp_9_0_sh_mask.h"
+#include "smuio/smuio_9_0_offset.h"
+#include "smuio/smuio_9_0_sh_mask.h"
 
 #include "soc15.h"
 #include "soc15_common.h"
@@ -265,12 +265,12 @@ static void soc15_init_golden_registers(struct amdgpu_device *adev)
 	case CHIP_VEGA10:
 		amdgpu_program_register_sequence(adev,
 						 vega10_golden_init,
-						 (const u32)ARRAY_SIZE(vega10_golden_init));
+						 ARRAY_SIZE(vega10_golden_init));
 		break;
 	case CHIP_RAVEN:
 		amdgpu_program_register_sequence(adev,
 						 raven_golden_init,
-						 (const u32)ARRAY_SIZE(raven_golden_init));
+						 ARRAY_SIZE(raven_golden_init));
 		break;
 	default:
 		break;
@@ -521,7 +521,7 @@ static int uvd_v4_2_ring_test_ring(struct amdgpu_ring *ring)
 	}
 
 	if (i < adev->usec_timeout) {
-		DRM_INFO("ring test on %d succeeded in %d usecs\n",
+		DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
 			 ring->idx, i);
 	} else {
 		DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
@@ -563,7 +563,7 @@ static void uvd_v4_2_mc_resume(struct amdgpu_device *adev)
 
 	/* programm the VCPU memory controller bits 0-27 */
 	addr = (adev->uvd.gpu_addr + AMDGPU_UVD_FIRMWARE_OFFSET) >> 3;
-	size = AMDGPU_GPU_PAGE_ALIGN(adev->uvd.fw->size + 4) >> 3;
+	size = AMDGPU_UVD_FIRMWARE_SIZE(adev) >> 3;
 	WREG32(mmUVD_VCPU_CACHE_OFFSET0, addr);
 	WREG32(mmUVD_VCPU_CACHE_SIZE0, size);
 
@@ -258,7 +258,7 @@ static void uvd_v5_0_mc_resume(struct amdgpu_device *adev)
 			upper_32_bits(adev->uvd.gpu_addr));
 
 	offset = AMDGPU_UVD_FIRMWARE_OFFSET;
-	size = AMDGPU_GPU_PAGE_ALIGN(adev->uvd.fw->size + 4);
+	size = AMDGPU_UVD_FIRMWARE_SIZE(adev);
 	WREG32(mmUVD_VCPU_CACHE_OFFSET0, offset >> 3);
 	WREG32(mmUVD_VCPU_CACHE_SIZE0, size);
 
@@ -536,7 +536,7 @@ static int uvd_v5_0_ring_test_ring(struct amdgpu_ring *ring)
 	}
 
 	if (i < adev->usec_timeout) {
-		DRM_INFO("ring test on %d succeeded in %d usecs\n",
+		DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
 			 ring->idx, i);
 	} else {
 		DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
@@ -184,7 +184,7 @@ static int uvd_v6_0_enc_ring_test_ring(struct amdgpu_ring *ring)
 	}
 
 	if (i < adev->usec_timeout) {
-		DRM_INFO("ring test on %d succeeded in %d usecs\n",
+		DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
 			 ring->idx, i);
 	} else {
 		DRM_ERROR("amdgpu: ring %d test failed\n",
@@ -360,7 +360,7 @@ static int uvd_v6_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 	} else if (r < 0) {
 		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
 	} else {
-		DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
+		DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
 		r = 0;
 	}
 error:
@@ -416,7 +416,7 @@ static int uvd_v6_0_sw_init(void *handle)
 		ring = &adev->uvd.ring_enc[0];
 		rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL];
 		r = amd_sched_entity_init(&ring->sched, &adev->uvd.entity_enc,
-					  rq, amdgpu_sched_jobs);
+					  rq, amdgpu_sched_jobs, NULL);
 		if (r) {
 			DRM_ERROR("Failed setting up UVD ENC run queue.\n");
 			return r;
@@ -603,7 +603,7 @@ static void uvd_v6_0_mc_resume(struct amdgpu_device *adev)
 			upper_32_bits(adev->uvd.gpu_addr));
 
 	offset = AMDGPU_UVD_FIRMWARE_OFFSET;
-	size = AMDGPU_GPU_PAGE_ALIGN(adev->uvd.fw->size + 4);
+	size = AMDGPU_UVD_FIRMWARE_SIZE(adev);
 	WREG32(mmUVD_VCPU_CACHE_OFFSET0, offset >> 3);
 	WREG32(mmUVD_VCPU_CACHE_SIZE0, size);
 
@@ -1008,7 +1008,7 @@ static int uvd_v6_0_ring_test_ring(struct amdgpu_ring *ring)
 	}
 
 	if (i < adev->usec_timeout) {
-		DRM_INFO("ring test on %d succeeded in %d usecs\n",
+		DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
 			 ring->idx, i);
 	} else {
 		DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
@@ -29,16 +29,16 @@
 #include "soc15_common.h"
 #include "mmsch_v1_0.h"
 
-#include "vega10/soc15ip.h"
-#include "vega10/UVD/uvd_7_0_offset.h"
-#include "vega10/UVD/uvd_7_0_sh_mask.h"
-#include "vega10/VCE/vce_4_0_offset.h"
-#include "vega10/VCE/vce_4_0_default.h"
-#include "vega10/VCE/vce_4_0_sh_mask.h"
-#include "vega10/NBIF/nbif_6_1_offset.h"
-#include "vega10/HDP/hdp_4_0_offset.h"
-#include "vega10/MMHUB/mmhub_1_0_offset.h"
-#include "vega10/MMHUB/mmhub_1_0_sh_mask.h"
+#include "soc15ip.h"
+#include "uvd/uvd_7_0_offset.h"
+#include "uvd/uvd_7_0_sh_mask.h"
+#include "vce/vce_4_0_offset.h"
+#include "vce/vce_4_0_default.h"
+#include "vce/vce_4_0_sh_mask.h"
+#include "nbif/nbif_6_1_offset.h"
+#include "hdp/hdp_4_0_offset.h"
+#include "mmhub/mmhub_1_0_offset.h"
+#include "mmhub/mmhub_1_0_sh_mask.h"
 
 static void uvd_v7_0_set_ring_funcs(struct amdgpu_device *adev);
 static void uvd_v7_0_set_enc_ring_funcs(struct amdgpu_device *adev);
@@ -184,7 +184,7 @@ static int uvd_v7_0_enc_ring_test_ring(struct amdgpu_ring *ring)
 	}
 
 	if (i < adev->usec_timeout) {
-		DRM_INFO("ring test on %d succeeded in %d usecs\n",
+		DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
 			 ring->idx, i);
 	} else {
 		DRM_ERROR("amdgpu: ring %d test failed\n",
@@ -359,7 +359,7 @@ static int uvd_v7_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 	} else if (r < 0) {
 		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
 	} else {
-		DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
+		DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
 		r = 0;
 	}
 error:
@@ -418,7 +418,7 @@ static int uvd_v7_0_sw_init(void *handle)
 	ring = &adev->uvd.ring_enc[0];
 	rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL];
 	r = amd_sched_entity_init(&ring->sched, &adev->uvd.entity_enc,
-				  rq, amdgpu_sched_jobs);
+				  rq, amdgpu_sched_jobs, NULL);
 	if (r) {
 		DRM_ERROR("Failed setting up UVD ENC run queue.\n");
 		return r;
@@ -616,7 +616,7 @@ static int uvd_v7_0_resume(void *handle)
  */
 static void uvd_v7_0_mc_resume(struct amdgpu_device *adev)
 {
-	uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->uvd.fw->size + 4);
+	uint32_t size = AMDGPU_UVD_FIRMWARE_SIZE(adev);
 	uint32_t offset;
 
 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
@@ -1192,7 +1192,7 @@ static int uvd_v7_0_ring_test_ring(struct amdgpu_ring *ring)
 	}
 
 	if (i < adev->usec_timeout) {
-		DRM_INFO("ring test on %d succeeded in %d usecs\n",
+		DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
			 ring->idx, i);
 	} else {
 		DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
@@ -32,12 +32,12 @@
 #include "soc15_common.h"
 #include "mmsch_v1_0.h"
 
-#include "vega10/soc15ip.h"
-#include "vega10/VCE/vce_4_0_offset.h"
-#include "vega10/VCE/vce_4_0_default.h"
-#include "vega10/VCE/vce_4_0_sh_mask.h"
-#include "vega10/MMHUB/mmhub_1_0_offset.h"
-#include "vega10/MMHUB/mmhub_1_0_sh_mask.h"
+#include "soc15ip.h"
+#include "vce/vce_4_0_offset.h"
+#include "vce/vce_4_0_default.h"
+#include "vce/vce_4_0_sh_mask.h"
+#include "mmhub/mmhub_1_0_offset.h"
+#include "mmhub/mmhub_1_0_sh_mask.h"
 
 #define VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK	0x02
 
@@ -243,37 +243,49 @@ static int vce_v4_0_sriov_start(struct amdgpu_device *adev)
 		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VM_CTRL), 0);
 
 		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
-			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_40BIT_BAR0),
-						adev->firmware.ucode[AMDGPU_UCODE_ID_VCE].mc_addr >> 8);
-			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_40BIT_BAR1),
-						adev->firmware.ucode[AMDGPU_UCODE_ID_VCE].mc_addr >> 8);
-			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_40BIT_BAR2),
-						adev->firmware.ucode[AMDGPU_UCODE_ID_VCE].mc_addr >> 8);
+			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0,
+						mmVCE_LMI_VCPU_CACHE_40BIT_BAR0),
+						adev->firmware.ucode[AMDGPU_UCODE_ID_VCE].mc_addr >> 8);
+			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0,
+						mmVCE_LMI_VCPU_CACHE_64BIT_BAR0),
+						(adev->firmware.ucode[AMDGPU_UCODE_ID_VCE].mc_addr >> 40) & 0xff);
 		} else {
-			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_40BIT_BAR0),
-						adev->vce.gpu_addr >> 8);
-			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_40BIT_BAR1),
-						adev->vce.gpu_addr >> 8);
-			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_40BIT_BAR2),
-						adev->vce.gpu_addr >> 8);
+			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0,
+						mmVCE_LMI_VCPU_CACHE_40BIT_BAR0),
+						adev->vce.gpu_addr >> 8);
+			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0,
+						mmVCE_LMI_VCPU_CACHE_64BIT_BAR0),
+						(adev->vce.gpu_addr >> 40) & 0xff);
 		}
+		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0,
+					mmVCE_LMI_VCPU_CACHE_40BIT_BAR1),
+					adev->vce.gpu_addr >> 8);
+		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0,
+					mmVCE_LMI_VCPU_CACHE_64BIT_BAR1),
+					(adev->vce.gpu_addr >> 40) & 0xff);
+		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0,
+					mmVCE_LMI_VCPU_CACHE_40BIT_BAR2),
+					adev->vce.gpu_addr >> 8);
+		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0,
+					mmVCE_LMI_VCPU_CACHE_64BIT_BAR2),
+					(adev->vce.gpu_addr >> 40) & 0xff);
 
 		offset = AMDGPU_VCE_FIRMWARE_OFFSET;
 		size = VCE_V4_0_FW_SIZE;
 		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_OFFSET0),
-					offset & 0x7FFFFFFF);
+					offset & ~0x0f000000);
 		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_SIZE0), size);
 
-		offset += size;
+		offset = (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) ? offset + size : 0;
 		size = VCE_V4_0_STACK_SIZE;
 		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_OFFSET1),
-					offset & 0x7FFFFFFF);
+					(offset & ~0x0f000000) | (1 << 24));
 		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_SIZE1), size);
 
 		offset += size;
 		size = VCE_V4_0_DATA_SIZE;
 		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_OFFSET2),
-					offset & 0x7FFFFFFF);
+					(offset & ~0x0f000000) | (2 << 24));
 		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_SIZE2), size);
 
 		MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_CTRL2), ~0x100, 0);
@@ -28,12 +28,12 @@
 #include "soc15d.h"
 #include "soc15_common.h"
 
-#include "vega10/soc15ip.h"
-#include "raven1/VCN/vcn_1_0_offset.h"
-#include "raven1/VCN/vcn_1_0_sh_mask.h"
-#include "vega10/HDP/hdp_4_0_offset.h"
-#include "raven1/MMHUB/mmhub_9_1_offset.h"
-#include "raven1/MMHUB/mmhub_9_1_sh_mask.h"
+#include "soc15ip.h"
+#include "vcn/vcn_1_0_offset.h"
+#include "vcn/vcn_1_0_sh_mask.h"
+#include "hdp/hdp_4_0_offset.h"
+#include "mmhub/mmhub_9_1_offset.h"
+#include "mmhub/mmhub_9_1_sh_mask.h"
 
 static int vcn_v1_0_start(struct amdgpu_device *adev);
 static int vcn_v1_0_stop(struct amdgpu_device *adev);
@@ -26,9 +26,9 @@
 #include "soc15.h"
 
 
-#include "vega10/soc15ip.h"
-#include "vega10/OSSSYS/osssys_4_0_offset.h"
-#include "vega10/OSSSYS/osssys_4_0_sh_mask.h"
+#include "soc15ip.h"
+#include "oss/osssys_4_0_offset.h"
+#include "oss/osssys_4_0_sh_mask.h"
 
 #include "soc15_common.h"
 #include "vega10_ih.h"
@@ -46,11 +46,11 @@ static void vega10_ih_set_interrupt_funcs(struct amdgpu_device *adev);
  */
 static void vega10_ih_enable_interrupts(struct amdgpu_device *adev)
 {
-	u32 ih_rb_cntl = RREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL));
+	u32 ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL);
 
 	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_ENABLE, 1);
 	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, ENABLE_INTR, 1);
-	WREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL), ih_rb_cntl);
+	WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl);
 	adev->irq.ih.enabled = true;
 }
 
@@ -63,14 +63,14 @@ static void vega10_ih_enable_interrupts(struct amdgpu_device *adev)
  */
 static void vega10_ih_disable_interrupts(struct amdgpu_device *adev)
 {
-	u32 ih_rb_cntl = RREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL));
+	u32 ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL);
 
 	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_ENABLE, 0);
 	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, ENABLE_INTR, 0);
-	WREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL), ih_rb_cntl);
+	WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl);
 	/* set rptr, wptr to 0 */
-	WREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR), 0);
-	WREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR), 0);
+	WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, 0);
+	WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR, 0);
 	adev->irq.ih.enabled = false;
 	adev->irq.ih.rptr = 0;
 }
@@ -102,15 +102,15 @@ static int vega10_ih_irq_init(struct amdgpu_device *adev)
 	else
 		nbio_v6_1_ih_control(adev);
 
-	ih_rb_cntl = RREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL));
+	ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL);
 	/* Ring Buffer base. [39:8] of 40-bit address of the beginning of the ring buffer*/
 	if (adev->irq.ih.use_bus_addr) {
-		WREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE), adev->irq.ih.rb_dma_addr >> 8);
-		WREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE_HI), ((u64)adev->irq.ih.rb_dma_addr >> 40) & 0xff);
+		WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE, adev->irq.ih.rb_dma_addr >> 8);
+		WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_HI, ((u64)adev->irq.ih.rb_dma_addr >> 40) & 0xff);
 		ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_SPACE, 1);
 	} else {
-		WREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE), adev->irq.ih.gpu_addr >> 8);
-		WREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE_HI), (adev->irq.ih.gpu_addr >> 40) & 0xff);
+		WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE, adev->irq.ih.gpu_addr >> 8);
+		WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_HI, (adev->irq.ih.gpu_addr >> 40) & 0xff);
 		ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_SPACE, 4);
 	}
 	rb_bufsz = order_base_2(adev->irq.ih.ring_size / 4);
@@ -126,21 +126,21 @@ static int vega10_ih_irq_init(struct amdgpu_device *adev)
 	if (adev->irq.msi_enabled)
 		ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RPTR_REARM, 1);
 
-	WREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL), ih_rb_cntl);
+	WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl);
 
 	/* set the writeback address whether it's enabled or not */
 	if (adev->irq.ih.use_bus_addr)
 		wptr_off = adev->irq.ih.rb_dma_addr + (adev->irq.ih.wptr_offs * 4);
 	else
 		wptr_off = adev->wb.gpu_addr + (adev->irq.ih.wptr_offs * 4);
-	WREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_ADDR_LO), lower_32_bits(wptr_off));
-	WREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_ADDR_HI), upper_32_bits(wptr_off) & 0xFF);
+	WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_ADDR_LO, lower_32_bits(wptr_off));
+	WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_ADDR_HI, upper_32_bits(wptr_off) & 0xFF);
 
 	/* set rptr, wptr to 0 */
-	WREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR), 0);
-	WREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR), 0);
+	WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, 0);
+	WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR, 0);
 
-	ih_doorbell_rtpr = RREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_DOORBELL_RPTR));
+	ih_doorbell_rtpr = RREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR);
 	if (adev->irq.ih.use_doorbell) {
 		ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr, IH_DOORBELL_RPTR,
 						 OFFSET, adev->irq.ih.doorbell_index);
@@ -150,20 +150,20 @@ static int vega10_ih_irq_init(struct amdgpu_device *adev)
 		ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr, IH_DOORBELL_RPTR,
 						 ENABLE, 0);
 	}
-	WREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_DOORBELL_RPTR), ih_doorbell_rtpr);
+	WREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR, ih_doorbell_rtpr);
 	if (adev->flags & AMD_IS_APU)
 		nbio_v7_0_ih_doorbell_range(adev, adev->irq.ih.use_doorbell, adev->irq.ih.doorbell_index);
 	else
 		nbio_v6_1_ih_doorbell_range(adev, adev->irq.ih.use_doorbell, adev->irq.ih.doorbell_index);
 
-	tmp = RREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL));
+	tmp = RREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL);
 	tmp = REG_SET_FIELD(tmp, IH_STORM_CLIENT_LIST_CNTL,
 			    CLIENT18_IS_STORM_CLIENT, 1);
-	WREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL), tmp);
+	WREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL, tmp);
 
-	tmp = RREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_INT_FLOOD_CNTL));
+	tmp = RREG32_SOC15(OSSSYS, 0, mmIH_INT_FLOOD_CNTL);
 	tmp = REG_SET_FIELD(tmp, IH_INT_FLOOD_CNTL, FLOOD_CNTL_ENABLE, 1);
-	WREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_INT_FLOOD_CNTL), tmp);
+	WREG32_SOC15(OSSSYS, 0, mmIH_INT_FLOOD_CNTL, tmp);
 
 	pci_set_master(adev->pdev);
 
@@ -367,7 +367,7 @@ static void vega10_ih_set_rptr(struct amdgpu_device *adev)
 		adev->wb.wb[adev->irq.ih.rptr_offs] = adev->irq.ih.rptr;
 		WDOORBELL32(adev->irq.ih.doorbell_index, adev->irq.ih.rptr);
 	} else {
-		WREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR), adev->irq.ih.rptr);
+		WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, adev->irq.ih.rptr);
 	}
 }
 
@@ -284,27 +284,27 @@ static void vi_init_golden_registers(struct amdgpu_device *adev)
 	case CHIP_TOPAZ:
 		amdgpu_program_register_sequence(adev,
 						 iceland_mgcg_cgcg_init,
-						 (const u32)ARRAY_SIZE(iceland_mgcg_cgcg_init));
+						 ARRAY_SIZE(iceland_mgcg_cgcg_init));
 		break;
 	case CHIP_FIJI:
 		amdgpu_program_register_sequence(adev,
 						 fiji_mgcg_cgcg_init,
-						 (const u32)ARRAY_SIZE(fiji_mgcg_cgcg_init));
+						 ARRAY_SIZE(fiji_mgcg_cgcg_init));
 		break;
 	case CHIP_TONGA:
 		amdgpu_program_register_sequence(adev,
 						 tonga_mgcg_cgcg_init,
-						 (const u32)ARRAY_SIZE(tonga_mgcg_cgcg_init));
+						 ARRAY_SIZE(tonga_mgcg_cgcg_init));
 		break;
 	case CHIP_CARRIZO:
 		amdgpu_program_register_sequence(adev,
 						 cz_mgcg_cgcg_init,
-						 (const u32)ARRAY_SIZE(cz_mgcg_cgcg_init));
+						 ARRAY_SIZE(cz_mgcg_cgcg_init));
 		break;
 	case CHIP_STONEY:
 		amdgpu_program_register_sequence(adev,
 						 stoney_mgcg_cgcg_init,
-						 (const u32)ARRAY_SIZE(stoney_mgcg_cgcg_init));
+						 ARRAY_SIZE(stoney_mgcg_cgcg_init));
 		break;
 	case CHIP_POLARIS11:
 	case CHIP_POLARIS10:
@ -59,9 +59,9 @@
|
||||||
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
|
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
|
||||||
#include "ivsrcid/irqsrcs_dcn_1_0.h"
|
#include "ivsrcid/irqsrcs_dcn_1_0.h"
|
||||||
|
|
||||||
#include "raven1/DCN/dcn_1_0_offset.h"
|
#include "dcn/dcn_1_0_offset.h"
|
||||||
#include "raven1/DCN/dcn_1_0_sh_mask.h"
|
#include "dcn/dcn_1_0_sh_mask.h"
|
||||||
#include "vega10/soc15ip.h"
|
#include "soc15ip.h"
|
||||||
|
|
||||||
#include "soc15_common.h"
|
#include "soc15_common.h"
|
||||||
#endif
|
#endif
|
||||||
|
@ -792,7 +792,7 @@ dm_atomic_state_alloc_free(struct drm_atomic_state *state)
|
||||||
|
|
||||||
static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
|
static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
|
||||||
.fb_create = amdgpu_user_framebuffer_create,
|
.fb_create = amdgpu_user_framebuffer_create,
|
||||||
.output_poll_changed = amdgpu_output_poll_changed,
|
.output_poll_changed = drm_fb_helper_output_poll_changed,
|
||||||
.atomic_check = amdgpu_dm_atomic_check,
|
.atomic_check = amdgpu_dm_atomic_check,
|
||||||
.atomic_commit = amdgpu_dm_atomic_commit,
|
.atomic_commit = amdgpu_dm_atomic_commit,
|
||||||
.atomic_state_alloc = dm_atomic_state_alloc,
|
.atomic_state_alloc = dm_atomic_state_alloc,
|
||||||
|
@ -1590,7 +1590,6 @@ static int dm_early_init(void *handle)
|
||||||
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
|
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
|
||||||
|
|
||||||
adev->ddev->driver->driver_features |= DRIVER_ATOMIC;
|
adev->ddev->driver->driver_features |= DRIVER_ATOMIC;
|
||||||
amdgpu_dm_set_irq_funcs(adev);
|
|
||||||
|
|
||||||
switch (adev->asic_type) {
|
switch (adev->asic_type) {
|
||||||
case CHIP_BONAIRE:
|
case CHIP_BONAIRE:
|
||||||
|
@ -1664,6 +1663,8 @@ static int dm_early_init(void *handle)
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
amdgpu_dm_set_irq_funcs(adev);
|
||||||
|
|
||||||
if (adev->mode_info.funcs == NULL)
|
if (adev->mode_info.funcs == NULL)
|
||||||
adev->mode_info.funcs = &dm_display_funcs;
|
adev->mode_info.funcs = &dm_display_funcs;
|
||||||
|
|
||||||
|
@ -1679,18 +1680,6 @@ static int dm_early_init(void *handle)
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
struct dm_connector_state {
|
|
||||||
struct drm_connector_state base;
|
|
||||||
|
|
||||||
enum amdgpu_rmx_type scaling;
|
|
||||||
uint8_t underscan_vborder;
|
|
||||||
uint8_t underscan_hborder;
|
|
||||||
bool underscan_enable;
|
|
||||||
};
|
|
||||||
|
|
||||||
#define to_dm_connector_state(x)\
|
|
||||||
container_of((x), struct dm_connector_state, base)
|
|
||||||
|
|
||||||
static bool modeset_required(struct drm_crtc_state *crtc_state,
|
static bool modeset_required(struct drm_crtc_state *crtc_state,
|
||||||
struct dc_stream_state *new_stream,
|
struct dc_stream_state *new_stream,
|
||||||
struct dc_stream_state *old_stream)
|
struct dc_stream_state *old_stream)
|
||||||
|
@ -1773,8 +1762,7 @@ static bool fill_rects_from_plane_state(const struct drm_plane_state *state,
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
|
static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
|
||||||
uint64_t *tiling_flags,
|
uint64_t *tiling_flags)
|
||||||
uint64_t *fb_location)
|
|
||||||
{
|
{
|
||||||
struct amdgpu_bo *rbo = gem_to_amdgpu_bo(amdgpu_fb->obj);
|
struct amdgpu_bo *rbo = gem_to_amdgpu_bo(amdgpu_fb->obj);
|
||||||
int r = amdgpu_bo_reserve(rbo, false);
|
int r = amdgpu_bo_reserve(rbo, false);
|
||||||
|
@ -1786,9 +1774,6 @@ static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
|
||||||
return r;
|
return r;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (fb_location)
|
|
||||||
*fb_location = amdgpu_bo_gpu_offset(rbo);
|
|
||||||
|
|
||||||
if (tiling_flags)
|
if (tiling_flags)
|
||||||
amdgpu_bo_get_tiling_flags(rbo, tiling_flags);
|
amdgpu_bo_get_tiling_flags(rbo, tiling_flags);
|
||||||
|
|
||||||
|
@ -1799,12 +1784,9 @@ static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
|
||||||
|
|
||||||
static int fill_plane_attributes_from_fb(struct amdgpu_device *adev,
|
static int fill_plane_attributes_from_fb(struct amdgpu_device *adev,
|
||||||
struct dc_plane_state *plane_state,
|
struct dc_plane_state *plane_state,
|
||||||
const struct amdgpu_framebuffer *amdgpu_fb,
|
const struct amdgpu_framebuffer *amdgpu_fb)
|
||||||
bool addReq)
|
|
||||||
{
|
{
|
||||||
uint64_t tiling_flags;
|
uint64_t tiling_flags;
|
||||||
uint64_t fb_location = 0;
|
|
||||||
uint64_t chroma_addr = 0;
|
|
||||||
unsigned int awidth;
|
unsigned int awidth;
|
||||||
const struct drm_framebuffer *fb = &amdgpu_fb->base;
|
const struct drm_framebuffer *fb = &amdgpu_fb->base;
|
||||||
int ret = 0;
|
int ret = 0;
|
||||||
|
@ -1812,8 +1794,7 @@ static int fill_plane_attributes_from_fb(struct amdgpu_device *adev,
|
||||||
|
|
||||||
ret = get_fb_info(
|
ret = get_fb_info(
|
||||||
amdgpu_fb,
|
amdgpu_fb,
|
||||||
&tiling_flags,
|
&tiling_flags);
|
||||||
addReq == true ? &fb_location:NULL);
|
|
||||||
|
|
||||||
if (ret)
|
if (ret)
|
||||||
return ret;
|
return ret;
|
||||||
|
@ -1851,8 +1832,6 @@ static int fill_plane_attributes_from_fb(struct amdgpu_device *adev,
|
||||||
|
|
||||||
if (plane_state->format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
|
if (plane_state->format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
|
||||||
plane_state->address.type = PLN_ADDR_TYPE_GRAPHICS;
|
plane_state->address.type = PLN_ADDR_TYPE_GRAPHICS;
|
||||||
plane_state->address.grph.addr.low_part = lower_32_bits(fb_location);
|
|
||||||
plane_state->address.grph.addr.high_part = upper_32_bits(fb_location);
|
|
||||||
plane_state->plane_size.grph.surface_size.x = 0;
|
plane_state->plane_size.grph.surface_size.x = 0;
|
||||||
plane_state->plane_size.grph.surface_size.y = 0;
|
plane_state->plane_size.grph.surface_size.y = 0;
|
||||||
plane_state->plane_size.grph.surface_size.width = fb->width;
|
plane_state->plane_size.grph.surface_size.width = fb->width;
|
||||||
|
@ -1865,15 +1844,6 @@ static int fill_plane_attributes_from_fb(struct amdgpu_device *adev,
|
||||||
} else {
|
} else {
|
||||||
awidth = ALIGN(fb->width, 64);
|
awidth = ALIGN(fb->width, 64);
|
||||||
plane_state->address.type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
|
plane_state->address.type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
|
||||||
plane_state->address.video_progressive.luma_addr.low_part
|
|
||||||
= lower_32_bits(fb_location);
|
|
||||||
plane_state->address.video_progressive.luma_addr.high_part
|
|
||||||
= upper_32_bits(fb_location);
|
|
||||||
chroma_addr = fb_location + (u64)(awidth * fb->height);
|
|
||||||
plane_state->address.video_progressive.chroma_addr.low_part
|
|
||||||
= lower_32_bits(chroma_addr);
|
|
||||||
plane_state->address.video_progressive.chroma_addr.high_part
|
|
||||||
= upper_32_bits(chroma_addr);
|
|
||||||
plane_state->plane_size.video.luma_size.x = 0;
|
plane_state->plane_size.video.luma_size.x = 0;
|
||||||
plane_state->plane_size.video.luma_size.y = 0;
|
plane_state->plane_size.video.luma_size.y = 0;
|
||||||
plane_state->plane_size.video.luma_size.width = awidth;
|
plane_state->plane_size.video.luma_size.width = awidth;
|
||||||
|
@ -1983,8 +1953,7 @@ static void fill_gamma_from_crtc_state(const struct drm_crtc_state *crtc_state,
|
||||||
static int fill_plane_attributes(struct amdgpu_device *adev,
|
static int fill_plane_attributes(struct amdgpu_device *adev,
|
||||||
struct dc_plane_state *dc_plane_state,
|
struct dc_plane_state *dc_plane_state,
|
||||||
struct drm_plane_state *plane_state,
|
struct drm_plane_state *plane_state,
|
||||||
struct drm_crtc_state *crtc_state,
|
struct drm_crtc_state *crtc_state)
|
||||||
bool addrReq)
|
|
||||||
{
|
{
|
||||||
const struct amdgpu_framebuffer *amdgpu_fb =
|
const struct amdgpu_framebuffer *amdgpu_fb =
|
||||||
to_amdgpu_framebuffer(plane_state->fb);
|
to_amdgpu_framebuffer(plane_state->fb);
|
||||||
|
@ -1998,8 +1967,7 @@ static int fill_plane_attributes(struct amdgpu_device *adev,
|
||||||
ret = fill_plane_attributes_from_fb(
|
ret = fill_plane_attributes_from_fb(
|
||||||
crtc->dev->dev_private,
|
crtc->dev->dev_private,
|
||||||
dc_plane_state,
|
dc_plane_state,
|
||||||
amdgpu_fb,
|
amdgpu_fb);
|
||||||
addrReq);
|
|
||||||
|
|
||||||
if (ret)
|
if (ret)
|
||||||
return ret;
|
return ret;
|
||||||
|
@ -2174,6 +2142,7 @@ fill_stream_properties_from_drm_display_mode(struct dc_stream_state *stream,
|
||||||
const struct drm_connector *connector)
|
const struct drm_connector *connector)
|
||||||
{
|
{
|
||||||
struct dc_crtc_timing *timing_out = &stream->timing;
|
struct dc_crtc_timing *timing_out = &stream->timing;
|
||||||
|
struct dc_transfer_func *tf = dc_create_transfer_func();
|
||||||
|
|
||||||
memset(timing_out, 0, sizeof(struct dc_crtc_timing));
|
memset(timing_out, 0, sizeof(struct dc_crtc_timing));
|
||||||
|
|
||||||
|
@ -2217,13 +2186,9 @@ fill_stream_properties_from_drm_display_mode(struct dc_stream_state *stream,
|
||||||
|
|
||||||
stream->output_color_space = get_output_color_space(timing_out);
|
stream->output_color_space = get_output_color_space(timing_out);
|
||||||
|
|
||||||
{
|
tf->type = TF_TYPE_PREDEFINED;
|
||||||
struct dc_transfer_func *tf = dc_create_transfer_func();
|
tf->tf = TRANSFER_FUNCTION_SRGB;
|
||||||
|
stream->out_transfer_func = tf;
|
||||||
tf->type = TF_TYPE_PREDEFINED;
|
|
||||||
tf->tf = TRANSFER_FUNCTION_SRGB;
|
|
||||||
stream->out_transfer_func = tf;
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static void fill_audio_info(struct audio_info *audio_info,
|
static void fill_audio_info(struct audio_info *audio_info,
|
||||||
|
@ -2330,6 +2295,56 @@ static int create_fake_sink(struct amdgpu_dm_connector *aconnector)
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static void set_multisync_trigger_params(
|
||||||
|
struct dc_stream_state *stream)
|
||||||
|
{
|
||||||
|
if (stream->triggered_crtc_reset.enabled) {
|
||||||
|
stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
|
||||||
|
stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
static void set_master_stream(struct dc_stream_state *stream_set[],
|
||||||
|
int stream_count)
|
||||||
|
{
|
||||||
|
int j, highest_rfr = 0, master_stream = 0;
|
||||||
|
|
||||||
|
for (j = 0; j < stream_count; j++) {
|
||||||
|
if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
|
||||||
|
int refresh_rate = 0;
|
||||||
|
|
||||||
|
refresh_rate = (stream_set[j]->timing.pix_clk_khz*1000)/
|
||||||
|
(stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
|
||||||
|
if (refresh_rate > highest_rfr) {
|
||||||
|
highest_rfr = refresh_rate;
|
||||||
|
master_stream = j;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for (j = 0; j < stream_count; j++) {
|
||||||
|
if (stream_set[j] && j != master_stream)
|
||||||
|
stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
|
||||||
|
{
|
||||||
|
int i = 0;
|
||||||
|
|
||||||
|
if (context->stream_count < 2)
|
||||||
|
return;
|
||||||
|
for (i = 0; i < context->stream_count ; i++) {
|
||||||
|
if (!context->streams[i])
|
||||||
|
continue;
|
||||||
|
/* TODO: add a function to read AMD VSDB bits and will set
|
||||||
|
* crtc_sync_master.multi_sync_enabled flag
|
||||||
|
* For now its set to false
|
||||||
|
*/
|
||||||
|
set_multisync_trigger_params(context->streams[i]);
|
||||||
|
}
|
||||||
|
set_master_stream(context->streams, context->stream_count);
|
||||||
|
}
|
||||||
|
|
||||||
static struct dc_stream_state *
|
static struct dc_stream_state *
|
||||||
create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
|
create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
|
||||||
const struct drm_display_mode *drm_mode,
|
const struct drm_display_mode *drm_mode,
|
||||||
|
@ -2986,7 +3001,7 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
|
||||||
= lower_32_bits(afb->address);
|
= lower_32_bits(afb->address);
|
||||||
plane_state->address.video_progressive.luma_addr.high_part
|
plane_state->address.video_progressive.luma_addr.high_part
|
||||||
= upper_32_bits(afb->address);
|
= upper_32_bits(afb->address);
|
||||||
chroma_addr = afb->address + (u64)(awidth * new_state->fb->height);
|
chroma_addr = afb->address + (u64)awidth * new_state->fb->height;
|
||||||
plane_state->address.video_progressive.chroma_addr.low_part
|
plane_state->address.video_progressive.chroma_addr.low_part
|
||||||
= lower_32_bits(chroma_addr);
|
= lower_32_bits(chroma_addr);
|
||||||
plane_state->address.video_progressive.chroma_addr.high_part
|
plane_state->address.video_progressive.chroma_addr.high_part
|
||||||
|
@ -3994,6 +4009,19 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
|
||||||
|
* @crtc_state: the DRM CRTC state
|
||||||
|
* @stream_state: the DC stream state.
|
||||||
|
*
|
||||||
|
* Copy the mirrored transient state flags from DRM, to DC. It is used to bring
|
||||||
|
* a dc_stream_state's flags in sync with a drm_crtc_state's flags.
|
||||||
|
*/
|
||||||
|
static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
|
||||||
|
struct dc_stream_state *stream_state)
|
||||||
|
{
|
||||||
|
stream_state->mode_changed = crtc_state->mode_changed;
|
||||||
|
}
|
||||||
|
|
||||||
static int amdgpu_dm_atomic_commit(struct drm_device *dev,
|
static int amdgpu_dm_atomic_commit(struct drm_device *dev,
|
||||||
struct drm_atomic_state *state,
|
struct drm_atomic_state *state,
|
||||||
|
@ -4033,11 +4061,8 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
|
||||||
struct amdgpu_display_manager *dm = &adev->dm;
|
struct amdgpu_display_manager *dm = &adev->dm;
|
||||||
struct dm_atomic_state *dm_state;
|
struct dm_atomic_state *dm_state;
|
||||||
uint32_t i, j;
|
uint32_t i, j;
|
||||||
uint32_t new_crtcs_count = 0;
|
|
||||||
struct drm_crtc *crtc;
|
struct drm_crtc *crtc;
|
||||||
struct drm_crtc_state *old_crtc_state, *new_crtc_state;
|
struct drm_crtc_state *old_crtc_state, *new_crtc_state;
|
||||||
struct amdgpu_crtc *new_crtcs[MAX_STREAMS];
|
|
||||||
struct dc_stream_state *new_stream = NULL;
|
|
||||||
unsigned long flags;
|
unsigned long flags;
|
||||||
bool wait_for_vblank = true;
|
bool wait_for_vblank = true;
|
||||||
struct drm_connector *connector;
|
struct drm_connector *connector;
|
||||||
|
@ -4067,6 +4092,12 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
|
||||||
new_crtc_state->active_changed,
|
new_crtc_state->active_changed,
|
||||||
new_crtc_state->connectors_changed);
|
new_crtc_state->connectors_changed);
|
||||||
|
|
||||||
|
/* Copy all transient state flags into dc state */
|
||||||
|
if (dm_new_crtc_state->stream) {
|
||||||
|
amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
|
||||||
|
dm_new_crtc_state->stream);
|
||||||
|
}
|
||||||
|
|
||||||
/* handles headless hotplug case, updating new_state and
|
/* handles headless hotplug case, updating new_state and
|
||||||
* aconnector as needed
|
* aconnector as needed
|
||||||
*/
|
*/
|
||||||
|
@@ -4096,25 +4127,9 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
 				continue;
 			}
 
-
 			if (dm_old_crtc_state->stream)
 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
 
-
-			/*
-			 * this loop saves set mode crtcs
-			 * we needed to enable vblanks once all
-			 * resources acquired in dc after dc_commit_streams
-			 */
-
-			/*TODO move all this into dm_crtc_state, get rid of
-			 * new_crtcs array and use old and new atomic states
-			 * instead
-			 */
-			new_crtcs[new_crtcs_count] = acrtc;
-			new_crtcs_count++;
-
-			new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
 			acrtc->enabled = true;
 			acrtc->hw_mode = new_crtc_state->mode;
 			crtc->hwmode = new_crtc_state->mode;
@@ -4132,31 +4147,61 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
 	 * are removed from freesync module
 	 */
 	if (adev->dm.freesync_module) {
-		for (i = 0; i < new_crtcs_count; i++) {
+		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
+					      new_crtc_state, i) {
 			struct amdgpu_dm_connector *aconnector = NULL;
+			struct dm_connector_state *dm_new_con_state = NULL;
+			struct amdgpu_crtc *acrtc = NULL;
+			bool modeset_needed;
 
-			new_crtc_state = drm_atomic_get_new_crtc_state(state,
-					&new_crtcs[i]->base);
 			dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+			dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
+			modeset_needed = modeset_required(
+					new_crtc_state,
+					dm_new_crtc_state->stream,
+					dm_old_crtc_state->stream);
+			/* We add stream to freesync if:
+			 * 1. Said stream is not null, and
+			 * 2. A modeset is requested. This means that the
+			 *    stream was removed previously, and needs to be
+			 *    replaced.
+			 */
+			if (dm_new_crtc_state->stream == NULL ||
+					!modeset_needed)
+				continue;
 
-			new_stream = dm_new_crtc_state->stream;
-			aconnector = amdgpu_dm_find_first_crtc_matching_connector(
-					state,
-					&new_crtcs[i]->base);
+			acrtc = to_amdgpu_crtc(crtc);
+			aconnector =
+				amdgpu_dm_find_first_crtc_matching_connector(
+					state, crtc);
 			if (!aconnector) {
-				DRM_DEBUG_DRIVER("Atomic commit: Failed to find connector for acrtc id:%d "
-					 "skipping freesync init\n",
-					 new_crtcs[i]->crtc_id);
+				DRM_DEBUG_DRIVER("Atomic commit: Failed to "
+						 "find connector for acrtc "
+						 "id:%d skipping freesync "
+						 "init\n",
+						 acrtc->crtc_id);
 				continue;
 			}
 
 			mod_freesync_add_stream(adev->dm.freesync_module,
-						new_stream, &aconnector->caps);
+						dm_new_crtc_state->stream,
+						&aconnector->caps);
+			new_con_state = drm_atomic_get_new_connector_state(
+					state, &aconnector->base);
+			dm_new_con_state = to_dm_connector_state(new_con_state);
+
+			mod_freesync_set_user_enable(adev->dm.freesync_module,
+						     &dm_new_crtc_state->stream,
+						     1,
+						     &dm_new_con_state->user_enable);
 		}
 	}
 
-	if (dm_state->context)
+	if (dm_state->context) {
+		dm_enable_per_frame_crtc_master_sync(dm_state->context);
 		WARN_ON(!dc_commit_state(dm->dc, dm_state->context));
+	}
 
 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
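For context: the loop above now walks the atomic state's old/new CRTC pairs instead of a driver-private array, and only touches freesync when a stream exists and a modeset is actually requested. A minimal standalone C sketch of that old/new-pair walk with a gating predicate; the type and names here are illustrative, not the DRM helpers:

	#include <stdio.h>

	struct crtc_state { int enabled; int mode_changed; };

	/* Walk matching old/new state pairs, acting only where a modeset is needed. */
	static void walk_pairs(const struct crtc_state *old_states,
			       const struct crtc_state *new_states, int n)
	{
		for (int i = 0; i < n; i++) {
			int modeset_needed = new_states[i].enabled &&
					     new_states[i].mode_changed;
			if (!modeset_needed)
				continue;
			printf("crtc %d: (re)register with freesync\n", i);
		}
	}

	int main(void)
	{
		struct crtc_state old[2] = { {1, 0}, {0, 0} };
		struct crtc_state new[2] = { {1, 1}, {1, 1} };
		walk_pairs(old, new, 2);
		return 0;
	}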
@@ -4214,18 +4259,28 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
 			dm_error("%s: Failed to update stream scaling!\n", __func__);
 	}
 
-	for (i = 0; i < new_crtcs_count; i++) {
+	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
+			new_crtc_state, i) {
 		/*
 		 * loop to enable interrupts on newly arrived crtc
 		 */
-		struct amdgpu_crtc *acrtc = new_crtcs[i];
+		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
+		bool modeset_needed;
 
-		new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
+		modeset_needed = modeset_required(
+				new_crtc_state,
+				dm_new_crtc_state->stream,
+				dm_old_crtc_state->stream);
+
+		if (dm_new_crtc_state->stream == NULL || !modeset_needed)
+			continue;
 
 		if (adev->dm.freesync_module)
 			mod_freesync_notify_mode_change(
-				adev->dm.freesync_module, &dm_new_crtc_state->stream, 1);
+				adev->dm.freesync_module,
+				&dm_new_crtc_state->stream, 1);
 
 		manage_dm_interrupts(adev, acrtc, true);
 	}
@@ -4527,6 +4582,7 @@ static int dm_update_crtcs_state(struct dc *dc,
 			WARN_ON(dm_new_crtc_state->stream);
 
 			dm_new_crtc_state->stream = new_stream;
+
 			dc_stream_retain(new_stream);
 
 			DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",

@@ -4652,8 +4708,7 @@ static int dm_update_planes_state(struct dc *dc,
 				new_plane_crtc->dev->dev_private,
 				dm_new_plane_state->dc_state,
 				new_plane_state,
-				new_crtc_state,
-				false);
+				new_crtc_state);
 			if (ret)
 				return ret;
@@ -4668,6 +4723,11 @@ static int dm_update_planes_state(struct dc *dc,
 				return ret;
 			}
 
+			/* Tell DC to do a full surface update every time there
+			 * is a plane change. Inefficient, but works for now.
+			 */
+			dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
+
 			*lock_and_validation_needed = true;
 		}
 	}

@@ -4679,8 +4739,6 @@ static int dm_update_planes_state(struct dc *dc,
 static int amdgpu_dm_atomic_check(struct drm_device *dev,
 				  struct drm_atomic_state *state)
 {
-	int i;
-	int ret;
 	struct amdgpu_device *adev = dev->dev_private;
 	struct dc *dc = adev->dm.dc;
 	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
@@ -4688,6 +4746,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
 	struct drm_connector_state *old_con_state, *new_con_state;
 	struct drm_crtc *crtc;
 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+	int ret, i;
 
 	/*
 	 * This bool will be set for true for any modeset/reset

@@ -4699,37 +4758,21 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
 	if (ret)
 		goto fail;
 
-	/*
-	 * legacy_cursor_update should be made false for SoC's having
-	 * a dedicated hardware plane for cursor in amdgpu_dm_atomic_commit(),
-	 * otherwise for software cursor plane,
-	 * we should not add it to list of affected planes.
-	 */
-	if (state->legacy_cursor_update) {
-		for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
-			if (new_crtc_state->color_mgmt_changed) {
-				ret = drm_atomic_add_affected_planes(state, crtc);
-				if (ret)
-					goto fail;
-			}
-		}
-	} else {
-		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
-			if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
-			    !new_crtc_state->color_mgmt_changed)
-				continue;
+	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
+		    !new_crtc_state->color_mgmt_changed)
+			continue;
 
 		if (!new_crtc_state->enable)
 			continue;
 
 		ret = drm_atomic_add_affected_connectors(state, crtc);
 		if (ret)
 			return ret;
 
 		ret = drm_atomic_add_affected_planes(state, crtc);
 		if (ret)
 			goto fail;
-		}
 	}
 
 	dm_state->context = dc_create_state();
@@ -220,6 +220,18 @@ struct dm_atomic_state {
 
 #define to_dm_atomic_state(x) container_of(x, struct dm_atomic_state, base)
 
+struct dm_connector_state {
+	struct drm_connector_state base;
+
+	enum amdgpu_rmx_type scaling;
+	uint8_t underscan_vborder;
+	uint8_t underscan_hborder;
+	bool underscan_enable;
+	struct mod_freesync_user_enable user_enable;
+};
+
+#define to_dm_connector_state(x)\
+	container_of((x), struct dm_connector_state, base)
+
 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector);
 struct drm_connector_state *
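dm_connector_state embeds the core drm_connector_state as its first member and recovers the wrapper with a container_of cast, the usual way DRM drivers extend core state objects. A standalone sketch of that embed-and-upcast pattern, using an offsetof-based macro like the kernel's container_of; the struct and field names are illustrative only:

	#include <stdio.h>
	#include <stddef.h>

	struct base_state { int enabled; };

	struct wrapped_state {
		struct base_state base;   /* embedded core object */
		int underscan_enable;     /* driver-private extension */
	};

	/* Recover the wrapper from a pointer to its embedded base member. */
	#define to_wrapped_state(ptr) \
		((struct wrapped_state *)((char *)(ptr) - offsetof(struct wrapped_state, base)))

	int main(void)
	{
		struct wrapped_state ws = { { 1 }, 1 };
		struct base_state *base = &ws.base;      /* what core code hands back */
		struct wrapped_state *again = to_wrapped_state(base);

		printf("underscan_enable = %d\n", again->underscan_enable);
		return 0;
	}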
@@ -683,13 +683,16 @@ static const struct amdgpu_irq_src_funcs dm_hpd_irq_funcs = {
 
 void amdgpu_dm_set_irq_funcs(struct amdgpu_device *adev)
 {
-	adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_LAST;
+	if (adev->mode_info.num_crtc > 0)
+		adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_VLINE1 + adev->mode_info.num_crtc;
+	else
+		adev->crtc_irq.num_types = 0;
 	adev->crtc_irq.funcs = &dm_crtc_irq_funcs;
 
-	adev->pageflip_irq.num_types = AMDGPU_PAGEFLIP_IRQ_LAST;
+	adev->pageflip_irq.num_types = adev->mode_info.num_crtc;
 	adev->pageflip_irq.funcs = &dm_pageflip_irq_funcs;
 
-	adev->hpd_irq.num_types = AMDGPU_HPD_LAST;
+	adev->hpd_irq.num_types = adev->mode_info.num_hpd;
 	adev->hpd_irq.funcs = &dm_hpd_irq_funcs;
 }
@@ -41,6 +41,10 @@ unsigned long long dm_get_timestamp(struct dc_context *ctx)
 	return 0;
 }
 
+void dm_perf_trace_timestamp(const char *func_name, unsigned int line)
+{
+}
+
 bool dm_write_persistent_data(struct dc_context *ctx,
 				const struct dc_sink *sink,
 				const char *module_name,

@@ -131,11 +135,12 @@ bool dm_pp_apply_display_requirements(
 		adev->pm.pm_display_cfg.min_bus_bandwidth = 0;
 
 		/* TODO: complete implementation of
-		 * amd_powerplay_display_configuration_change().
+		 * pp_display_configuration_change().
 		 * Follow example of:
 		 * PHM_StoreDALConfigurationData - powerplay\hwmgr\hardwaremanager.c
 		 * PP_IRI_DisplayConfigurationChange - powerplay\eventmgr\iri.c */
-		amd_powerplay_display_configuration_change(
+		if (adev->powerplay.pp_funcs->display_configuration_change)
+			adev->powerplay.pp_funcs->display_configuration_change(
 				adev->powerplay.pp_handle,
 				&adev->pm.pm_display_cfg);
@@ -264,22 +269,26 @@ bool dm_pp_get_clock_levels_by_type(
 	struct amd_pp_simple_clock_info validation_clks = { 0 };
 	uint32_t i;
 
-	if (amd_powerplay_get_clock_by_type(pp_handle,
-		dc_to_pp_clock_type(clk_type), &pp_clks)) {
-		/* Error in pplib. Provide default values. */
-		get_default_clock_levels(clk_type, dc_clks);
-		return true;
+	if (adev->powerplay.pp_funcs->get_clock_by_type) {
+		if (adev->powerplay.pp_funcs->get_clock_by_type(pp_handle,
+			dc_to_pp_clock_type(clk_type), &pp_clks)) {
+			/* Error in pplib. Provide default values. */
+			get_default_clock_levels(clk_type, dc_clks);
+			return true;
+		}
 	}
 
 	pp_to_dc_clock_levels(&pp_clks, dc_clks, clk_type);
 
-	if (amd_powerplay_get_display_mode_validation_clocks(pp_handle,
-			&validation_clks)) {
-		/* Error in pplib. Provide default values. */
-		DRM_INFO("DM_PPLIB: Warning: using default validation clocks!\n");
-		validation_clks.engine_max_clock = 72000;
-		validation_clks.memory_max_clock = 80000;
-		validation_clks.level = 0;
+	if (adev->powerplay.pp_funcs->get_display_mode_validation_clocks) {
+		if (adev->powerplay.pp_funcs->get_display_mode_validation_clocks(
+						pp_handle, &validation_clks)) {
+			/* Error in pplib. Provide default values. */
+			DRM_INFO("DM_PPLIB: Warning: using default validation clocks!\n");
+			validation_clks.engine_max_clock = 72000;
+			validation_clks.memory_max_clock = 80000;
+			validation_clks.level = 0;
+		}
 	}
 
 	DRM_INFO("DM_PPLIB: Validation clocks:\n");
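The hunks above replace direct amd_powerplay_* calls with checks on the powerplay function table, so a backend that does not implement a hook falls back to defaults instead of dereferencing a missing callback. A small standalone sketch of that guard-then-fallback pattern; the struct, names, and return convention (0 on success) are illustrative assumptions, not the driver's API:

	#include <stdio.h>

	struct clock_levels { int num_levels; int khz[8]; };

	struct pp_funcs {
		/* Optional backend hook; may be NULL. Returns 0 on success. */
		int (*get_clock_by_type)(struct clock_levels *out);
	};

	static void get_default_levels(struct clock_levels *out)
	{
		out->num_levels = 1;
		out->khz[0] = 300000;
	}

	static void query_clocks(const struct pp_funcs *funcs, struct clock_levels *out)
	{
		/* Only call through the pointer when the hook is implemented. */
		if (funcs->get_clock_by_type && funcs->get_clock_by_type(out) == 0)
			return;
		get_default_levels(out); /* fall back to safe defaults */
	}

	int main(void)
	{
		struct pp_funcs funcs = { 0 };   /* backend without the hook */
		struct clock_levels levels;

		query_clocks(&funcs, &levels);
		printf("levels: %d, first: %d kHz\n", levels.num_levels, levels.khz[0]);
		return 0;
	}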
@@ -83,15 +83,11 @@ void dc_conn_log(struct dc_context *ctx,
 			link->link_index);
 
 	va_start(args, msg);
-	entry.buf_offset += dm_log_to_buffer(
-		&entry.buf[entry.buf_offset],
-		LOG_MAX_LINE_SIZE - entry.buf_offset,
-		msg, args);
+	dm_logger_append_va(&entry, msg, args);
 
-	if (entry.buf[strlen(entry.buf) - 1] == '\n') {
-		entry.buf[strlen(entry.buf) - 1] = '\0';
+	if (entry.buf_offset > 0 &&
+	    entry.buf[entry.buf_offset - 1] == '\n')
 		entry.buf_offset--;
-	}
 
 	if (hex_data)
 		for (i = 0; i < hex_data_count; i++)
@@ -70,9 +70,8 @@ static bool construct(struct dc_context *ctx, struct dal_logger *logger,
 {
 	/* malloc buffer and init offsets */
 	logger->log_buffer_size = DAL_LOGGER_BUFFER_MAX_SIZE;
-	logger->log_buffer = (char *)kzalloc(logger->log_buffer_size * sizeof(char),
+	logger->log_buffer = kcalloc(logger->log_buffer_size, sizeof(char),
 				     GFP_KERNEL);
 
 	if (!logger->log_buffer)
 		return false;

@@ -312,6 +311,18 @@ void dm_logger_append(
 	struct log_entry *entry,
 	const char *msg,
 	...)
+{
+	va_list args;
+
+	va_start(args, msg);
+	dm_logger_append_va(entry, msg, args);
+	va_end(args);
+}
+
+void dm_logger_append_va(
+	struct log_entry *entry,
+	const char *msg,
+	va_list args)
 {
 	struct dal_logger *logger;
 

@@ -326,11 +337,8 @@ void dm_logger_append(
 		dal_logger_should_log(logger, entry->type)) {
 
 		uint32_t size;
-		va_list args;
 		char buffer[LOG_MAX_LINE_SIZE];
 
-		va_start(args, msg);
-
 		size = dm_log_to_buffer(
 			buffer, LOG_MAX_LINE_SIZE, msg, args);
 

@@ -339,8 +347,6 @@ void dm_logger_append(
 		} else {
 			append_entry(entry, "LOG_ERROR, line too long\n", 27);
 		}
-
-		va_end(args);
 	}
 }
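dm_logger_append is reduced to a thin variadic wrapper around the new dm_logger_append_va, the usual printf/vprintf-style split so callers that already hold a va_list (such as dc_conn_log above) can forward it directly. A standalone sketch of that pairing, with hypothetical function names:

	#include <stdarg.h>
	#include <stdio.h>

	/* The va_list flavour does the real work. */
	static void log_append_va(const char *fmt, va_list args)
	{
		vfprintf(stderr, fmt, args);
	}

	/* The variadic flavour just packages its arguments and forwards them. */
	static void log_append(const char *fmt, ...)
	{
		va_list args;

		va_start(args, fmt);
		log_append_va(fmt, args);
		va_end(args);
	}

	int main(void)
	{
		log_append("pipe %d: %s\n", 0, "enabled");
		return 0;
	}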
@@ -432,25 +432,13 @@ static void dcn_bw_calc_rq_dlg_ttu(
 	input.clks_cfg.dcfclk_mhz = v->dcfclk;
 	input.clks_cfg.dispclk_mhz = v->dispclk;
 	input.clks_cfg.dppclk_mhz = v->dppclk;
-	input.clks_cfg.refclk_mhz = dc->res_pool->ref_clock_inKhz/1000;
+	input.clks_cfg.refclk_mhz = dc->res_pool->ref_clock_inKhz / 1000.0;
 	input.clks_cfg.socclk_mhz = v->socclk;
 	input.clks_cfg.voltage = v->voltage_level;
 //	dc->dml.logger = pool->base.logger;
 	input.dout.output_format = (v->output_format[in_idx] == dcn_bw_420) ? dm_420 : dm_444;
 	input.dout.output_type  = (v->output[in_idx] == dcn_bw_hdmi) ? dm_hdmi : dm_dp;
 	//input[in_idx].dout.output_standard;
-	switch (v->output_deep_color[in_idx]) {
-	case dcn_bw_encoder_12bpc:
-		input.dout.output_bpc = dm_out_12;
-		break;
-	case dcn_bw_encoder_10bpc:
-		input.dout.output_bpc = dm_out_10;
-		break;
-	case dcn_bw_encoder_8bpc:
-	default:
-		input.dout.output_bpc = dm_out_8;
-		break;
-	}
 
 	/*todo: soc->sr_enter_plus_exit_time??*/
 	dlg_sys_param.t_srx_delay_us = dc->dcn_ip->dcfclk_cstate_latency / v->dcf_clk_deep_sleep;

@@ -1025,6 +1013,8 @@ bool dcn_validate_bandwidth(
 		if (pipe->plane_state) {
 			struct pipe_ctx *hsplit_pipe = pipe->bottom_pipe;
 
+			pipe->plane_state->update_flags.bits.full_update = 1;
+
 			if (v->dpp_per_plane[input_idx] == 2 ||
 				((pipe->stream->view_format ==
 					VIEW_3D_FORMAT_SIDE_BY_SIDE ||

@@ -1064,6 +1054,9 @@ bool dcn_validate_bandwidth(
 				hsplit_pipe->stream = NULL;
 				hsplit_pipe->top_pipe = NULL;
 				hsplit_pipe->bottom_pipe = NULL;
+				/* Clear plane_res and stream_res */
+				memset(&hsplit_pipe->plane_res, 0, sizeof(hsplit_pipe->plane_res));
+				memset(&hsplit_pipe->stream_res, 0, sizeof(hsplit_pipe->stream_res));
 				resource_build_scaling_params(pipe);
 			}
 		/* for now important to do this after pipe split for building e2e params */
@@ -1231,40 +1224,62 @@ unsigned int dcn_find_dcfclk_suits_all(
 	return dcf_clk;
 }
 
+static bool verify_clock_values(struct dm_pp_clock_levels_with_voltage *clks)
+{
+	int i;
+
+	if (clks->num_levels == 0)
+		return false;
+
+	for (i = 0; i < clks->num_levels; i++)
+		/* Ensure that the result is sane */
+		if (clks->data[i].clocks_in_khz == 0)
+			return false;
+
+	return true;
+}
+
 void dcn_bw_update_from_pplib(struct dc *dc)
 {
 	struct dc_context *ctx = dc->ctx;
-	struct dm_pp_clock_levels_with_voltage clks = {0};
+	struct dm_pp_clock_levels_with_voltage fclks = {0}, dcfclks = {0};
+	bool res;
 
 	kernel_fpu_begin();
 
 	/* TODO: This is not the proper way to obtain fabric_and_dram_bandwidth, should be min(fclk, memclk) */
+	res = dm_pp_get_clock_levels_by_type_with_voltage(
+			ctx, DM_PP_CLOCK_TYPE_FCLK, &fclks);
 
-	if (dm_pp_get_clock_levels_by_type_with_voltage(
-			ctx, DM_PP_CLOCK_TYPE_FCLK, &clks) &&
-			clks.num_levels != 0) {
-		ASSERT(clks.num_levels >= 3);
-		dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65 = 32 * (clks.data[0].clocks_in_khz / 1000.0) / 1000.0;
-		if (clks.num_levels > 2) {
-			dc->dcn_soc->fabric_and_dram_bandwidth_vmid0p72 = dc->dcn_soc->number_of_channels *
-				(clks.data[clks.num_levels - 3].clocks_in_khz / 1000.0) * ddr4_dram_factor_single_Channel / 1000.0;
-		} else {
-			dc->dcn_soc->fabric_and_dram_bandwidth_vmid0p72 = dc->dcn_soc->number_of_channels *
-				(clks.data[clks.num_levels - 2].clocks_in_khz / 1000.0) * ddr4_dram_factor_single_Channel / 1000.0;
-		}
+	if (res)
+		res = verify_clock_values(&fclks);
+
+	if (res) {
+		ASSERT(fclks.num_levels >= 3);
+		dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65 = 32 * (fclks.data[0].clocks_in_khz / 1000.0) / 1000.0;
+		dc->dcn_soc->fabric_and_dram_bandwidth_vmid0p72 = dc->dcn_soc->number_of_channels *
+				(fclks.data[fclks.num_levels - (fclks.num_levels > 2 ? 3 : 2)].clocks_in_khz / 1000.0)
+				* ddr4_dram_factor_single_Channel / 1000.0;
 		dc->dcn_soc->fabric_and_dram_bandwidth_vnom0p8 = dc->dcn_soc->number_of_channels *
-			(clks.data[clks.num_levels - 2].clocks_in_khz / 1000.0) * ddr4_dram_factor_single_Channel / 1000.0;
+				(fclks.data[fclks.num_levels - 2].clocks_in_khz / 1000.0)
+				* ddr4_dram_factor_single_Channel / 1000.0;
 		dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9 = dc->dcn_soc->number_of_channels *
-			(clks.data[clks.num_levels - 1].clocks_in_khz / 1000.0) * ddr4_dram_factor_single_Channel / 1000.0;
+				(fclks.data[fclks.num_levels - 1].clocks_in_khz / 1000.0)
+				* ddr4_dram_factor_single_Channel / 1000.0;
 	} else
 		BREAK_TO_DEBUGGER();
-	if (dm_pp_get_clock_levels_by_type_with_voltage(
-			ctx, DM_PP_CLOCK_TYPE_DCFCLK, &clks) &&
-			clks.num_levels >= 3) {
-		dc->dcn_soc->dcfclkv_min0p65 = clks.data[0].clocks_in_khz / 1000.0;
-		dc->dcn_soc->dcfclkv_mid0p72 = clks.data[clks.num_levels - 3].clocks_in_khz / 1000.0;
-		dc->dcn_soc->dcfclkv_nom0p8 = clks.data[clks.num_levels - 2].clocks_in_khz / 1000.0;
-		dc->dcn_soc->dcfclkv_max0p9 = clks.data[clks.num_levels - 1].clocks_in_khz / 1000.0;
+
+	res = dm_pp_get_clock_levels_by_type_with_voltage(
+			ctx, DM_PP_CLOCK_TYPE_DCFCLK, &dcfclks);
+
+	if (res)
+		res = verify_clock_values(&dcfclks);
+
+	if (res && dcfclks.num_levels >= 3) {
+		dc->dcn_soc->dcfclkv_min0p65 = dcfclks.data[0].clocks_in_khz / 1000.0;
+		dc->dcn_soc->dcfclkv_mid0p72 = dcfclks.data[dcfclks.num_levels - 3].clocks_in_khz / 1000.0;
+		dc->dcn_soc->dcfclkv_nom0p8 = dcfclks.data[dcfclks.num_levels - 2].clocks_in_khz / 1000.0;
+		dc->dcn_soc->dcfclkv_max0p9 = dcfclks.data[dcfclks.num_levels - 1].clocks_in_khz / 1000.0;
 	} else
 		BREAK_TO_DEBUGGER();
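verify_clock_values rejects a table with zero levels or any zero clock before the bandwidth figures are derived from it, so a bogus pplib answer trips BREAK_TO_DEBUGGER() instead of feeding zeros into the divisions above. A minimal standalone sketch of the same validate-before-use idea; the types here are stand-ins for the dm_pp structures:

	#include <stdbool.h>
	#include <stdio.h>

	struct clock_table { int num_levels; int khz[8]; };

	static bool verify_clock_values(const struct clock_table *t)
	{
		if (t->num_levels == 0)
			return false;
		for (int i = 0; i < t->num_levels; i++)
			if (t->khz[i] == 0)   /* a zero clock would poison later math */
				return false;
		return true;
	}

	int main(void)
	{
		struct clock_table good = { 3, { 400000, 800000, 1200000 } };
		struct clock_table bad  = { 2, { 400000, 0 } };

		printf("good: %s\n", verify_clock_values(&good) ? "ok" : "rejected");
		printf("bad:  %s\n", verify_clock_values(&bad)  ? "ok" : "rejected");
		return 0;
	}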
@@ -54,6 +54,13 @@
 /*******************************************************************************
  * Private functions
  ******************************************************************************/
+
+static inline void elevate_update_type(enum surface_update_type *original, enum surface_update_type new)
+{
+	if (new > *original)
+		*original = new;
+}
+
 static void destroy_links(struct dc *dc)
 {
 	uint32_t i;
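elevate_update_type only ever raises the accumulated update type, which lets the later checks combine independent reasons for a heavier update without ever downgrading one. A trivial standalone sketch of the same monotone-maximum accumulator:

	#include <stdio.h>

	enum update_type { UPDATE_FAST, UPDATE_MED, UPDATE_FULL };

	static inline void elevate(enum update_type *overall, enum update_type new_type)
	{
		if (new_type > *overall)
			*overall = new_type;   /* never lowers the result */
	}

	int main(void)
	{
		enum update_type overall = UPDATE_FAST;

		elevate(&overall, UPDATE_MED);
		elevate(&overall, UPDATE_FAST);    /* ignored: weaker than current */
		printf("overall = %d\n", overall); /* prints 1 (UPDATE_MED) */
		return 0;
	}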
@@ -157,7 +164,7 @@ static bool create_links(
 	return false;
 }
 
-static bool stream_adjust_vmin_vmax(struct dc *dc,
+bool dc_stream_adjust_vmin_vmax(struct dc *dc,
 		struct dc_stream_state **streams, int num_streams,
 		int vmin, int vmax)
 {

@@ -182,7 +189,7 @@ static bool stream_adjust_vmin_vmax(struct dc *dc,
 	return ret;
 }
 
-static bool stream_get_crtc_position(struct dc *dc,
+bool dc_stream_get_crtc_position(struct dc *dc,
 		struct dc_stream_state **streams, int num_streams,
 		unsigned int *v_pos, unsigned int *nom_v_pos)
 {
@@ -207,45 +214,7 @@ static bool stream_get_crtc_position(struct dc *dc,
 	return ret;
 }
 
-static bool set_gamut_remap(struct dc *dc, const struct dc_stream_state *stream)
-{
-	int i = 0;
-	bool ret = false;
-	struct pipe_ctx *pipes;
-
-	for (i = 0; i < MAX_PIPES; i++) {
-		if (dc->current_state->res_ctx.pipe_ctx[i].stream == stream) {
-			pipes = &dc->current_state->res_ctx.pipe_ctx[i];
-			dc->hwss.program_gamut_remap(pipes);
-			ret = true;
-		}
-	}
-
-	return ret;
-}
-
-static bool program_csc_matrix(struct dc *dc, struct dc_stream_state *stream)
-{
-	int i = 0;
-	bool ret = false;
-	struct pipe_ctx *pipes;
-
-	for (i = 0; i < MAX_PIPES; i++) {
-		if (dc->current_state->res_ctx.pipe_ctx[i].stream
-				== stream) {
-
-			pipes = &dc->current_state->res_ctx.pipe_ctx[i];
-			dc->hwss.program_csc_matrix(pipes,
-			stream->output_color_space,
-			stream->csc_color_matrix.matrix);
-			ret = true;
-		}
-	}
-
-	return ret;
-}
-
-static void set_static_screen_events(struct dc *dc,
+void dc_stream_set_static_screen_events(struct dc *dc,
 		struct dc_stream_state **streams,
 		int num_streams,
 		const struct dc_static_screen_events *events)
@@ -270,177 +239,6 @@ static void set_static_screen_events(struct dc *dc,
 	dc->hwss.set_static_screen_control(pipes_affected, num_pipes_affected, events);
 }
 
-static void set_drive_settings(struct dc *dc,
-		struct link_training_settings *lt_settings,
-		const struct dc_link *link)
-{
-
-	int i;
-
-	for (i = 0; i < dc->link_count; i++) {
-		if (dc->links[i] == link)
-			break;
-	}
-
-	if (i >= dc->link_count)
-		ASSERT_CRITICAL(false);
-
-	dc_link_dp_set_drive_settings(dc->links[i], lt_settings);
-}
-
-static void perform_link_training(struct dc *dc,
-		struct dc_link_settings *link_setting,
-		bool skip_video_pattern)
-{
-	int i;
-
-	for (i = 0; i < dc->link_count; i++)
-		dc_link_dp_perform_link_training(
-			dc->links[i],
-			link_setting,
-			skip_video_pattern);
-}
-
-static void set_preferred_link_settings(struct dc *dc,
-		struct dc_link_settings *link_setting,
-		struct dc_link *link)
-{
-	link->preferred_link_setting = *link_setting;
-	dp_retrain_link_dp_test(link, link_setting, false);
-}
-
-static void enable_hpd(const struct dc_link *link)
-{
-	dc_link_dp_enable_hpd(link);
-}
-
-static void disable_hpd(const struct dc_link *link)
-{
-	dc_link_dp_disable_hpd(link);
-}
-
-
-static void set_test_pattern(
-		struct dc_link *link,
-		enum dp_test_pattern test_pattern,
-		const struct link_training_settings *p_link_settings,
-		const unsigned char *p_custom_pattern,
-		unsigned int cust_pattern_size)
-{
-	if (link != NULL)
-		dc_link_dp_set_test_pattern(
-			link,
-			test_pattern,
-			p_link_settings,
-			p_custom_pattern,
-			cust_pattern_size);
-}
-
-static void set_dither_option(struct dc_stream_state *stream,
-		enum dc_dither_option option)
-{
-	struct bit_depth_reduction_params params;
-	struct dc_link *link = stream->status.link;
-	struct pipe_ctx *pipes = NULL;
-	int i;
-
-	for (i = 0; i < MAX_PIPES; i++) {
-		if (link->dc->current_state->res_ctx.pipe_ctx[i].stream ==
-				stream) {
-			pipes = &link->dc->current_state->res_ctx.pipe_ctx[i];
-			break;
-		}
-	}
-
-	memset(&params, 0, sizeof(params));
-	if (!pipes)
-		return;
-	if (option > DITHER_OPTION_MAX)
-		return;
-
-	stream->dither_option = option;
-
-	resource_build_bit_depth_reduction_params(stream,
-				&params);
-	stream->bit_depth_params = params;
-	pipes->stream_res.opp->funcs->
-		opp_program_bit_depth_reduction(pipes->stream_res.opp, &params);
-}
-
-void set_dpms(
-	struct dc *dc,
-	struct dc_stream_state *stream,
-	bool dpms_off)
-{
-	struct pipe_ctx *pipe_ctx = NULL;
-	int i;
-
-	for (i = 0; i < MAX_PIPES; i++) {
-		if (dc->current_state->res_ctx.pipe_ctx[i].stream == stream) {
-			pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
-			break;
-		}
-	}
-
-	if (!pipe_ctx) {
-		ASSERT(0);
-		return;
-	}
-
-	if (stream->dpms_off != dpms_off) {
-		stream->dpms_off = dpms_off;
-		if (dpms_off)
-			core_link_disable_stream(pipe_ctx,
-					KEEP_ACQUIRED_RESOURCE);
-		else
-			core_link_enable_stream(dc->current_state, pipe_ctx);
-	}
-}
-
-static void allocate_dc_stream_funcs(struct dc *dc)
-{
-	if (dc->hwss.set_drr != NULL) {
-		dc->stream_funcs.adjust_vmin_vmax =
-				stream_adjust_vmin_vmax;
-	}
-
-	dc->stream_funcs.set_static_screen_events =
-			set_static_screen_events;
-
-	dc->stream_funcs.get_crtc_position =
-			stream_get_crtc_position;
-
-	dc->stream_funcs.set_gamut_remap =
-			set_gamut_remap;
-
-	dc->stream_funcs.program_csc_matrix =
-			program_csc_matrix;
-
-	dc->stream_funcs.set_dither_option =
-			set_dither_option;
-
-	dc->stream_funcs.set_dpms =
-			set_dpms;
-
-	dc->link_funcs.set_drive_settings =
-			set_drive_settings;
-
-	dc->link_funcs.perform_link_training =
-			perform_link_training;
-
-	dc->link_funcs.set_preferred_link_settings =
-			set_preferred_link_settings;
-
-	dc->link_funcs.enable_hpd =
-			enable_hpd;
-
-	dc->link_funcs.disable_hpd =
-			disable_hpd;
-
-	dc->link_funcs.set_test_pattern =
-			set_test_pattern;
-}
-
 static void destruct(struct dc *dc)
 {
 	dc_release_state(dc->current_state);
@@ -558,6 +356,7 @@ static bool construct(struct dc *dc,
 
 	dc_version = resource_parse_asic_id(init_params->asic_id);
 	dc->ctx->dce_version = dc_version;
+
 #if defined(CONFIG_DRM_AMD_DC_FBC)
 	dc->ctx->fbc_gpu_addr = init_params->fbc_gpu_addr;
 #endif

@@ -616,8 +415,6 @@ static bool construct(struct dc *dc,
 	if (!create_links(dc, init_params->num_virtual_links))
 		goto fail;
 
-	allocate_dc_stream_funcs(dc);
-
 	return true;
 
 fail:

@@ -686,6 +483,7 @@ struct dc *dc_create(const struct dc_init_data *init_params)
 
 	dc->caps.max_links = dc->link_count;
 	dc->caps.max_audios = dc->res_pool->audio_count;
+	dc->caps.linear_pitch_alignment = 64;
 
 	dc->config = init_params->flags;
@@ -712,6 +510,28 @@ void dc_destroy(struct dc **dc)
 	*dc = NULL;
 }
 
+static void enable_timing_multisync(
+		struct dc *dc,
+		struct dc_state *ctx)
+{
+	int i = 0, multisync_count = 0;
+	int pipe_count = dc->res_pool->pipe_count;
+	struct pipe_ctx *multisync_pipes[MAX_PIPES] = { NULL };
+
+	for (i = 0; i < pipe_count; i++) {
+		if (!ctx->res_ctx.pipe_ctx[i].stream ||
+				!ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.enabled)
+			continue;
+		multisync_pipes[multisync_count] = &ctx->res_ctx.pipe_ctx[i];
+		multisync_count++;
+	}
+
+	if (multisync_count > 1) {
+		dc->hwss.enable_per_frame_crtc_position_reset(
+			dc, multisync_count, multisync_pipes);
+	}
+}
+
 static void program_timing_sync(
 	struct dc *dc,
 	struct dc_state *ctx)
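enable_timing_multisync gathers every pipe whose stream opted into triggered CRTC reset and only programs the per-frame position reset when two or more displays need to stay in lockstep. A standalone sketch of that collect-then-act pattern; the struct and threshold mirror the shape of the code above but the names are illustrative:

	#include <stdio.h>

	#define MAX_PIPES 6

	struct pipe { int active; int wants_sync; };

	/* Gather pipes that opted into synchronization; only act if two or more did. */
	static void enable_multisync(struct pipe *pipes, int n)
	{
		struct pipe *selected[MAX_PIPES];
		int count = 0;

		for (int i = 0; i < n; i++) {
			if (!pipes[i].active || !pipes[i].wants_sync)
				continue;
			selected[count++] = &pipes[i];
		}

		if (count > 1)
			printf("synchronizing %d pipes\n", count);
	}

	int main(void)
	{
		struct pipe pipes[MAX_PIPES] = { {1, 1}, {1, 1}, {1, 0} };

		enable_multisync(pipes, MAX_PIPES);
		return 0;
	}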
@@ -838,7 +658,7 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
 	struct dc_bios *dcb = dc->ctx->dc_bios;
 	enum dc_status result = DC_ERROR_UNEXPECTED;
 	struct pipe_ctx *pipe;
-	int i, j, k, l;
+	int i, k, l;
 	struct dc_stream_state *dc_streams[MAX_STREAMS] = {0};
 
 	disable_dangling_plane(dc, context);
@@ -849,9 +669,44 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
 	if (!dcb->funcs->is_accelerated_mode(dcb))
 		dc->hwss.enable_accelerated_mode(dc);
 
+	/* re-program planes for existing stream, in case we need to
+	 * free up plane resource for later use
+	 */
+	for (i = 0; i < context->stream_count; i++) {
+		if (context->streams[i]->mode_changed)
+			continue;
+
+		dc->hwss.apply_ctx_for_surface(
+			dc, context->streams[i],
+			context->stream_status[i].plane_count,
+			context); /* use new pipe config in new context */
+	}
+
+	/* Program hardware */
+	dc->hwss.ready_shared_resources(dc, context);
+
+	for (i = 0; i < dc->res_pool->pipe_count; i++) {
+		pipe = &context->res_ctx.pipe_ctx[i];
+		dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe);
+	}
+
+	result = dc->hwss.apply_ctx_to_hw(dc, context);
+
+	if (result != DC_OK)
+		return result;
+
+	if (context->stream_count > 1) {
+		enable_timing_multisync(dc, context);
+		program_timing_sync(dc, context);
+	}
+
+	/* Program all planes within new context*/
 	for (i = 0; i < context->stream_count; i++) {
 		const struct dc_sink *sink = context->streams[i]->sink;
 
+		if (!context->streams[i]->mode_changed)
+			continue;
+
 		dc->hwss.apply_ctx_for_surface(
 			dc, context->streams[i],
 			context->stream_status[i].plane_count,
@@ -880,27 +735,8 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
 			context->streams[i]->timing.pix_clk_khz);
 	}
 
-	dc->hwss.ready_shared_resources(dc, context);
-
-	for (i = 0; i < dc->res_pool->pipe_count; i++) {
-		pipe = &context->res_ctx.pipe_ctx[i];
-		dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe);
-	}
-	result = dc->hwss.apply_ctx_to_hw(dc, context);
-
-	program_timing_sync(dc, context);
-
 	dc_enable_stereo(dc, context, dc_streams, context->stream_count);
 
-	for (i = 0; i < context->stream_count; i++) {
-		for (j = 0; j < MAX_PIPES; j++) {
-			pipe = &context->res_ctx.pipe_ctx[j];
-
-			if (!pipe->top_pipe && pipe->stream == context->streams[i])
-				dc->hwss.pipe_control_lock(dc, pipe, false);
-		}
-	}
-
 	dc_release_state(dc->current_state);
 
 	dc->current_state = context;
@@ -936,7 +772,6 @@ bool dc_commit_state(struct dc *dc, struct dc_state *context)
 	return (result == DC_OK);
 }
 
-
 bool dc_post_update_surfaces_to_stream(struct dc *dc)
 {
 	int i;
@@ -945,9 +780,11 @@ bool dc_post_update_surfaces_to_stream(struct dc *dc)
 	post_surface_trace(dc);
 
 	for (i = 0; i < dc->res_pool->pipe_count; i++)
-		if (context->res_ctx.pipe_ctx[i].stream == NULL
-				|| context->res_ctx.pipe_ctx[i].plane_state == NULL)
-			dc->hwss.power_down_front_end(dc, i);
+		if (context->res_ctx.pipe_ctx[i].stream == NULL ||
+		    context->res_ctx.pipe_ctx[i].plane_state == NULL) {
+			context->res_ctx.pipe_ctx[i].pipe_idx = i;
+			dc->hwss.disable_plane(dc, &context->res_ctx.pipe_ctx[i]);
+		}
 
 	/* 3rd param should be true, temp w/a for RV*/
 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
@@ -1014,6 +851,7 @@ bool dc_commit_planes_to_stream(
 		flip_addr[i].address = plane_states[i]->address;
 		flip_addr[i].flip_immediate = plane_states[i]->flip_immediate;
 		plane_info[i].color_space = plane_states[i]->color_space;
+		plane_info[i].input_tf = plane_states[i]->input_tf;
 		plane_info[i].format = plane_states[i]->format;
 		plane_info[i].plane_size = plane_states[i]->plane_size;
 		plane_info[i].rotation = plane_states[i]->rotation;
@@ -1118,79 +956,91 @@ static unsigned int pixel_format_to_bpp(enum surface_pixel_format format)
 	}
 }
 
-static enum surface_update_type get_plane_info_update_type(
-		const struct dc_surface_update *u,
-		int surface_index)
+static enum surface_update_type get_plane_info_update_type(const struct dc_surface_update *u)
 {
-	struct dc_plane_info temp_plane_info;
-	memset(&temp_plane_info, 0, sizeof(temp_plane_info));
+	union surface_update_flags *update_flags = &u->surface->update_flags;
 
 	if (!u->plane_info)
 		return UPDATE_TYPE_FAST;
 
-	temp_plane_info = *u->plane_info;
+	if (u->plane_info->color_space != u->surface->color_space)
+		update_flags->bits.color_space_change = 1;
 
-	/* Copy all parameters that will cause a full update
-	 * from current surface, the rest of the parameters
-	 * from provided plane configuration.
-	 * Perform memory compare and special validation
-	 * for those that can cause fast/medium updates
-	 */
+	if (u->plane_info->input_tf != u->surface->input_tf)
+		update_flags->bits.input_tf_change = 1;
 
-	/* Full update parameters */
-	temp_plane_info.color_space = u->surface->color_space;
-	temp_plane_info.dcc = u->surface->dcc;
-	temp_plane_info.horizontal_mirror = u->surface->horizontal_mirror;
-	temp_plane_info.plane_size = u->surface->plane_size;
-	temp_plane_info.rotation = u->surface->rotation;
-	temp_plane_info.stereo_format = u->surface->stereo_format;
+	if (u->plane_info->horizontal_mirror != u->surface->horizontal_mirror)
+		update_flags->bits.horizontal_mirror_change = 1;
 
-	if (surface_index == 0)
-		temp_plane_info.visible = u->plane_info->visible;
-	else
-		temp_plane_info.visible = u->surface->visible;
+	if (u->plane_info->rotation != u->surface->rotation)
+		update_flags->bits.rotation_change = 1;
 
-	if (memcmp(u->plane_info, &temp_plane_info,
-			sizeof(struct dc_plane_info)) != 0)
-		return UPDATE_TYPE_FULL;
+	if (u->plane_info->stereo_format != u->surface->stereo_format)
+		update_flags->bits.stereo_format_change = 1;
+
+	if (u->plane_info->per_pixel_alpha != u->surface->per_pixel_alpha)
+		update_flags->bits.per_pixel_alpha_change = 1;
 
 	if (pixel_format_to_bpp(u->plane_info->format) !=
-			pixel_format_to_bpp(u->surface->format)) {
+			pixel_format_to_bpp(u->surface->format))
 		/* different bytes per element will require full bandwidth
 		 * and DML calculation
 		 */
-		return UPDATE_TYPE_FULL;
-	}
+		update_flags->bits.bpp_change = 1;
 
 	if (memcmp(&u->plane_info->tiling_info, &u->surface->tiling_info,
 			sizeof(union dc_tiling_info)) != 0) {
+		update_flags->bits.swizzle_change = 1;
 		/* todo: below are HW dependent, we should add a hook to
 		 * DCE/N resource and validated there.
 		 */
-		if (u->plane_info->tiling_info.gfx9.swizzle != DC_SW_LINEAR) {
+		if (u->plane_info->tiling_info.gfx9.swizzle != DC_SW_LINEAR)
 			/* swizzled mode requires RQ to be setup properly,
 			 * thus need to run DML to calculate RQ settings
 			 */
-			return UPDATE_TYPE_FULL;
-		}
+			update_flags->bits.bandwidth_change = 1;
 	}
 
+	if (update_flags->bits.rotation_change
+			|| update_flags->bits.stereo_format_change
+			|| update_flags->bits.bpp_change
+			|| update_flags->bits.bandwidth_change)
+		return UPDATE_TYPE_FULL;
+
 	return UPDATE_TYPE_MED;
 }
 
 static enum surface_update_type get_scaling_info_update_type(
 		const struct dc_surface_update *u)
 {
+	union surface_update_flags *update_flags = &u->surface->update_flags;
+
 	if (!u->scaling_info)
 		return UPDATE_TYPE_FAST;
 
-	if (u->scaling_info->src_rect.width != u->surface->src_rect.width
-			|| u->scaling_info->src_rect.height != u->surface->src_rect.height
-			|| u->scaling_info->clip_rect.width != u->surface->clip_rect.width
+	if (u->scaling_info->clip_rect.width != u->surface->clip_rect.width
 			|| u->scaling_info->clip_rect.height != u->surface->clip_rect.height
 			|| u->scaling_info->dst_rect.width != u->surface->dst_rect.width
-			|| u->scaling_info->dst_rect.height != u->surface->dst_rect.height)
-		return UPDATE_TYPE_FULL;
+			|| u->scaling_info->dst_rect.height != u->surface->dst_rect.height) {
+		update_flags->bits.scaling_change = 1;
+
+		if ((u->scaling_info->dst_rect.width < u->surface->dst_rect.width
+			|| u->scaling_info->dst_rect.height < u->surface->dst_rect.height)
+				&& (u->scaling_info->dst_rect.width < u->surface->src_rect.width
+					|| u->scaling_info->dst_rect.height < u->surface->src_rect.height))
+			/* Making dst rect smaller requires a bandwidth change */
+			update_flags->bits.bandwidth_change = 1;
+	}
+
+	if (u->scaling_info->src_rect.width != u->surface->src_rect.width
+		|| u->scaling_info->src_rect.height != u->surface->src_rect.height) {
+
+		update_flags->bits.scaling_change = 1;
+		if (u->scaling_info->src_rect.width > u->surface->src_rect.width
+				&& u->scaling_info->src_rect.height > u->surface->src_rect.height)
+			/* Making src rect bigger requires a bandwidth change */
+			update_flags->bits.clock_change = 1;
+	}
 
 	if (u->scaling_info->src_rect.x != u->surface->src_rect.x
 			|| u->scaling_info->src_rect.y != u->surface->src_rect.y
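The rewrite above stops memcmp'ing whole dc_plane_info copies and instead records each detected difference in per-surface update flags, typically a union of single-bit fields over a raw word so everything can be cleared or tested in one go. A standalone sketch of that flag-union layout; the field names are illustrative and not the exact dc_plane_state definition:

	#include <stdio.h>
	#include <stdint.h>

	union surface_update_flags {
		struct {
			uint32_t rotation_change   : 1;
			uint32_t bpp_change        : 1;
			uint32_t scaling_change    : 1;
			uint32_t bandwidth_change  : 1;
		} bits;
		uint32_t raw;   /* lets callers reset or snapshot all bits at once */
	};

	int main(void)
	{
		union surface_update_flags flags;

		flags.raw = 0;                      /* reset every flag */
		flags.bits.rotation_change = 1;     /* record one detected difference */
		flags.bits.bandwidth_change = 1;

		printf("raw = 0x%x, needs full update: %s\n", flags.raw,
		       (flags.bits.rotation_change || flags.bits.bandwidth_change) ? "yes" : "no");
		return 0;
	}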
@@ -1198,41 +1048,56 @@ static enum surface_update_type get_scaling_info_update_type(
 			|| u->scaling_info->clip_rect.y != u->surface->clip_rect.y
 			|| u->scaling_info->dst_rect.x != u->surface->dst_rect.x
 			|| u->scaling_info->dst_rect.y != u->surface->dst_rect.y)
+		update_flags->bits.position_change = 1;
+
+	if (update_flags->bits.clock_change
+			|| update_flags->bits.bandwidth_change)
+		return UPDATE_TYPE_FULL;
+
+	if (update_flags->bits.scaling_change
+			|| update_flags->bits.position_change)
 		return UPDATE_TYPE_MED;
 
 	return UPDATE_TYPE_FAST;
 }
 
-static enum surface_update_type det_surface_update(
-		const struct dc *dc,
-		const struct dc_surface_update *u,
-		int surface_index)
+static enum surface_update_type det_surface_update(const struct dc *dc,
+		const struct dc_surface_update *u)
 {
 	const struct dc_state *context = dc->current_state;
-	enum surface_update_type type = UPDATE_TYPE_FAST;
+	enum surface_update_type type;
 	enum surface_update_type overall_type = UPDATE_TYPE_FAST;
+	union surface_update_flags *update_flags = &u->surface->update_flags;
 
-	if (!is_surface_in_context(context, u->surface))
+	update_flags->raw = 0; // Reset all flags
+
+	if (!is_surface_in_context(context, u->surface)) {
+		update_flags->bits.new_plane = 1;
 		return UPDATE_TYPE_FULL;
+	}
 
-	type = get_plane_info_update_type(u, surface_index);
-	if (overall_type < type)
-		overall_type = type;
+	type = get_plane_info_update_type(u);
+	elevate_update_type(&overall_type, type);
 
 	type = get_scaling_info_update_type(u);
-	if (overall_type < type)
-		overall_type = type;
+	elevate_update_type(&overall_type, type);
 
-	if (u->in_transfer_func ||
-			u->hdr_static_metadata) {
-		if (overall_type < UPDATE_TYPE_MED)
-			overall_type = UPDATE_TYPE_MED;
+	if (u->in_transfer_func)
+		update_flags->bits.in_transfer_func = 1;
+
+	if (u->input_csc_color_matrix)
+		update_flags->bits.input_csc_change = 1;
+
+	if (update_flags->bits.in_transfer_func
+			|| update_flags->bits.input_csc_change) {
+		type = UPDATE_TYPE_MED;
+		elevate_update_type(&overall_type, type);
 	}
 
 	return overall_type;
 }
 
-enum surface_update_type dc_check_update_surfaces_for_stream(
+static enum surface_update_type check_update_surfaces_for_stream(
 		struct dc *dc,
 		struct dc_surface_update *updates,
 		int surface_count,
@@ -1250,18 +1115,38 @@ enum surface_update_type dc_check_update_surfaces_for_stream(
 
 	for (i = 0 ; i < surface_count; i++) {
 		enum surface_update_type type =
-				det_surface_update(dc, &updates[i], i);
+				det_surface_update(dc, &updates[i]);
 
 		if (type == UPDATE_TYPE_FULL)
 			return type;
 
-		if (overall_type < type)
-			overall_type = type;
+		elevate_update_type(&overall_type, type);
 	}
 
 	return overall_type;
 }
 
+enum surface_update_type dc_check_update_surfaces_for_stream(
+		struct dc *dc,
+		struct dc_surface_update *updates,
+		int surface_count,
+		struct dc_stream_update *stream_update,
+		const struct dc_stream_status *stream_status)
+{
+	int i;
+	enum surface_update_type type;
+
+	for (i = 0; i < surface_count; i++)
+		updates[i].surface->update_flags.raw = 0;
+
+	type = check_update_surfaces_for_stream(dc, updates, surface_count, stream_update, stream_status);
+	if (type == UPDATE_TYPE_FULL)
+		for (i = 0; i < surface_count; i++)
+			updates[i].surface->update_flags.bits.full_update = 1;
+
+	return type;
+}
+
 static struct dc_stream_status *stream_get_status(
 		struct dc_state *ctx,
 		struct dc_stream_state *stream)
@@ -1293,9 +1178,7 @@ static void commit_planes_for_stream(struct dc *dc,
 	if (update_type == UPDATE_TYPE_FULL) {
 		dc->hwss.set_bandwidth(dc, context, false);
 		context_clock_trace(dc, context);
-	}
 
-	if (update_type > UPDATE_TYPE_FAST) {
 		for (j = 0; j < dc->res_pool->pipe_count; j++) {
 			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Lock pipes for provided surfaces, or all active if full update*/
|
|
||||||
for (i = 0; i < surface_count; i++) {
|
|
||||||
struct dc_plane_state *plane_state = srf_updates[i].surface;
|
|
||||||
|
|
||||||
for (j = 0; j < dc->res_pool->pipe_count; j++) {
|
|
||||||
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
|
|
||||||
|
|
||||||
if (update_type != UPDATE_TYPE_FULL && pipe_ctx->plane_state != plane_state)
|
|
||||||
continue;
|
|
||||||
if (!pipe_ctx->plane_state || pipe_ctx->top_pipe)
|
|
||||||
continue;
|
|
||||||
|
|
||||||
dc->hwss.pipe_control_lock(
|
|
||||||
dc,
|
|
||||||
pipe_ctx,
|
|
||||||
true);
|
|
||||||
}
|
|
||||||
if (update_type == UPDATE_TYPE_FULL)
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Full fe update*/
|
/* Full fe update*/
|
||||||
for (j = 0; j < dc->res_pool->pipe_count; j++) {
|
for (j = 0; j < dc->res_pool->pipe_count; j++) {
|
||||||
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
|
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
|
||||||
|
|
||||||
if (update_type != UPDATE_TYPE_FULL || !pipe_ctx->plane_state)
|
if (update_type == UPDATE_TYPE_FAST || !pipe_ctx->plane_state)
|
||||||
continue;
|
continue;
|
||||||
|
|
||||||
if (!pipe_ctx->top_pipe && pipe_ctx->stream) {
|
if (!pipe_ctx->top_pipe &&
|
||||||
struct dc_stream_status *stream_status = stream_get_status(context, pipe_ctx->stream);
|
pipe_ctx->stream &&
|
||||||
|
pipe_ctx->stream == stream) {
|
||||||
|
struct dc_stream_status *stream_status =
|
||||||
|
stream_get_status(context, pipe_ctx->stream);
|
||||||
|
|
||||||
dc->hwss.apply_ctx_for_surface(
|
dc->hwss.apply_ctx_for_surface(
|
||||||
dc, pipe_ctx->stream, stream_status->plane_count, context);
|
dc, pipe_ctx->stream, stream_status->plane_count, context);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if (update_type > UPDATE_TYPE_FAST)
|
if (update_type == UPDATE_TYPE_FULL)
|
||||||
context_timing_trace(dc, &context->res_ctx);
|
context_timing_trace(dc, &context->res_ctx);
|
||||||
|
|
||||||
/* Perform requested Updates */
|
/* Perform requested Updates */
|
||||||
for (i = 0; i < surface_count; i++) {
|
for (i = 0; i < surface_count; i++) {
|
||||||
struct dc_plane_state *plane_state = srf_updates[i].surface;
|
struct dc_plane_state *plane_state = srf_updates[i].surface;
|
||||||
|
|
||||||
if (update_type == UPDATE_TYPE_MED)
|
|
||||||
dc->hwss.apply_ctx_for_surface(
|
|
||||||
dc, stream, surface_count, context);
|
|
||||||
|
|
||||||
for (j = 0; j < dc->res_pool->pipe_count; j++) {
|
for (j = 0; j < dc->res_pool->pipe_count; j++) {
|
||||||
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
|
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
|
||||||
|
|
||||||
|
if (pipe_ctx->stream != stream)
|
||||||
|
continue;
|
||||||
|
|
||||||
if (pipe_ctx->plane_state != plane_state)
|
if (pipe_ctx->plane_state != plane_state)
|
||||||
continue;
|
continue;
|
||||||
|
|
||||||
if (srf_updates[i].flip_addr)
|
if (update_type == UPDATE_TYPE_FAST && srf_updates[i].flip_addr)
|
||||||
dc->hwss.update_plane_addr(dc, pipe_ctx);
|
dc->hwss.update_plane_addr(dc, pipe_ctx);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
if (update_type == UPDATE_TYPE_FAST)
|
if (stream && stream_update && update_type > UPDATE_TYPE_FAST)
|
||||||
|
for (j = 0; j < dc->res_pool->pipe_count; j++) {
|
||||||
|
struct pipe_ctx *pipe_ctx =
|
||||||
|
&context->res_ctx.pipe_ctx[j];
|
||||||
|
|
||||||
|
if (pipe_ctx->stream != stream)
|
||||||
continue;
|
continue;
|
||||||
|
|
||||||
/* work around to program degamma regs for split pipe after set mode. */
|
if (stream_update->hdr_static_metadata) {
|
||||||
if (srf_updates[i].in_transfer_func || (pipe_ctx->top_pipe &&
|
|
||||||
pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state))
|
|
||||||
dc->hwss.set_input_transfer_func(
|
|
||||||
pipe_ctx, pipe_ctx->plane_state);
|
|
||||||
|
|
||||||
if (stream_update != NULL &&
|
|
||||||
stream_update->out_transfer_func != NULL) {
|
|
||||||
dc->hwss.set_output_transfer_func(
|
|
||||||
pipe_ctx, pipe_ctx->stream);
|
|
||||||
}
|
|
||||||
|
|
||||||
if (srf_updates[i].hdr_static_metadata) {
|
|
||||||
resource_build_info_frame(pipe_ctx);
|
resource_build_info_frame(pipe_ctx);
|
||||||
dc->hwss.update_info_frame(pipe_ctx);
|
dc->hwss.update_info_frame(pipe_ctx);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
|
||||||
|
|
||||||
/* Unlock pipes */
|
|
||||||
for (i = dc->res_pool->pipe_count - 1; i >= 0; i--) {
|
|
||||||
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
|
|
||||||
|
|
||||||
for (j = 0; j < surface_count; j++) {
|
|
||||||
if (update_type != UPDATE_TYPE_FULL &&
|
|
||||||
srf_updates[j].surface != pipe_ctx->plane_state)
|
|
||||||
continue;
|
|
||||||
if (!pipe_ctx->plane_state || pipe_ctx->top_pipe)
|
|
||||||
continue;
|
|
||||||
|
|
||||||
dc->hwss.pipe_control_lock(
|
|
||||||
dc,
|
|
||||||
pipe_ctx,
|
|
||||||
false);
|
|
||||||
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
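The reworked commit_planes_for_stream() above now splits the work into a bandwidth/clock step, a context re-apply step for MED/FULL updates, and an address-only step for FAST updates. The outline below is a condensed, illustrative sketch of that flow, not the literal code; error paths and the stream_update handling are elided.

/* Illustrative outline of the reworked commit flow above (sketch only). */
static void example_commit_outline(struct dc *dc, struct dc_state *context,
		struct dc_stream_state *stream,
		struct dc_surface_update *srf_updates, int surface_count,
		enum surface_update_type update_type)
{
	int i, j;

	if (update_type == UPDATE_TYPE_FULL)
		dc->hwss.set_bandwidth(dc, context, false);  /* clocks/bw first */

	for (j = 0; j < dc->res_pool->pipe_count; j++) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

		/* MED/FULL: re-apply context for each bottom pipe on the stream */
		if (update_type > UPDATE_TYPE_FAST && pipe_ctx->plane_state &&
		    !pipe_ctx->top_pipe && pipe_ctx->stream == stream)
			dc->hwss.apply_ctx_for_surface(dc, pipe_ctx->stream,
				stream_get_status(context, pipe_ctx->stream)->plane_count,
				context);
	}

	/* FAST: only program new flip addresses on matching pipes */
	for (i = 0; i < surface_count; i++)
		for (j = 0; j < dc->res_pool->pipe_count; j++) {
			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

			if (update_type == UPDATE_TYPE_FAST &&
			    pipe_ctx->stream == stream &&
			    pipe_ctx->plane_state == srf_updates[i].surface &&
			    srf_updates[i].flip_addr)
				dc->hwss.update_plane_addr(dc, pipe_ctx);
		}
}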
void dc_commit_updates_for_stream(struct dc *dc,
|
void dc_commit_updates_for_stream(struct dc *dc,
|
||||||
|
@ -1480,10 +1318,7 @@ void dc_commit_updates_for_stream(struct dc *dc,
|
||||||
stream_update,
|
stream_update,
|
||||||
update_type,
|
update_type,
|
||||||
context);
|
context);
|
||||||
|
/* update current_state */
|
||||||
if (update_type >= UPDATE_TYPE_FULL)
|
|
||||||
dc_post_update_surfaces_to_stream(dc);
|
|
||||||
|
|
||||||
if (dc->current_state != context) {
|
if (dc->current_state != context) {
|
||||||
|
|
||||||
struct dc_state *old = dc->current_state;
|
struct dc_state *old = dc->current_state;
|
||||||
|
@ -1492,6 +1327,9 @@ void dc_commit_updates_for_stream(struct dc *dc,
|
||||||
dc_release_state(old);
|
dc_release_state(old);
|
||||||
|
|
||||||
}
|
}
|
||||||
|
/* let's use current_state to update watermarks, etc. */
|
||||||
|
if (update_type >= UPDATE_TYPE_FULL)
|
||||||
|
dc_post_update_surfaces_to_stream(dc);
|
||||||
|
|
||||||
return;
|
return;
|
||||||
|
|
||||||
|
|
|
@ -137,6 +137,7 @@ void pre_surface_trace(
|
||||||
"plane_state->tiling_info.gfx8.pipe_config = %d;\n"
|
"plane_state->tiling_info.gfx8.pipe_config = %d;\n"
|
||||||
"plane_state->tiling_info.gfx8.array_mode = %d;\n"
|
"plane_state->tiling_info.gfx8.array_mode = %d;\n"
|
||||||
"plane_state->color_space = %d;\n"
|
"plane_state->color_space = %d;\n"
|
||||||
|
"plane_state->input_tf = %d;\n"
|
||||||
"plane_state->dcc.enable = %d;\n"
|
"plane_state->dcc.enable = %d;\n"
|
||||||
"plane_state->format = %d;\n"
|
"plane_state->format = %d;\n"
|
||||||
"plane_state->rotation = %d;\n"
|
"plane_state->rotation = %d;\n"
|
||||||
|
@ -144,6 +145,7 @@ void pre_surface_trace(
|
||||||
plane_state->tiling_info.gfx8.pipe_config,
|
plane_state->tiling_info.gfx8.pipe_config,
|
||||||
plane_state->tiling_info.gfx8.array_mode,
|
plane_state->tiling_info.gfx8.array_mode,
|
||||||
plane_state->color_space,
|
plane_state->color_space,
|
||||||
|
plane_state->input_tf,
|
||||||
plane_state->dcc.enable,
|
plane_state->dcc.enable,
|
||||||
plane_state->format,
|
plane_state->format,
|
||||||
plane_state->rotation,
|
plane_state->rotation,
|
||||||
|
@ -184,6 +186,7 @@ void update_surface_trace(
|
||||||
if (update->plane_info) {
|
if (update->plane_info) {
|
||||||
SURFACE_TRACE(
|
SURFACE_TRACE(
|
||||||
"plane_info->color_space = %d;\n"
|
"plane_info->color_space = %d;\n"
|
||||||
|
"plane_info->input_tf = %d;\n"
|
||||||
"plane_info->format = %d;\n"
|
"plane_info->format = %d;\n"
|
||||||
"plane_info->plane_size.grph.surface_pitch = %d;\n"
|
"plane_info->plane_size.grph.surface_pitch = %d;\n"
|
||||||
"plane_info->plane_size.grph.surface_size.height = %d;\n"
|
"plane_info->plane_size.grph.surface_size.height = %d;\n"
|
||||||
|
@ -192,6 +195,7 @@ void update_surface_trace(
|
||||||
"plane_info->plane_size.grph.surface_size.y = %d;\n"
|
"plane_info->plane_size.grph.surface_size.y = %d;\n"
|
||||||
"plane_info->rotation = %d;\n",
|
"plane_info->rotation = %d;\n",
|
||||||
update->plane_info->color_space,
|
update->plane_info->color_space,
|
||||||
|
update->plane_info->input_tf,
|
||||||
update->plane_info->format,
|
update->plane_info->format,
|
||||||
update->plane_info->plane_size.grph.surface_pitch,
|
update->plane_info->plane_size.grph.surface_pitch,
|
||||||
update->plane_info->plane_size.grph.surface_size.height,
|
update->plane_info->plane_size.grph.surface_size.height,
|
||||||
|
|
|
@ -1798,7 +1798,7 @@ static void disable_link(struct dc_link *link, enum signal_type signal)
|
||||||
else
|
else
|
||||||
dp_disable_link_phy_mst(link, signal);
|
dp_disable_link_phy_mst(link, signal);
|
||||||
} else
|
} else
|
||||||
link->link_enc->funcs->disable_output(link->link_enc, signal, link);
|
link->link_enc->funcs->disable_output(link->link_enc, signal);
|
||||||
}
|
}
|
||||||
|
|
||||||
bool dp_active_dongle_validate_timing(
|
bool dp_active_dongle_validate_timing(
|
||||||
|
@ -1869,7 +1869,7 @@ enum dc_status dc_link_validate_mode_timing(
|
||||||
const struct dc_crtc_timing *timing)
|
const struct dc_crtc_timing *timing)
|
||||||
{
|
{
|
||||||
uint32_t max_pix_clk = stream->sink->dongle_max_pix_clk;
|
uint32_t max_pix_clk = stream->sink->dongle_max_pix_clk;
|
||||||
struct dc_dongle_caps *dongle_caps = &link->link_status.dpcd_caps->dongle_caps;
|
struct dc_dongle_caps *dongle_caps = &link->dpcd_caps.dongle_caps;
|
||||||
|
|
||||||
/* A hack to avoid failing any modes for EDID override feature on
|
/* A hack to avoid failing any modes for EDID override feature on
|
||||||
* topology change such as lower quality cable for DP or different dongle
|
* topology change such as lower quality cable for DP or different dongle
|
||||||
|
|
|
@ -220,8 +220,7 @@ static void dpcd_set_lt_pattern_and_lane_settings(
|
||||||
size_in_bytes);
|
size_in_bytes);
|
||||||
|
|
||||||
dm_logger_write(link->ctx->logger, LOG_HW_LINK_TRAINING,
|
dm_logger_write(link->ctx->logger, LOG_HW_LINK_TRAINING,
|
||||||
"%s:\n %x VS set = %x PE set = %x \
|
"%s:\n %x VS set = %x PE set = %x max VS Reached = %x max PE Reached = %x\n",
|
||||||
max VS Reached = %x max PE Reached = %x\n",
|
|
||||||
__func__,
|
__func__,
|
||||||
DP_TRAINING_LANE0_SET,
|
DP_TRAINING_LANE0_SET,
|
||||||
dpcd_lane[0].bits.VOLTAGE_SWING_SET,
|
dpcd_lane[0].bits.VOLTAGE_SWING_SET,
|
||||||
|
@ -558,8 +557,7 @@ static void dpcd_set_lane_settings(
|
||||||
*/
|
*/
|
||||||
|
|
||||||
dm_logger_write(link->ctx->logger, LOG_HW_LINK_TRAINING,
|
dm_logger_write(link->ctx->logger, LOG_HW_LINK_TRAINING,
|
||||||
"%s\n %x VS set = %x PE set = %x \
|
"%s\n %x VS set = %x PE set = %x max VS Reached = %x max PE Reached = %x\n",
|
||||||
max VS Reached = %x max PE Reached = %x\n",
|
|
||||||
__func__,
|
__func__,
|
||||||
DP_TRAINING_LANE0_SET,
|
DP_TRAINING_LANE0_SET,
|
||||||
dpcd_lane[0].bits.VOLTAGE_SWING_SET,
|
dpcd_lane[0].bits.VOLTAGE_SWING_SET,
|
||||||
|
@ -872,9 +870,8 @@ static bool perform_clock_recovery_sequence(
|
||||||
if (retry_count >= LINK_TRAINING_MAX_CR_RETRY) {
|
if (retry_count >= LINK_TRAINING_MAX_CR_RETRY) {
|
||||||
ASSERT(0);
|
ASSERT(0);
|
||||||
dm_logger_write(link->ctx->logger, LOG_ERROR,
|
dm_logger_write(link->ctx->logger, LOG_ERROR,
|
||||||
"%s: Link Training Error, could not \
|
"%s: Link Training Error, could not get CR after %d tries. Possibly voltage swing issue",
|
||||||
get CR after %d tries. \
|
__func__,
|
||||||
Possibly voltage swing issue", __func__,
|
|
||||||
LINK_TRAINING_MAX_CR_RETRY);
|
LINK_TRAINING_MAX_CR_RETRY);
|
||||||
|
|
||||||
}
|
}
|
||||||
|
@ -2127,7 +2124,7 @@ static void get_active_converter_info(
|
||||||
|
|
||||||
union dwnstream_port_caps_byte3_hdmi
|
union dwnstream_port_caps_byte3_hdmi
|
||||||
hdmi_caps = {.raw = det_caps[3] };
|
hdmi_caps = {.raw = det_caps[3] };
|
||||||
union dwnstream_port_caps_byte1
|
union dwnstream_port_caps_byte2
|
||||||
hdmi_color_caps = {.raw = det_caps[2] };
|
hdmi_color_caps = {.raw = det_caps[2] };
|
||||||
link->dpcd_caps.dongle_caps.dp_hdmi_max_pixel_clk =
|
link->dpcd_caps.dongle_caps.dp_hdmi_max_pixel_clk =
|
||||||
det_caps[1] * 25000;
|
det_caps[1] * 25000;
|
||||||
|
|
|
@ -89,7 +89,7 @@ void dp_enable_link_phy(
|
||||||
|
|
||||||
if (dc_is_dp_sst_signal(signal)) {
|
if (dc_is_dp_sst_signal(signal)) {
|
||||||
if (signal == SIGNAL_TYPE_EDP) {
|
if (signal == SIGNAL_TYPE_EDP) {
|
||||||
link->dc->hwss.edp_power_control(link->link_enc, true);
|
link->dc->hwss.edp_power_control(link, true);
|
||||||
link_enc->funcs->enable_dp_output(
|
link_enc->funcs->enable_dp_output(
|
||||||
link_enc,
|
link_enc,
|
||||||
link_settings,
|
link_settings,
|
||||||
|
@ -140,10 +140,10 @@ void dp_disable_link_phy(struct dc_link *link, enum signal_type signal)
|
||||||
if (signal == SIGNAL_TYPE_EDP) {
|
if (signal == SIGNAL_TYPE_EDP) {
|
||||||
link->dc->hwss.edp_backlight_control(link, false);
|
link->dc->hwss.edp_backlight_control(link, false);
|
||||||
edp_receiver_ready_T9(link);
|
edp_receiver_ready_T9(link);
|
||||||
link->link_enc->funcs->disable_output(link->link_enc, signal, link);
|
link->link_enc->funcs->disable_output(link->link_enc, signal);
|
||||||
link->dc->hwss.edp_power_control(link->link_enc, false);
|
link->dc->hwss.edp_power_control(link, false);
|
||||||
} else
|
} else
|
||||||
link->link_enc->funcs->disable_output(link->link_enc, signal, link);
|
link->link_enc->funcs->disable_output(link->link_enc, signal);
|
||||||
|
|
||||||
/* Clear current link setting.*/
|
/* Clear current link setting.*/
|
||||||
memset(&link->cur_link_settings, 0,
|
memset(&link->cur_link_settings, 0,
|
||||||
|
@ -286,8 +286,7 @@ void dp_retrain_link_dp_test(struct dc_link *link,
|
||||||
|
|
||||||
link->link_enc->funcs->disable_output(
|
link->link_enc->funcs->disable_output(
|
||||||
link->link_enc,
|
link->link_enc,
|
||||||
SIGNAL_TYPE_DISPLAY_PORT,
|
SIGNAL_TYPE_DISPLAY_PORT);
|
||||||
link);
|
|
||||||
|
|
||||||
/* Clear current link setting. */
|
/* Clear current link setting. */
|
||||||
memset(&link->cur_link_settings, 0,
|
memset(&link->cur_link_settings, 0,
|
||||||
|
|
|
@ -426,15 +426,8 @@ static enum pixel_format convert_pixel_format_to_dalsurface(
|
||||||
|
|
||||||
static void rect_swap_helper(struct rect *rect)
|
static void rect_swap_helper(struct rect *rect)
|
||||||
{
|
{
|
||||||
uint32_t temp = 0;
|
swap(rect->height, rect->width);
|
||||||
|
swap(rect->x, rect->y);
|
||||||
temp = rect->height;
|
|
||||||
rect->height = rect->width;
|
|
||||||
rect->width = temp;
|
|
||||||
|
|
||||||
temp = rect->x;
|
|
||||||
rect->x = rect->y;
|
|
||||||
rect->y = temp;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
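The simplified rect_swap_helper() above relies on the kernel's generic swap() macro from linux/kernel.h, which expands to roughly the following (shown for reference; the exact definition may differ slightly between kernel versions).

/* Approximate definition of the kernel's swap() macro used above. */
#define swap(a, b) \
	do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)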
static void calculate_viewport(struct pipe_ctx *pipe_ctx)
|
static void calculate_viewport(struct pipe_ctx *pipe_ctx)
|
||||||
|
@ -2319,20 +2312,13 @@ static void set_spd_info_packet(
|
||||||
|
|
||||||
static void set_hdr_static_info_packet(
|
static void set_hdr_static_info_packet(
|
||||||
struct encoder_info_packet *info_packet,
|
struct encoder_info_packet *info_packet,
|
||||||
struct dc_plane_state *plane_state,
|
|
||||||
struct dc_stream_state *stream)
|
struct dc_stream_state *stream)
|
||||||
{
|
{
|
||||||
uint16_t i = 0;
|
uint16_t i = 0;
|
||||||
enum signal_type signal = stream->signal;
|
enum signal_type signal = stream->signal;
|
||||||
struct dc_hdr_static_metadata hdr_metadata;
|
|
||||||
uint32_t data;
|
uint32_t data;
|
||||||
|
|
||||||
if (!plane_state)
|
if (!stream->hdr_static_metadata.hdr_supported)
|
||||||
return;
|
|
||||||
|
|
||||||
hdr_metadata = plane_state->hdr_static_ctx;
|
|
||||||
|
|
||||||
if (!hdr_metadata.hdr_supported)
|
|
||||||
return;
|
return;
|
||||||
|
|
||||||
if (dc_is_hdmi_signal(signal)) {
|
if (dc_is_hdmi_signal(signal)) {
|
||||||
|
@ -2352,55 +2338,55 @@ static void set_hdr_static_info_packet(
|
||||||
i = 2;
|
i = 2;
|
||||||
}
|
}
|
||||||
|
|
||||||
data = hdr_metadata.is_hdr;
|
data = stream->hdr_static_metadata.is_hdr;
|
||||||
info_packet->sb[i++] = data ? 0x02 : 0x00;
|
info_packet->sb[i++] = data ? 0x02 : 0x00;
|
||||||
info_packet->sb[i++] = 0x00;
|
info_packet->sb[i++] = 0x00;
|
||||||
|
|
||||||
data = hdr_metadata.chromaticity_green_x / 2;
|
data = stream->hdr_static_metadata.chromaticity_green_x / 2;
|
||||||
info_packet->sb[i++] = data & 0xFF;
|
info_packet->sb[i++] = data & 0xFF;
|
||||||
info_packet->sb[i++] = (data & 0xFF00) >> 8;
|
info_packet->sb[i++] = (data & 0xFF00) >> 8;
|
||||||
|
|
||||||
data = hdr_metadata.chromaticity_green_y / 2;
|
data = stream->hdr_static_metadata.chromaticity_green_y / 2;
|
||||||
info_packet->sb[i++] = data & 0xFF;
|
info_packet->sb[i++] = data & 0xFF;
|
||||||
info_packet->sb[i++] = (data & 0xFF00) >> 8;
|
info_packet->sb[i++] = (data & 0xFF00) >> 8;
|
||||||
|
|
||||||
data = hdr_metadata.chromaticity_blue_x / 2;
|
data = stream->hdr_static_metadata.chromaticity_blue_x / 2;
|
||||||
info_packet->sb[i++] = data & 0xFF;
|
info_packet->sb[i++] = data & 0xFF;
|
||||||
info_packet->sb[i++] = (data & 0xFF00) >> 8;
|
info_packet->sb[i++] = (data & 0xFF00) >> 8;
|
||||||
|
|
||||||
data = hdr_metadata.chromaticity_blue_y / 2;
|
data = stream->hdr_static_metadata.chromaticity_blue_y / 2;
|
||||||
info_packet->sb[i++] = data & 0xFF;
|
info_packet->sb[i++] = data & 0xFF;
|
||||||
info_packet->sb[i++] = (data & 0xFF00) >> 8;
|
info_packet->sb[i++] = (data & 0xFF00) >> 8;
|
||||||
|
|
||||||
data = hdr_metadata.chromaticity_red_x / 2;
|
data = stream->hdr_static_metadata.chromaticity_red_x / 2;
|
||||||
info_packet->sb[i++] = data & 0xFF;
|
info_packet->sb[i++] = data & 0xFF;
|
||||||
info_packet->sb[i++] = (data & 0xFF00) >> 8;
|
info_packet->sb[i++] = (data & 0xFF00) >> 8;
|
||||||
|
|
||||||
data = hdr_metadata.chromaticity_red_y / 2;
|
data = stream->hdr_static_metadata.chromaticity_red_y / 2;
|
||||||
info_packet->sb[i++] = data & 0xFF;
|
info_packet->sb[i++] = data & 0xFF;
|
||||||
info_packet->sb[i++] = (data & 0xFF00) >> 8;
|
info_packet->sb[i++] = (data & 0xFF00) >> 8;
|
||||||
|
|
||||||
data = hdr_metadata.chromaticity_white_point_x / 2;
|
data = stream->hdr_static_metadata.chromaticity_white_point_x / 2;
|
||||||
info_packet->sb[i++] = data & 0xFF;
|
info_packet->sb[i++] = data & 0xFF;
|
||||||
info_packet->sb[i++] = (data & 0xFF00) >> 8;
|
info_packet->sb[i++] = (data & 0xFF00) >> 8;
|
||||||
|
|
||||||
data = hdr_metadata.chromaticity_white_point_y / 2;
|
data = stream->hdr_static_metadata.chromaticity_white_point_y / 2;
|
||||||
info_packet->sb[i++] = data & 0xFF;
|
info_packet->sb[i++] = data & 0xFF;
|
||||||
info_packet->sb[i++] = (data & 0xFF00) >> 8;
|
info_packet->sb[i++] = (data & 0xFF00) >> 8;
|
||||||
|
|
||||||
data = hdr_metadata.max_luminance;
|
data = stream->hdr_static_metadata.max_luminance;
|
||||||
info_packet->sb[i++] = data & 0xFF;
|
info_packet->sb[i++] = data & 0xFF;
|
||||||
info_packet->sb[i++] = (data & 0xFF00) >> 8;
|
info_packet->sb[i++] = (data & 0xFF00) >> 8;
|
||||||
|
|
||||||
data = hdr_metadata.min_luminance;
|
data = stream->hdr_static_metadata.min_luminance;
|
||||||
info_packet->sb[i++] = data & 0xFF;
|
info_packet->sb[i++] = data & 0xFF;
|
||||||
info_packet->sb[i++] = (data & 0xFF00) >> 8;
|
info_packet->sb[i++] = (data & 0xFF00) >> 8;
|
||||||
|
|
||||||
data = hdr_metadata.maximum_content_light_level;
|
data = stream->hdr_static_metadata.maximum_content_light_level;
|
||||||
info_packet->sb[i++] = data & 0xFF;
|
info_packet->sb[i++] = data & 0xFF;
|
||||||
info_packet->sb[i++] = (data & 0xFF00) >> 8;
|
info_packet->sb[i++] = (data & 0xFF00) >> 8;
|
||||||
|
|
||||||
data = hdr_metadata.maximum_frame_average_light_level;
|
data = stream->hdr_static_metadata.maximum_frame_average_light_level;
|
||||||
info_packet->sb[i++] = data & 0xFF;
|
info_packet->sb[i++] = data & 0xFF;
|
||||||
info_packet->sb[i++] = (data & 0xFF00) >> 8;
|
info_packet->sb[i++] = (data & 0xFF00) >> 8;
|
||||||
|
|
||||||
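Each HDR metadata field above is written into the infoframe payload as a 16-bit value, low byte first. A hypothetical helper capturing that repeated pattern might look like the sketch below; it is illustrative only, and the driver keeps the byte writes inline as shown.

/* Hypothetical helper: append a value as two bytes, low byte first,
 * mirroring the inline pattern in set_hdr_static_info_packet() above. */
static inline void pack_u16_le(uint8_t *sb, uint16_t *idx, uint32_t data)
{
	sb[(*idx)++] = data & 0xFF;
	sb[(*idx)++] = (data & 0xFF00) >> 8;
}

/* usage, equivalent to the two sb[] writes per field above:
 *	pack_u16_le(info_packet->sb, &i, stream->hdr_static_metadata.max_luminance);
 */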
|
@ -2551,16 +2537,14 @@ void resource_build_info_frame(struct pipe_ctx *pipe_ctx)
|
||||||
|
|
||||||
set_spd_info_packet(&info->spd, pipe_ctx->stream);
|
set_spd_info_packet(&info->spd, pipe_ctx->stream);
|
||||||
|
|
||||||
set_hdr_static_info_packet(&info->hdrsmd,
|
set_hdr_static_info_packet(&info->hdrsmd, pipe_ctx->stream);
|
||||||
pipe_ctx->plane_state, pipe_ctx->stream);
|
|
||||||
|
|
||||||
} else if (dc_is_dp_signal(signal)) {
|
} else if (dc_is_dp_signal(signal)) {
|
||||||
set_vsc_info_packet(&info->vsc, pipe_ctx->stream);
|
set_vsc_info_packet(&info->vsc, pipe_ctx->stream);
|
||||||
|
|
||||||
set_spd_info_packet(&info->spd, pipe_ctx->stream);
|
set_spd_info_packet(&info->spd, pipe_ctx->stream);
|
||||||
|
|
||||||
set_hdr_static_info_packet(&info->hdrsmd,
|
set_hdr_static_info_packet(&info->hdrsmd, pipe_ctx->stream);
|
||||||
pipe_ctx->plane_state, pipe_ctx->stream);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
patch_gamut_packet_checksum(&info->gamut);
|
patch_gamut_packet_checksum(&info->gamut);
|
||||||
|
|
|
@ -36,16 +36,13 @@
|
||||||
#define TMDS_MAX_PIXEL_CLOCK_IN_KHZ_UPMOST 297000
|
#define TMDS_MAX_PIXEL_CLOCK_IN_KHZ_UPMOST 297000
|
||||||
static void update_stream_signal(struct dc_stream_state *stream)
|
static void update_stream_signal(struct dc_stream_state *stream)
|
||||||
{
|
{
|
||||||
if (stream->output_signal == SIGNAL_TYPE_NONE) {
|
|
||||||
struct dc_sink *dc_sink = stream->sink;
|
|
||||||
|
|
||||||
if (dc_sink->sink_signal == SIGNAL_TYPE_NONE)
|
struct dc_sink *dc_sink = stream->sink;
|
||||||
stream->signal = stream->sink->link->connector_signal;
|
|
||||||
else
|
if (dc_sink->sink_signal == SIGNAL_TYPE_NONE)
|
||||||
stream->signal = dc_sink->sink_signal;
|
stream->signal = stream->sink->link->connector_signal;
|
||||||
} else {
|
else
|
||||||
stream->signal = stream->output_signal;
|
stream->signal = dc_sink->sink_signal;
|
||||||
}
|
|
||||||
|
|
||||||
if (dc_is_dvi_signal(stream->signal)) {
|
if (dc_is_dvi_signal(stream->signal)) {
|
||||||
if (stream->timing.pix_clk_khz > TMDS_MAX_PIXEL_CLOCK_IN_KHZ_UPMOST &&
|
if (stream->timing.pix_clk_khz > TMDS_MAX_PIXEL_CLOCK_IN_KHZ_UPMOST &&
|
||||||
|
|
|
@ -38,7 +38,7 @@
|
||||||
#include "inc/compressor.h"
|
#include "inc/compressor.h"
|
||||||
#include "dml/display_mode_lib.h"
|
#include "dml/display_mode_lib.h"
|
||||||
|
|
||||||
#define DC_VER "3.1.07"
|
#define DC_VER "3.1.20"
|
||||||
|
|
||||||
#define MAX_SURFACES 3
|
#define MAX_SURFACES 3
|
||||||
#define MAX_STREAMS 6
|
#define MAX_STREAMS 6
|
||||||
|
@ -58,8 +58,10 @@ struct dc_caps {
|
||||||
uint32_t i2c_speed_in_khz;
|
uint32_t i2c_speed_in_khz;
|
||||||
unsigned int max_cursor_size;
|
unsigned int max_cursor_size;
|
||||||
unsigned int max_video_width;
|
unsigned int max_video_width;
|
||||||
|
int linear_pitch_alignment;
|
||||||
bool dcc_const_color;
|
bool dcc_const_color;
|
||||||
bool dynamic_audio;
|
bool dynamic_audio;
|
||||||
|
bool is_apu;
|
||||||
};
|
};
|
||||||
|
|
||||||
struct dc_dcc_surface_param {
|
struct dc_dcc_surface_param {
|
||||||
|
@ -97,69 +99,53 @@ struct dc_static_screen_events {
|
||||||
bool overlay_update;
|
bool overlay_update;
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|
||||||
|
/* Surface update type is used by dc_update_surfaces_and_stream
|
||||||
|
* The update type is determined at the very beginning of the function based
|
||||||
|
* on parameters passed in and decides how much programming (or updating) is
|
||||||
|
* going to be done during the call.
|
||||||
|
*
|
||||||
|
* UPDATE_TYPE_FAST is used for really fast updates that do not require much
|
||||||
|
* logical calculation or hardware register programming. This update MUST be
|
||||||
|
* ISR safe on windows. Currently fast update will only be used to flip surface
|
||||||
|
* address.
|
||||||
|
*
|
||||||
|
* UPDATE_TYPE_MED is used for slower updates which require significant hw
|
||||||
|
* re-programming but do not affect bandwidth consumption or clock
|
||||||
|
* requirements. At present, this is the level at which front end updates
|
||||||
|
* that do not require us to run bw_calcs happen. These are in/out transfer func
|
||||||
|
* updates, viewport offset changes, recout size changes and pixel depth changes.
|
||||||
|
* This update can be done at ISR, but we want to minimize how often this happens.
|
||||||
|
*
|
||||||
|
* UPDATE_TYPE_FULL is slow. Really slow. This requires us to recalculate our
|
||||||
|
* bandwidth and clocks, possibly rearrange some pipes and reprogram anything front
|
||||||
|
* end related. Any time viewport dimensions, recout dimensions, scaling ratios or
|
||||||
|
* gamma need to be adjusted or pipe needs to be turned on (or disconnected) we do
|
||||||
|
* a full update. This cannot be done at ISR level and should be a rare event.
|
||||||
|
* Unless someone is stress testing mpo enter/exit, playing with colour or adjusting
|
||||||
|
* underscan we don't expect to see this call at all.
|
||||||
|
*/
|
||||||
|
|
||||||
|
enum surface_update_type {
|
||||||
|
UPDATE_TYPE_FAST, /* super fast, safe to execute in isr */
|
||||||
|
UPDATE_TYPE_MED, /* ISR safe, most of programming needed, no bw/clk change*/
|
||||||
|
UPDATE_TYPE_FULL, /* may need to shuffle resources */
|
||||||
|
};
|
||||||
|
|
||||||
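As a rough illustration of the classification the comment above describes, the sketch below maps a few dc_surface_update members onto update types. It is only a sketch; the real decision logic lives in det_surface_update() and check_update_surfaces_for_stream() and looks at far more conditions.

/* Illustrative sketch of mapping a dc_surface_update onto an update type. */
static enum surface_update_type classify_update_sketch(
		const struct dc_surface_update *u)
{
	if (u->plane_info)
		return UPDATE_TYPE_FULL;	/* format/rotation etc. may change bw or pipes */
	if (u->in_transfer_func || u->input_csc_color_matrix)
		return UPDATE_TYPE_MED;		/* front-end reprogramming, no bw change */
	return UPDATE_TYPE_FAST;		/* e.g. a flip_addr-only update */
}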
/* Forward declaration*/
|
/* Forward declaration*/
|
||||||
struct dc;
|
struct dc;
|
||||||
struct dc_plane_state;
|
struct dc_plane_state;
|
||||||
struct dc_state;
|
struct dc_state;
|
||||||
|
|
||||||
|
|
||||||
struct dc_cap_funcs {
|
struct dc_cap_funcs {
|
||||||
bool (*get_dcc_compression_cap)(const struct dc *dc,
|
bool (*get_dcc_compression_cap)(const struct dc *dc,
|
||||||
const struct dc_dcc_surface_param *input,
|
const struct dc_dcc_surface_param *input,
|
||||||
struct dc_surface_dcc_cap *output);
|
struct dc_surface_dcc_cap *output);
|
||||||
};
|
};
|
||||||
|
|
||||||
struct dc_stream_state_funcs {
|
|
||||||
bool (*adjust_vmin_vmax)(struct dc *dc,
|
|
||||||
struct dc_stream_state **stream,
|
|
||||||
int num_streams,
|
|
||||||
int vmin,
|
|
||||||
int vmax);
|
|
||||||
bool (*get_crtc_position)(struct dc *dc,
|
|
||||||
struct dc_stream_state **stream,
|
|
||||||
int num_streams,
|
|
||||||
unsigned int *v_pos,
|
|
||||||
unsigned int *nom_v_pos);
|
|
||||||
|
|
||||||
bool (*set_gamut_remap)(struct dc *dc,
|
|
||||||
const struct dc_stream_state *stream);
|
|
||||||
|
|
||||||
bool (*program_csc_matrix)(struct dc *dc,
|
|
||||||
struct dc_stream_state *stream);
|
|
||||||
|
|
||||||
void (*set_static_screen_events)(struct dc *dc,
|
|
||||||
struct dc_stream_state **stream,
|
|
||||||
int num_streams,
|
|
||||||
const struct dc_static_screen_events *events);
|
|
||||||
|
|
||||||
void (*set_dither_option)(struct dc_stream_state *stream,
|
|
||||||
enum dc_dither_option option);
|
|
||||||
|
|
||||||
void (*set_dpms)(struct dc *dc,
|
|
||||||
struct dc_stream_state *stream,
|
|
||||||
bool dpms_off);
|
|
||||||
};
|
|
||||||
|
|
||||||
struct link_training_settings;
|
struct link_training_settings;
|
||||||
|
|
||||||
struct dc_link_funcs {
|
|
||||||
void (*set_drive_settings)(struct dc *dc,
|
|
||||||
struct link_training_settings *lt_settings,
|
|
||||||
const struct dc_link *link);
|
|
||||||
void (*perform_link_training)(struct dc *dc,
|
|
||||||
struct dc_link_settings *link_setting,
|
|
||||||
bool skip_video_pattern);
|
|
||||||
void (*set_preferred_link_settings)(struct dc *dc,
|
|
||||||
struct dc_link_settings *link_setting,
|
|
||||||
struct dc_link *link);
|
|
||||||
void (*enable_hpd)(const struct dc_link *link);
|
|
||||||
void (*disable_hpd)(const struct dc_link *link);
|
|
||||||
void (*set_test_pattern)(
|
|
||||||
struct dc_link *link,
|
|
||||||
enum dp_test_pattern test_pattern,
|
|
||||||
const struct link_training_settings *p_link_settings,
|
|
||||||
const unsigned char *p_custom_pattern,
|
|
||||||
unsigned int cust_pattern_size);
|
|
||||||
};
|
|
||||||
|
|
||||||
/* Structure to hold configuration flags set by dm at dc creation. */
|
/* Structure to hold configuration flags set by dm at dc creation. */
|
||||||
struct dc_config {
|
struct dc_config {
|
||||||
|
@ -232,8 +218,6 @@ struct dce_hwseq;
|
||||||
struct dc {
|
struct dc {
|
||||||
struct dc_caps caps;
|
struct dc_caps caps;
|
||||||
struct dc_cap_funcs cap_funcs;
|
struct dc_cap_funcs cap_funcs;
|
||||||
struct dc_stream_state_funcs stream_funcs;
|
|
||||||
struct dc_link_funcs link_funcs;
|
|
||||||
struct dc_config config;
|
struct dc_config config;
|
||||||
struct dc_debug debug;
|
struct dc_debug debug;
|
||||||
|
|
||||||
|
@ -333,24 +317,6 @@ enum color_transfer_func {
|
||||||
transfer_func_gamma_26
|
transfer_func_gamma_26
|
||||||
};
|
};
|
||||||
|
|
||||||
enum color_color_space {
|
|
||||||
color_space_unsupported,
|
|
||||||
color_space_srgb,
|
|
||||||
color_space_bt601,
|
|
||||||
color_space_bt709,
|
|
||||||
color_space_xv_ycc_bt601,
|
|
||||||
color_space_xv_ycc_bt709,
|
|
||||||
color_space_xr_rgb,
|
|
||||||
color_space_bt2020,
|
|
||||||
color_space_adobe,
|
|
||||||
color_space_dci_p3,
|
|
||||||
color_space_sc_rgb_ms_ref,
|
|
||||||
color_space_display_native,
|
|
||||||
color_space_app_ctrl,
|
|
||||||
color_space_dolby_vision,
|
|
||||||
color_space_custom_coordinates
|
|
||||||
};
|
|
||||||
|
|
||||||
struct dc_hdr_static_metadata {
|
struct dc_hdr_static_metadata {
|
||||||
/* display chromaticities and white point in units of 0.00001 */
|
/* display chromaticities and white point in units of 0.00001 */
|
||||||
unsigned int chromaticity_green_x;
|
unsigned int chromaticity_green_x;
|
||||||
|
@ -415,6 +381,33 @@ struct dc_plane_status {
|
||||||
bool is_right_eye;
|
bool is_right_eye;
|
||||||
};
|
};
|
||||||
|
|
||||||
|
union surface_update_flags {
|
||||||
|
|
||||||
|
struct {
|
||||||
|
/* Medium updates */
|
||||||
|
uint32_t color_space_change:1;
|
||||||
|
uint32_t input_tf_change:1;
|
||||||
|
uint32_t horizontal_mirror_change:1;
|
||||||
|
uint32_t per_pixel_alpha_change:1;
|
||||||
|
uint32_t rotation_change:1;
|
||||||
|
uint32_t swizzle_change:1;
|
||||||
|
uint32_t scaling_change:1;
|
||||||
|
uint32_t position_change:1;
|
||||||
|
uint32_t in_transfer_func:1;
|
||||||
|
uint32_t input_csc_change:1;
|
||||||
|
|
||||||
|
/* Full updates */
|
||||||
|
uint32_t new_plane:1;
|
||||||
|
uint32_t bpp_change:1;
|
||||||
|
uint32_t bandwidth_change:1;
|
||||||
|
uint32_t clock_change:1;
|
||||||
|
uint32_t stereo_format_change:1;
|
||||||
|
uint32_t full_update:1;
|
||||||
|
} bits;
|
||||||
|
|
||||||
|
uint32_t raw;
|
||||||
|
};
|
||||||
|
|
||||||
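The new per-plane update_flags union lets individual change bits be recorded during update detection and cleared in one store through the raw view, as dc_check_update_surfaces_for_stream() does above. A brief illustrative use:

/* Illustrative only: how the update_flags union is intended to be used. */
static void example_update_flags(struct dc_plane_state *plane)
{
	plane->update_flags.raw = 0;			/* clear all flags at once */
	plane->update_flags.bits.scaling_change = 1;	/* record a medium-level change */

	if (plane->update_flags.bits.full_update) {
		/* this plane needs full front-end re-programming */
	}
}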
struct dc_plane_state {
|
struct dc_plane_state {
|
||||||
struct dc_plane_address address;
|
struct dc_plane_address address;
|
||||||
struct scaling_taps scaling_quality;
|
struct scaling_taps scaling_quality;
|
||||||
|
@ -426,18 +419,19 @@ struct dc_plane_state {
|
||||||
union dc_tiling_info tiling_info;
|
union dc_tiling_info tiling_info;
|
||||||
|
|
||||||
struct dc_plane_dcc_param dcc;
|
struct dc_plane_dcc_param dcc;
|
||||||
struct dc_hdr_static_metadata hdr_static_ctx;
|
|
||||||
|
|
||||||
struct dc_gamma *gamma_correction;
|
struct dc_gamma *gamma_correction;
|
||||||
struct dc_transfer_func *in_transfer_func;
|
struct dc_transfer_func *in_transfer_func;
|
||||||
|
struct dc_bias_and_scale *bias_and_scale;
|
||||||
|
struct csc_transform input_csc_color_matrix;
|
||||||
|
struct fixed31_32 coeff_reduction_factor;
|
||||||
|
|
||||||
// sourceContentAttribute cache
|
// TODO: No longer used, remove
|
||||||
bool is_source_input_valid;
|
struct dc_hdr_static_metadata hdr_static_ctx;
|
||||||
struct dc_hdr_static_metadata source_input_mastering_info;
|
|
||||||
enum color_color_space source_input_color_space;
|
|
||||||
enum color_transfer_func source_input_tf;
|
|
||||||
|
|
||||||
enum dc_color_space color_space;
|
enum dc_color_space color_space;
|
||||||
|
enum color_transfer_func input_tf;
|
||||||
|
|
||||||
enum surface_pixel_format format;
|
enum surface_pixel_format format;
|
||||||
enum dc_rotation_angle rotation;
|
enum dc_rotation_angle rotation;
|
||||||
enum plane_stereo_format stereo_format;
|
enum plane_stereo_format stereo_format;
|
||||||
|
@ -447,6 +441,7 @@ struct dc_plane_state {
|
||||||
bool flip_immediate;
|
bool flip_immediate;
|
||||||
bool horizontal_mirror;
|
bool horizontal_mirror;
|
||||||
|
|
||||||
|
union surface_update_flags update_flags;
|
||||||
/* private to DC core */
|
/* private to DC core */
|
||||||
struct dc_plane_status status;
|
struct dc_plane_status status;
|
||||||
struct dc_context *ctx;
|
struct dc_context *ctx;
|
||||||
|
@ -463,10 +458,12 @@ struct dc_plane_info {
|
||||||
enum surface_pixel_format format;
|
enum surface_pixel_format format;
|
||||||
enum dc_rotation_angle rotation;
|
enum dc_rotation_angle rotation;
|
||||||
enum plane_stereo_format stereo_format;
|
enum plane_stereo_format stereo_format;
|
||||||
enum dc_color_space color_space; /*todo: wrong place, fits in scaling info*/
|
enum dc_color_space color_space;
|
||||||
|
enum color_transfer_func input_tf;
|
||||||
bool horizontal_mirror;
|
bool horizontal_mirror;
|
||||||
bool visible;
|
bool visible;
|
||||||
bool per_pixel_alpha;
|
bool per_pixel_alpha;
|
||||||
|
bool input_csc_enabled;
|
||||||
};
|
};
|
||||||
|
|
||||||
struct dc_scaling_info {
|
struct dc_scaling_info {
|
||||||
|
@ -483,13 +480,18 @@ struct dc_surface_update {
|
||||||
struct dc_flip_addrs *flip_addr;
|
struct dc_flip_addrs *flip_addr;
|
||||||
struct dc_plane_info *plane_info;
|
struct dc_plane_info *plane_info;
|
||||||
struct dc_scaling_info *scaling_info;
|
struct dc_scaling_info *scaling_info;
|
||||||
|
|
||||||
/* following updates require alloc/sleep/spin that is not isr safe,
|
/* following updates require alloc/sleep/spin that is not isr safe,
|
||||||
* null means no updates
|
* null means no updates
|
||||||
*/
|
*/
|
||||||
/* gamma TO BE REMOVED */
|
/* gamma TO BE REMOVED */
|
||||||
struct dc_gamma *gamma;
|
struct dc_gamma *gamma;
|
||||||
|
enum color_transfer_func color_input_tf;
|
||||||
|
enum color_transfer_func color_output_tf;
|
||||||
struct dc_transfer_func *in_transfer_func;
|
struct dc_transfer_func *in_transfer_func;
|
||||||
struct dc_hdr_static_metadata *hdr_static_metadata;
|
|
||||||
|
struct csc_transform *input_csc_color_matrix;
|
||||||
|
struct fixed31_32 *coeff_reduction_factor;
|
||||||
};
|
};
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@ -524,197 +526,7 @@ struct dc_flip_addrs {
|
||||||
bool dc_post_update_surfaces_to_stream(
|
bool dc_post_update_surfaces_to_stream(
|
||||||
struct dc *dc);
|
struct dc *dc);
|
||||||
|
|
||||||
/* Surface update type is used by dc_update_surfaces_and_stream
|
#include "dc_stream.h"
|
||||||
* The update type is determined at the very beginning of the function based
|
|
||||||
* on parameters passed in and decides how much programming (or updating) is
|
|
||||||
* going to be done during the call.
|
|
||||||
*
|
|
||||||
* UPDATE_TYPE_FAST is used for really fast updates that do not require much
|
|
||||||
* logical calculations or hardware register programming. This update MUST be
|
|
||||||
* ISR safe on windows. Currently fast update will only be used to flip surface
|
|
||||||
* address.
|
|
||||||
*
|
|
||||||
* UPDATE_TYPE_MED is used for slower updates which require significant hw
|
|
||||||
* re-programming however do not affect bandwidth consumption or clock
|
|
||||||
* requirements. At present, this is the level at which front end updates
|
|
||||||
* that do not require us to run bw_calcs happen. These are in/out transfer func
|
|
||||||
* updates, viewport offset changes, recout size changes and pixel depth changes.
|
|
||||||
* This update can be done at ISR, but we want to minimize how often this happens.
|
|
||||||
*
|
|
||||||
* UPDATE_TYPE_FULL is slow. Really slow. This requires us to recalculate our
|
|
||||||
* bandwidth and clocks, possibly rearrange some pipes and reprogram anything front
|
|
||||||
* end related. Any time viewport dimensions, recout dimensions, scaling ratios or
|
|
||||||
* gamma need to be adjusted or pipe needs to be turned on (or disconnected) we do
|
|
||||||
* a full update. This cannot be done at ISR level and should be a rare event.
|
|
||||||
* Unless someone is stress testing mpo enter/exit, playing with colour or adjusting
|
|
||||||
* underscan we don't expect to see this call at all.
|
|
||||||
*/
|
|
||||||
|
|
||||||
enum surface_update_type {
|
|
||||||
UPDATE_TYPE_FAST, /* super fast, safe to execute in isr */
|
|
||||||
UPDATE_TYPE_MED, /* ISR safe, most of programming needed, no bw/clk change*/
|
|
||||||
UPDATE_TYPE_FULL, /* may need to shuffle resources */
|
|
||||||
};
|
|
||||||
|
|
||||||
/*******************************************************************************
|
|
||||||
* Stream Interfaces
|
|
||||||
******************************************************************************/
|
|
||||||
|
|
||||||
struct dc_stream_status {
|
|
||||||
int primary_otg_inst;
|
|
||||||
int stream_enc_inst;
|
|
||||||
int plane_count;
|
|
||||||
struct dc_plane_state *plane_states[MAX_SURFACE_NUM];
|
|
||||||
|
|
||||||
/*
|
|
||||||
* link this stream passes through
|
|
||||||
*/
|
|
||||||
struct dc_link *link;
|
|
||||||
};
|
|
||||||
|
|
||||||
struct dc_stream_state {
|
|
||||||
struct dc_sink *sink;
|
|
||||||
struct dc_crtc_timing timing;
|
|
||||||
|
|
||||||
struct rect src; /* composition area */
|
|
||||||
struct rect dst; /* stream addressable area */
|
|
||||||
|
|
||||||
struct audio_info audio_info;
|
|
||||||
|
|
||||||
struct freesync_context freesync_ctx;
|
|
||||||
|
|
||||||
struct dc_transfer_func *out_transfer_func;
|
|
||||||
struct colorspace_transform gamut_remap_matrix;
|
|
||||||
struct csc_transform csc_color_matrix;
|
|
||||||
|
|
||||||
enum signal_type output_signal;
|
|
||||||
|
|
||||||
enum dc_color_space output_color_space;
|
|
||||||
enum dc_dither_option dither_option;
|
|
||||||
|
|
||||||
enum view_3d_format view_format;
|
|
||||||
|
|
||||||
bool ignore_msa_timing_param;
|
|
||||||
/* TODO: custom INFO packets */
|
|
||||||
/* TODO: ABM info (DMCU) */
|
|
||||||
/* TODO: PSR info */
|
|
||||||
/* TODO: CEA VIC */
|
|
||||||
|
|
||||||
/* from core_stream struct */
|
|
||||||
struct dc_context *ctx;
|
|
||||||
|
|
||||||
/* used by DCP and FMT */
|
|
||||||
struct bit_depth_reduction_params bit_depth_params;
|
|
||||||
struct clamping_and_pixel_encoding_params clamping;
|
|
||||||
|
|
||||||
int phy_pix_clk;
|
|
||||||
enum signal_type signal;
|
|
||||||
bool dpms_off;
|
|
||||||
|
|
||||||
struct dc_stream_status status;
|
|
||||||
|
|
||||||
struct dc_cursor_attributes cursor_attributes;
|
|
||||||
|
|
||||||
/* from stream struct */
|
|
||||||
struct kref refcount;
|
|
||||||
};
|
|
||||||
|
|
||||||
struct dc_stream_update {
|
|
||||||
struct rect src;
|
|
||||||
struct rect dst;
|
|
||||||
struct dc_transfer_func *out_transfer_func;
|
|
||||||
};
|
|
||||||
|
|
||||||
bool dc_is_stream_unchanged(
|
|
||||||
struct dc_stream_state *old_stream, struct dc_stream_state *stream);
|
|
||||||
bool dc_is_stream_scaling_unchanged(
|
|
||||||
struct dc_stream_state *old_stream, struct dc_stream_state *stream);
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Set up surface attributes and associate to a stream
|
|
||||||
* The surfaces parameter is an absolute set of all surface active for the stream.
|
|
||||||
* If no surfaces are provided, the stream will be blanked; no memory read.
|
|
||||||
* Any flip related attribute changes must be done through this interface.
|
|
||||||
*
|
|
||||||
* After this call:
|
|
||||||
* Surfaces attributes are programmed and configured to be composed into stream.
|
|
||||||
* This does not trigger a flip. No surface address is programmed.
|
|
||||||
*/
|
|
||||||
|
|
||||||
bool dc_commit_planes_to_stream(
|
|
||||||
struct dc *dc,
|
|
||||||
struct dc_plane_state **plane_states,
|
|
||||||
uint8_t new_plane_count,
|
|
||||||
struct dc_stream_state *dc_stream,
|
|
||||||
struct dc_state *state);
|
|
||||||
|
|
||||||
void dc_commit_updates_for_stream(struct dc *dc,
|
|
||||||
struct dc_surface_update *srf_updates,
|
|
||||||
int surface_count,
|
|
||||||
struct dc_stream_state *stream,
|
|
||||||
struct dc_stream_update *stream_update,
|
|
||||||
struct dc_plane_state **plane_states,
|
|
||||||
struct dc_state *state);
|
|
||||||
/*
|
|
||||||
* Log the current stream state.
|
|
||||||
*/
|
|
||||||
void dc_stream_log(
|
|
||||||
const struct dc_stream_state *stream,
|
|
||||||
struct dal_logger *dc_logger,
|
|
||||||
enum dc_log_type log_type);
|
|
||||||
|
|
||||||
uint8_t dc_get_current_stream_count(struct dc *dc);
|
|
||||||
struct dc_stream_state *dc_get_stream_at_index(struct dc *dc, uint8_t i);
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Return the current frame counter.
|
|
||||||
*/
|
|
||||||
uint32_t dc_stream_get_vblank_counter(const struct dc_stream_state *stream);
|
|
||||||
|
|
||||||
/* TODO: Return parsed values rather than direct register read
|
|
||||||
* This has a dependency on the caller (amdgpu_get_crtc_scanoutpos)
|
|
||||||
* being refactored properly to be dce-specific
|
|
||||||
*/
|
|
||||||
bool dc_stream_get_scanoutpos(const struct dc_stream_state *stream,
|
|
||||||
uint32_t *v_blank_start,
|
|
||||||
uint32_t *v_blank_end,
|
|
||||||
uint32_t *h_position,
|
|
||||||
uint32_t *v_position);
|
|
||||||
|
|
||||||
enum dc_status dc_add_stream_to_ctx(
|
|
||||||
struct dc *dc,
|
|
||||||
struct dc_state *new_ctx,
|
|
||||||
struct dc_stream_state *stream);
|
|
||||||
|
|
||||||
enum dc_status dc_remove_stream_from_ctx(
|
|
||||||
struct dc *dc,
|
|
||||||
struct dc_state *new_ctx,
|
|
||||||
struct dc_stream_state *stream);
|
|
||||||
|
|
||||||
|
|
||||||
bool dc_add_plane_to_context(
|
|
||||||
const struct dc *dc,
|
|
||||||
struct dc_stream_state *stream,
|
|
||||||
struct dc_plane_state *plane_state,
|
|
||||||
struct dc_state *context);
|
|
||||||
|
|
||||||
bool dc_remove_plane_from_context(
|
|
||||||
const struct dc *dc,
|
|
||||||
struct dc_stream_state *stream,
|
|
||||||
struct dc_plane_state *plane_state,
|
|
||||||
struct dc_state *context);
|
|
||||||
|
|
||||||
bool dc_rem_all_planes_for_stream(
|
|
||||||
const struct dc *dc,
|
|
||||||
struct dc_stream_state *stream,
|
|
||||||
struct dc_state *context);
|
|
||||||
|
|
||||||
bool dc_add_all_planes_for_stream(
|
|
||||||
const struct dc *dc,
|
|
||||||
struct dc_stream_state *stream,
|
|
||||||
struct dc_plane_state * const *plane_states,
|
|
||||||
int plane_count,
|
|
||||||
struct dc_state *context);
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Structure to store surface/stream associations for validation
|
* Structure to store surface/stream associations for validation
|
||||||
|
@ -725,22 +537,12 @@ struct dc_validation_set {
|
||||||
uint8_t plane_count;
|
uint8_t plane_count;
|
||||||
};
|
};
|
||||||
|
|
||||||
enum dc_status dc_validate_stream(struct dc *dc, struct dc_stream_state *stream);
|
|
||||||
|
|
||||||
enum dc_status dc_validate_plane(struct dc *dc, const struct dc_plane_state *plane_state);
|
enum dc_status dc_validate_plane(struct dc *dc, const struct dc_plane_state *plane_state);
|
||||||
|
|
||||||
enum dc_status dc_validate_global_state(
|
enum dc_status dc_validate_global_state(
|
||||||
struct dc *dc,
|
struct dc *dc,
|
||||||
struct dc_state *new_ctx);
|
struct dc_state *new_ctx);
|
||||||
|
|
||||||
/*
|
|
||||||
* This function takes a stream and checks if it is guaranteed to be supported.
|
|
||||||
* Guaranteed means that MAX_COFUNC similar streams are supported.
|
|
||||||
*
|
|
||||||
* After this call:
|
|
||||||
* No hardware is programmed for call. Only validation is done.
|
|
||||||
*/
|
|
||||||
|
|
||||||
|
|
||||||
void dc_resource_state_construct(
|
void dc_resource_state_construct(
|
||||||
const struct dc *dc,
|
const struct dc *dc,
|
||||||
|
@ -767,42 +569,6 @@ void dc_resource_state_destruct(struct dc_state *context);
|
||||||
*/
|
*/
|
||||||
bool dc_commit_state(struct dc *dc, struct dc_state *context);
|
bool dc_commit_state(struct dc *dc, struct dc_state *context);
|
||||||
|
|
||||||
/*
|
|
||||||
* Set up streams and links associated to drive sinks
|
|
||||||
* The streams parameter is an absolute set of all active streams.
|
|
||||||
*
|
|
||||||
* After this call:
|
|
||||||
* Phy, Encoder, Timing Generator are programmed and enabled.
|
|
||||||
* New streams are enabled with blank stream; no memory read.
|
|
||||||
*/
|
|
||||||
/*
|
|
||||||
* Enable stereo when commit_streams is not required,
|
|
||||||
* for example, frame alternate.
|
|
||||||
*/
|
|
||||||
bool dc_enable_stereo(
|
|
||||||
struct dc *dc,
|
|
||||||
struct dc_state *context,
|
|
||||||
struct dc_stream_state *streams[],
|
|
||||||
uint8_t stream_count);
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Create a new default stream for the requested sink
|
|
||||||
*/
|
|
||||||
struct dc_stream_state *dc_create_stream_for_sink(struct dc_sink *dc_sink);
|
|
||||||
|
|
||||||
void dc_stream_retain(struct dc_stream_state *dc_stream);
|
|
||||||
void dc_stream_release(struct dc_stream_state *dc_stream);
|
|
||||||
|
|
||||||
struct dc_stream_status *dc_stream_get_status(
|
|
||||||
struct dc_stream_state *dc_stream);
|
|
||||||
|
|
||||||
enum surface_update_type dc_check_update_surfaces_for_stream(
|
|
||||||
struct dc *dc,
|
|
||||||
struct dc_surface_update *updates,
|
|
||||||
int surface_count,
|
|
||||||
struct dc_stream_update *stream_update,
|
|
||||||
const struct dc_stream_status *stream_status);
|
|
||||||
|
|
||||||
|
|
||||||
struct dc_state *dc_create_state(void);
|
struct dc_state *dc_create_state(void);
|
||||||
void dc_retain_state(struct dc_state *context);
|
void dc_retain_state(struct dc_state *context);
|
||||||
|
@ -835,171 +601,7 @@ struct dpcd_caps {
|
||||||
bool dpcd_display_control_capable;
|
bool dpcd_display_control_capable;
|
||||||
};
|
};
|
||||||
|
|
||||||
struct dc_link_status {
|
#include "dc_link.h"
|
||||||
struct dpcd_caps *dpcd_caps;
|
|
||||||
};
|
|
||||||
|
|
||||||
/* DP MST stream allocation (payload bandwidth number) */
|
|
||||||
struct link_mst_stream_allocation {
|
|
||||||
/* DIG front */
|
|
||||||
const struct stream_encoder *stream_enc;
|
|
||||||
/* associate DRM payload table with DC stream encoder */
|
|
||||||
uint8_t vcp_id;
|
|
||||||
/* number of slots required for the DP stream in transport packet */
|
|
||||||
uint8_t slot_count;
|
|
||||||
};
|
|
||||||
|
|
||||||
/* DP MST stream allocation table */
|
|
||||||
struct link_mst_stream_allocation_table {
|
|
||||||
/* number of DP video streams */
|
|
||||||
int stream_count;
|
|
||||||
/* array of stream allocations */
|
|
||||||
struct link_mst_stream_allocation stream_allocations[MAX_CONTROLLER_NUM];
|
|
||||||
};
|
|
||||||
|
|
||||||
/*
|
|
||||||
* A link contains one or more sinks and their connected status.
|
|
||||||
* The currently active signal type (HDMI, DP-SST, DP-MST) is also reported.
|
|
||||||
*/
|
|
||||||
struct dc_link {
|
|
||||||
struct dc_sink *remote_sinks[MAX_SINKS_PER_LINK];
|
|
||||||
unsigned int sink_count;
|
|
||||||
struct dc_sink *local_sink;
|
|
||||||
unsigned int link_index;
|
|
||||||
enum dc_connection_type type;
|
|
||||||
enum signal_type connector_signal;
|
|
||||||
enum dc_irq_source irq_source_hpd;
|
|
||||||
enum dc_irq_source irq_source_hpd_rx;/* aka DP Short Pulse */
|
|
||||||
/* caps is the same as reported_link_cap. link_traing use
|
|
||||||
* reported_link_cap. Will clean up. TODO
|
|
||||||
*/
|
|
||||||
struct dc_link_settings reported_link_cap;
|
|
||||||
struct dc_link_settings verified_link_cap;
|
|
||||||
struct dc_link_settings cur_link_settings;
|
|
||||||
struct dc_lane_settings cur_lane_setting;
|
|
||||||
struct dc_link_settings preferred_link_setting;
|
|
||||||
|
|
||||||
uint8_t ddc_hw_inst;
|
|
||||||
|
|
||||||
uint8_t hpd_src;
|
|
||||||
|
|
||||||
uint8_t link_enc_hw_inst;
|
|
||||||
|
|
||||||
bool test_pattern_enabled;
|
|
||||||
union compliance_test_state compliance_test_state;
|
|
||||||
|
|
||||||
void *priv;
|
|
||||||
|
|
||||||
struct ddc_service *ddc;
|
|
||||||
|
|
||||||
bool aux_mode;
|
|
||||||
|
|
||||||
/* Private to DC core */
|
|
||||||
|
|
||||||
const struct dc *dc;
|
|
||||||
|
|
||||||
struct dc_context *ctx;
|
|
||||||
|
|
||||||
struct link_encoder *link_enc;
|
|
||||||
struct graphics_object_id link_id;
|
|
||||||
union ddi_channel_mapping ddi_channel_mapping;
|
|
||||||
struct connector_device_tag_info device_tag;
|
|
||||||
struct dpcd_caps dpcd_caps;
|
|
||||||
unsigned short chip_caps;
|
|
||||||
unsigned int dpcd_sink_count;
|
|
||||||
enum edp_revision edp_revision;
|
|
||||||
bool psr_enabled;
|
|
||||||
|
|
||||||
/* MST record stream using this link */
|
|
||||||
struct link_flags {
|
|
||||||
bool dp_keep_receiver_powered;
|
|
||||||
} wa_flags;
|
|
||||||
struct link_mst_stream_allocation_table mst_stream_alloc_table;
|
|
||||||
|
|
||||||
struct dc_link_status link_status;
|
|
||||||
|
|
||||||
};
|
|
||||||
|
|
||||||
const struct dc_link_status *dc_link_get_status(const struct dc_link *dc_link);
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Return an enumerated dc_link. dc_link order is constant and determined at
|
|
||||||
* boot time. They cannot be created or destroyed.
|
|
||||||
* Use dc_get_caps() to get number of links.
|
|
||||||
*/
|
|
||||||
static inline struct dc_link *dc_get_link_at_index(struct dc *dc, uint32_t link_index)
|
|
||||||
{
|
|
||||||
return dc->links[link_index];
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Set backlight level of an embedded panel (eDP, LVDS). */
|
|
||||||
bool dc_link_set_backlight_level(const struct dc_link *dc_link, uint32_t level,
|
|
||||||
uint32_t frame_ramp, const struct dc_stream_state *stream);
|
|
||||||
|
|
||||||
bool dc_link_set_psr_enable(const struct dc_link *dc_link, bool enable, bool wait);
|
|
||||||
|
|
||||||
bool dc_link_get_psr_state(const struct dc_link *dc_link, uint32_t *psr_state);
|
|
||||||
|
|
||||||
bool dc_link_setup_psr(struct dc_link *dc_link,
|
|
||||||
const struct dc_stream_state *stream, struct psr_config *psr_config,
|
|
||||||
struct psr_context *psr_context);
|
|
||||||
|
|
||||||
/* Request DC to detect if there is a Panel connected.
|
|
||||||
* boot - If this call is during initial boot.
|
|
||||||
* Return false for any type of detection failure or MST detection
|
|
||||||
* true otherwise. True meaning further action is required (status update
|
|
||||||
* and OS notification).
|
|
||||||
*/
|
|
||||||
enum dc_detect_reason {
|
|
||||||
DETECT_REASON_BOOT,
|
|
||||||
DETECT_REASON_HPD,
|
|
||||||
DETECT_REASON_HPDRX,
|
|
||||||
};
|
|
||||||
|
|
||||||
bool dc_link_detect(struct dc_link *dc_link, enum dc_detect_reason reason);
|
|
||||||
|
|
||||||
/* Notify DC about DP RX Interrupt (aka Short Pulse Interrupt).
|
|
||||||
* Return:
|
|
||||||
* true - Downstream port status changed. DM should call DC to do the
|
|
||||||
* detection.
|
|
||||||
* false - no change in Downstream port status. No further action required
|
|
||||||
* from DM. */
|
|
||||||
bool dc_link_handle_hpd_rx_irq(struct dc_link *dc_link,
|
|
||||||
union hpd_irq_data *hpd_irq_dpcd_data);
|
|
||||||
|
|
||||||
struct dc_sink_init_data;
|
|
||||||
|
|
||||||
struct dc_sink *dc_link_add_remote_sink(
|
|
||||||
struct dc_link *dc_link,
|
|
||||||
const uint8_t *edid,
|
|
||||||
int len,
|
|
||||||
struct dc_sink_init_data *init_data);
|
|
||||||
|
|
||||||
void dc_link_remove_remote_sink(
|
|
||||||
struct dc_link *link,
|
|
||||||
struct dc_sink *sink);
|
|
||||||
|
|
||||||
/* Used by diagnostics for virtual link at the moment */
|
|
||||||
|
|
||||||
void dc_link_dp_set_drive_settings(
|
|
||||||
struct dc_link *link,
|
|
||||||
struct link_training_settings *lt_settings);
|
|
||||||
|
|
||||||
enum link_training_result dc_link_dp_perform_link_training(
|
|
||||||
struct dc_link *link,
|
|
||||||
const struct dc_link_settings *link_setting,
|
|
||||||
bool skip_video_pattern);
|
|
||||||
|
|
||||||
void dc_link_dp_enable_hpd(const struct dc_link *link);
|
|
||||||
|
|
||||||
void dc_link_dp_disable_hpd(const struct dc_link *link);
|
|
||||||
|
|
||||||
bool dc_link_dp_set_test_pattern(
|
|
||||||
struct dc_link *link,
|
|
||||||
enum dp_test_pattern test_pattern,
|
|
||||||
const struct link_training_settings *p_link_settings,
|
|
||||||
const unsigned char *p_custom_pattern,
|
|
||||||
unsigned int cust_pattern_size);
|
|
||||||
|
|
||||||
/*******************************************************************************
|
/*******************************************************************************
|
||||||
* Sink Interfaces - A sink corresponds to a display output device
|
* Sink Interfaces - A sink corresponds to a display output device
|
||||||
|
@ -1037,6 +639,7 @@ struct dc_sink {
|
||||||
|
|
||||||
/* private to dc_sink.c */
|
/* private to dc_sink.c */
|
||||||
struct kref refcount;
|
struct kref refcount;
|
||||||
|
|
||||||
};
|
};
|
||||||
|
|
||||||
void dc_sink_retain(struct dc_sink *sink);
|
void dc_sink_retain(struct dc_sink *sink);
|
||||||
|
@ -1051,18 +654,6 @@ struct dc_sink_init_data {
|
||||||
|
|
||||||
struct dc_sink *dc_sink_create(const struct dc_sink_init_data *init_params);
|
struct dc_sink *dc_sink_create(const struct dc_sink_init_data *init_params);
|
||||||
|
|
||||||
/*******************************************************************************
|
|
||||||
* Cursor interfaces - To manages the cursor within a stream
|
|
||||||
******************************************************************************/
|
|
||||||
/* TODO: Deprecated once we switch to dc_set_cursor_position */
|
|
||||||
bool dc_stream_set_cursor_attributes(
|
|
||||||
struct dc_stream_state *stream,
|
|
||||||
const struct dc_cursor_attributes *attributes);
|
|
||||||
|
|
||||||
bool dc_stream_set_cursor_position(
|
|
||||||
struct dc_stream_state *stream,
|
|
||||||
const struct dc_cursor_position *position);
|
|
||||||
|
|
||||||
/* Newer interfaces */
|
/* Newer interfaces */
|
||||||
struct dc_cursor {
|
struct dc_cursor {
|
||||||
struct dc_plane_address address;
|
struct dc_plane_address address;
|
||||||
|
@ -1090,14 +681,4 @@ void dc_set_power_state(
|
||||||
enum dc_acpi_cm_power_state power_state);
|
enum dc_acpi_cm_power_state power_state);
|
||||||
void dc_resume(struct dc *dc);
|
void dc_resume(struct dc *dc);
|
||||||
|
|
||||||
/*
|
|
||||||
* DPCD access interfaces
|
|
||||||
*/
|
|
||||||
|
|
||||||
bool dc_submit_i2c(
|
|
||||||
struct dc *dc,
|
|
||||||
uint32_t link_index,
|
|
||||||
struct i2c_command *cmd);
|
|
||||||
|
|
||||||
|
|
||||||
#endif /* DC_INTERFACE_H_ */
|
#endif /* DC_INTERFACE_H_ */
|
||||||
|
|
|
@ -255,7 +255,7 @@ enum dpcd_downstream_port_detailed_type {
|
||||||
DOWN_STREAM_DETAILED_DP_PLUS_PLUS
|
DOWN_STREAM_DETAILED_DP_PLUS_PLUS
|
||||||
};
|
};
|
||||||
|
|
||||||
union dwnstream_port_caps_byte1 {
|
union dwnstream_port_caps_byte2 {
|
||||||
struct {
|
struct {
|
||||||
uint8_t MAX_BITS_PER_COLOR_COMPONENT:2;
|
uint8_t MAX_BITS_PER_COLOR_COMPONENT:2;
|
||||||
uint8_t RESERVED:6;
|
uint8_t RESERVED:6;
|
||||||
|
@ -298,6 +298,32 @@ union dwnstream_port_caps_byte3_hdmi {
|
||||||
|
|
||||||
/*4-byte structure for detailed capabilities of a down-stream port
|
/*4-byte structure for detailed capabilities of a down-stream port
|
||||||
(DP-to-TMDS converter).*/
|
(DP-to-TMDS converter).*/
|
||||||
|
union dwnstream_portxcaps {
|
||||||
|
struct {
|
||||||
|
union dwnstream_port_caps_byte0 byte0;
|
||||||
|
unsigned char max_TMDS_clock; //byte1
|
||||||
|
union dwnstream_port_caps_byte2 byte2;
|
||||||
|
|
||||||
|
union {
|
||||||
|
union dwnstream_port_caps_byte3_dvi byteDVI;
|
||||||
|
union dwnstream_port_caps_byte3_hdmi byteHDMI;
|
||||||
|
} byte3;
|
||||||
|
} bytes;
|
||||||
|
|
||||||
|
unsigned char raw[4];
|
||||||
|
};
|
||||||
|
|
||||||
|
union downstream_port {
|
||||||
|
struct {
|
||||||
|
unsigned char present:1;
|
||||||
|
unsigned char type:2;
|
||||||
|
unsigned char format_conv:1;
|
||||||
|
unsigned char detailed_caps:1;
|
||||||
|
unsigned char reserved:3;
|
||||||
|
} bits;
|
||||||
|
unsigned char raw;
|
||||||
|
};
|
||||||
|
|
||||||
|
|
||||||
union sink_status {
|
union sink_status {
|
||||||
struct {
|
struct {
|
||||||
|
|
|
@ -156,8 +156,13 @@ uint32_t generic_reg_wait(const struct dc_context *ctx,
|
||||||
|
|
||||||
field_value = get_reg_field_value_ex(reg_val, mask, shift);
|
field_value = get_reg_field_value_ex(reg_val, mask, shift);
|
||||||
|
|
||||||
if (field_value == condition_value)
|
if (field_value == condition_value) {
|
||||||
|
if (i * delay_between_poll_us > 1000)
|
||||||
|
dm_output_to_console("REG_WAIT taking a while: %dms in %s line:%d\n",
|
||||||
|
delay_between_poll_us * i / 1000,
|
||||||
|
func_name, line);
|
||||||
return reg_val;
|
return reg_val;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
dm_error("REG_WAIT timeout %dus * %d tries - %s line:%d\n",
|
dm_error("REG_WAIT timeout %dus * %d tries - %s line:%d\n",
|
||||||
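The change above makes generic_reg_wait() report when a successful poll took more than a millisecond, in addition to the existing hard timeout. The general pattern, as a standalone sketch with hypothetical names (the driver's version reads registers through its own helpers and logs via dm_output_to_console/dm_error):

#include <stdint.h>
#include <stdio.h>

/* Standalone sketch of a bounded poll that warns when it was slow.
 * Names, the read callback and the 1 ms threshold are illustrative. */
static uint32_t poll_field_sketch(uint32_t (*read_reg)(void),
		uint32_t mask, uint32_t shift, uint32_t condition_value,
		unsigned int delay_between_poll_us, unsigned int time_out_num_tries)
{
	uint32_t reg_val = 0;
	unsigned int i;

	for (i = 1; i <= time_out_num_tries; i++) {
		reg_val = read_reg();
		if (((reg_val & mask) >> shift) == condition_value) {
			if (i * delay_between_poll_us > 1000)
				printf("poll took a while: %u us\n",
				       i * delay_between_poll_us);
			return reg_val;
		}
		/* the kernel code waits delay_between_poll_us here (udelay) */
	}

	printf("poll timeout: %u us * %u tries\n",
	       delay_between_poll_us, time_out_num_tries);
	return reg_val;
}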
|
|
|
@@ -492,15 +492,24 @@ struct dc_cursor_attributes {
 enum dc_color_space {
         COLOR_SPACE_UNKNOWN,
         COLOR_SPACE_SRGB,
+        COLOR_SPACE_XR_RGB,
         COLOR_SPACE_SRGB_LIMITED,
+        COLOR_SPACE_MSREF_SCRGB,
         COLOR_SPACE_YCBCR601,
         COLOR_SPACE_YCBCR709,
+        COLOR_SPACE_XV_YCC_709,
+        COLOR_SPACE_XV_YCC_601,
         COLOR_SPACE_YCBCR601_LIMITED,
         COLOR_SPACE_YCBCR709_LIMITED,
         COLOR_SPACE_2020_RGB_FULLRANGE,
         COLOR_SPACE_2020_RGB_LIMITEDRANGE,
         COLOR_SPACE_2020_YCBCR,
         COLOR_SPACE_ADOBERGB,
+        COLOR_SPACE_DCIP3,
+        COLOR_SPACE_DISPLAYNATIVE,
+        COLOR_SPACE_DOLBYVISION,
+        COLOR_SPACE_APPCTRL,
+        COLOR_SPACE_CUSTOMPOINTS,
 };
 
 enum dc_dither_option {
@@ -664,6 +673,22 @@ enum dc_timing_3d_format {
         TIMING_3D_FORMAT_MAX,
 };
 
+enum trigger_delay {
+        TRIGGER_DELAY_NEXT_PIXEL = 0,
+        TRIGGER_DELAY_NEXT_LINE,
+};
+
+enum crtc_event {
+        CRTC_EVENT_VSYNC_RISING = 0,
+        CRTC_EVENT_VSYNC_FALLING
+};
+
+struct crtc_trigger_info {
+        bool enabled;
+        struct dc_stream_state *event_source;
+        enum crtc_event event;
+        enum trigger_delay delay;
+};
+
 struct dc_crtc_timing {
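The new trigger_delay/crtc_event enums and the crtc_trigger_info struct appear to describe how one CRTC can be retriggered from another stream's vsync edge, with the reset applied either at the next pixel or the next line. A minimal sketch of how a caller might fill the structure is below; the declarations are copied from the hunk, the stream type is an opaque stand-in, and setup_sync_trigger() is a hypothetical helper, not a DC API.

#include <stdbool.h>
#include <stddef.h>

/* Opaque stand-in for the DC stream object, only so the sketch compiles. */
struct dc_stream_state;

enum trigger_delay {
        TRIGGER_DELAY_NEXT_PIXEL = 0,
        TRIGGER_DELAY_NEXT_LINE,
};

enum crtc_event {
        CRTC_EVENT_VSYNC_RISING = 0,
        CRTC_EVENT_VSYNC_FALLING
};

struct crtc_trigger_info {
        bool enabled;
        struct dc_stream_state *event_source;
        enum crtc_event event;
        enum trigger_delay delay;
};

/* Hypothetical helper: configure a follower CRTC to retrigger on the rising
 * vsync of another stream, with the reset taking effect on the next line. */
static void setup_sync_trigger(struct crtc_trigger_info *info,
                               struct dc_stream_state *master_stream)
{
        info->enabled = true;
        info->event_source = master_stream;
        info->event = CRTC_EVENT_VSYNC_RISING;
        info->delay = TRIGGER_DELAY_NEXT_LINE;
}

int main(void)
{
        struct crtc_trigger_info info = { 0 };

        setup_sync_trigger(&info, NULL); /* NULL stands in for a real stream */
        return info.enabled ? 0 : 1;
}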
@@ -0,0 +1,207 @@
+/*
+ * Copyright 2012-14 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef DC_LINK_H_
+#define DC_LINK_H_
+
+#include "dc_types.h"
+#include "grph_object_defs.h"
+
+struct dc_link_status {
+        struct dpcd_caps *dpcd_caps;
+};
+
+/* DP MST stream allocation (payload bandwidth number) */
+struct link_mst_stream_allocation {
+        /* DIG front */
+        const struct stream_encoder *stream_enc;
+        /* associate DRM payload table with DC stream encoder */
+        uint8_t vcp_id;
+        /* number of slots required for the DP stream in transport packet */
+        uint8_t slot_count;
+};
+
+/* DP MST stream allocation table */
+struct link_mst_stream_allocation_table {
+        /* number of DP video streams */
+        int stream_count;
+        /* array of stream allocations */
+        struct link_mst_stream_allocation stream_allocations[MAX_CONTROLLER_NUM];
+};
+
+/*
+ * A link contains one or more sinks and their connected status.
+ * The currently active signal type (HDMI, DP-SST, DP-MST) is also reported.
+ */
+struct dc_link {
+        struct dc_sink *remote_sinks[MAX_SINKS_PER_LINK];
+        unsigned int sink_count;
+        struct dc_sink *local_sink;
+        unsigned int link_index;
+        enum dc_connection_type type;
+        enum signal_type connector_signal;
+        enum dc_irq_source irq_source_hpd;
+        enum dc_irq_source irq_source_hpd_rx;/* aka DP Short Pulse */
+        /* caps is the same as reported_link_cap. link_traing use
+         * reported_link_cap. Will clean up. TODO
+         */
+        struct dc_link_settings reported_link_cap;
+        struct dc_link_settings verified_link_cap;
+        struct dc_link_settings cur_link_settings;
+        struct dc_lane_settings cur_lane_setting;
+        struct dc_link_settings preferred_link_setting;
+
+        uint8_t ddc_hw_inst;
+
+        uint8_t hpd_src;
+
+        uint8_t link_enc_hw_inst;
+
+        bool test_pattern_enabled;
+        union compliance_test_state compliance_test_state;
+
+        void *priv;
+
+        struct ddc_service *ddc;
+
+        bool aux_mode;
+
+        /* Private to DC core */
+
+        const struct dc *dc;
+
+        struct dc_context *ctx;
+
+        struct link_encoder *link_enc;
+        struct graphics_object_id link_id;
+        union ddi_channel_mapping ddi_channel_mapping;
+        struct connector_device_tag_info device_tag;
+        struct dpcd_caps dpcd_caps;
+        unsigned short chip_caps;
+        unsigned int dpcd_sink_count;
+        enum edp_revision edp_revision;
+        bool psr_enabled;
+
+        /* MST record stream using this link */
+        struct link_flags {
+                bool dp_keep_receiver_powered;
+        } wa_flags;
+        struct link_mst_stream_allocation_table mst_stream_alloc_table;
+
+        struct dc_link_status link_status;
+
+};
+
+const struct dc_link_status *dc_link_get_status(const struct dc_link *dc_link);
+
+/*
+ * Return an enumerated dc_link. dc_link order is constant and determined at
+ * boot time. They cannot be created or destroyed.
+ * Use dc_get_caps() to get number of links.
+ */
+static inline struct dc_link *dc_get_link_at_index(struct dc *dc, uint32_t link_index)
+{
+        return dc->links[link_index];
+}
+
+/* Set backlight level of an embedded panel (eDP, LVDS). */
+bool dc_link_set_backlight_level(const struct dc_link *dc_link, uint32_t level,
+                uint32_t frame_ramp, const struct dc_stream_state *stream);
+
+bool dc_link_set_psr_enable(const struct dc_link *dc_link, bool enable, bool wait);
+
+bool dc_link_get_psr_state(const struct dc_link *dc_link, uint32_t *psr_state);
+
+bool dc_link_setup_psr(struct dc_link *dc_link,
+                const struct dc_stream_state *stream, struct psr_config *psr_config,
+                struct psr_context *psr_context);
+
+/* Request DC to detect if there is a Panel connected.
+ * boot - If this call is during initial boot.
+ * Return false for any type of detection failure or MST detection
+ * true otherwise. True meaning further action is required (status update
+ * and OS notification).
+ */
+enum dc_detect_reason {
+        DETECT_REASON_BOOT,
+        DETECT_REASON_HPD,
+        DETECT_REASON_HPDRX,
+};
+
+bool dc_link_detect(struct dc_link *dc_link, enum dc_detect_reason reason);
+
+/* Notify DC about DP RX Interrupt (aka Short Pulse Interrupt).
+ * Return:
+ * true - Downstream port status changed. DM should call DC to do the
+ * detection.
+ * false - no change in Downstream port status. No further action required
+ * from DM. */
+bool dc_link_handle_hpd_rx_irq(struct dc_link *dc_link,
+                union hpd_irq_data *hpd_irq_dpcd_data);
+
+struct dc_sink_init_data;
+
+struct dc_sink *dc_link_add_remote_sink(
+                struct dc_link *dc_link,
+                const uint8_t *edid,
+                int len,
+                struct dc_sink_init_data *init_data);
+
+void dc_link_remove_remote_sink(
+        struct dc_link *link,
+        struct dc_sink *sink);
+
+/* Used by diagnostics for virtual link at the moment */
+
+void dc_link_dp_set_drive_settings(
+        struct dc_link *link,
+        struct link_training_settings *lt_settings);
+
+enum link_training_result dc_link_dp_perform_link_training(
+        struct dc_link *link,
+        const struct dc_link_settings *link_setting,
+        bool skip_video_pattern);
+
+void dc_link_dp_enable_hpd(const struct dc_link *link);
+
+void dc_link_dp_disable_hpd(const struct dc_link *link);
+
+bool dc_link_dp_set_test_pattern(
+        struct dc_link *link,
+        enum dp_test_pattern test_pattern,
+        const struct link_training_settings *p_link_settings,
+        const unsigned char *p_custom_pattern,
+        unsigned int cust_pattern_size);
+
+/*
+ * DPCD access interfaces
+ */
+
+bool dc_submit_i2c(
+        struct dc *dc,
+        uint32_t link_index,
+        struct i2c_command *cmd);
+
+#endif /* DC_LINK_H_ */
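The comments in the new header spell out two contracts: dc_get_link_at_index() hands back links from a fixed, boot-time enumeration, and dc_link_detect() returning true means the caller still owes a status update and an OS notification. The self-contained sketch below shows how a display manager loop might consume that; every type here, the link_count field, and the stubbed dc_link_detect() are simplified stand-ins rather than the real DC definitions.

#include <stdbool.h>
#include <stdio.h>

#define MAX_LINKS 6

/* Trimmed stand-ins for the real DC objects. */
struct dc_link {
        unsigned int link_index;
};

struct dc {
        struct dc_link *links[MAX_LINKS];
        unsigned int link_count; /* assumed field; real code queries dc_get_caps() */
};

enum dc_detect_reason {
        DETECT_REASON_BOOT,
        DETECT_REASON_HPD,
        DETECT_REASON_HPDRX,
};

/* Stub standing in for the real dc_link_detect(); it pretends every link
 * changed state so the caller's notification path runs. */
static bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
{
        (void)reason;
        return link != NULL;
}

/* Boot-time pass a display manager might do: walk the fixed link array and
 * (re)detect each connector, following the dc_get_link_at_index() contract
 * described in the header. */
static void detect_all_links(struct dc *dc)
{
        for (unsigned int i = 0; i < dc->link_count; i++) {
                struct dc_link *link = dc->links[i]; /* dc_get_link_at_index(dc, i) */

                if (dc_link_detect(link, DETECT_REASON_BOOT))
                        printf("link %u: status changed, update and notify OS\n",
                               link->link_index);
        }
}

int main(void)
{
        struct dc_link l0 = { .link_index = 0 };
        struct dc_link l1 = { .link_index = 1 };
        struct dc dc = { .links = { &l0, &l1 }, .link_count = 2 };

        detect_all_links(&dc);
        return 0;
}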