mirror of https://gitee.com/openkylin/linux.git
Merge branch 'drm-next-4.6' of git://people.freedesktop.org/~agd5f/linux into drm-next
A few more fixes and cleanups for 4.6:
- DCE code cleanups
- HDP flush/invalidation fixes
- GPUVM fixes
- switch to drm_vblank_[on|off]
- PX fixes
- misc bug fixes

* 'drm-next-4.6' of git://people.freedesktop.org/~agd5f/linux: (50 commits)
  drm/amdgpu: split pipeline sync out of SDMA vm_flush() as well
  drm/amdgpu: Revert "add mutex for ba_va->valids/invalids"
  drm/amdgpu: Revert "add lock for interval tree in vm"
  drm/amdgpu: Revert "add spin lock to protect freed list in vm (v3)"
  drm/amdgpu: reserve the PD during unmap and remove
  drm/amdgpu: Fix two bugs in amdgpu_vm_bo_split_mapping
  drm/radeon: Don't drop DP 2.7 Ghz link setup on some cards.
  MAINTAINERS: update radeon entry to include amdgpu as well
  drm/amdgpu: disable runtime pm on PX laptops without dGPU power control
  drm/radeon: disable runtime pm on PX laptops without dGPU power control
  drm/amd/amdgpu: Fix indentation in do_set_base() (DCEv8)
  drm/amd/amdgpu: make afmt_init cleanup if alloc fails (DCEv8)
  drm/amd/amdgpu: Move config init flag to bottom of sw_init (DCEv8)
  drm/amd/amdgpu: Don't proceed into audio_fini if audio is disabled (DCEv8)
  drm/amd/amdgpu: Fix identation in do_set_base() (DCEv10)
  drm/amd/amdgpu: Make afmt_init cleanup if alloc fails (DCEv10)
  drm/amd/amdgpu: Move initialized flag to bottom of sw_init (DCEv10)
  drm/amd/amdgpu: Don't proceed in audio_fini if disabled (DCEv10)
  drm/amd/amdgpu: Fix indentation in dce_v11_0_crtc_do_set_base()
  drm/amd/amdgpu: Make afmt_init() cleanup if alloc fails (DCEv11)
  ...
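As a quick orientation for the "switch to drm_vblank_[on|off]" item above: the dce_v10_0_crtc_dpms() hunk later in this diff replaces the drm_vblank_pre/post_modeset() calls with drm_vblank_off()/drm_vblank_on(). A minimal sketch of that pattern follows; it is illustrative only, and the function name and the pipe variable are placeholders, not symbols from this commit:

/* Sketch of the drm_vblank_[on|off] switch; example_crtc_dpms() and "pipe"
 * are placeholders, not the driver's actual code. */
static void example_crtc_dpms(struct drm_crtc *crtc, int mode)
{
	struct drm_device *dev = crtc->dev;
	unsigned int pipe = drm_crtc_index(crtc);

	switch (mode) {
	case DRM_MODE_DPMS_ON:
		/* was: drm_vblank_post_modeset(dev, pipe); */
		drm_vblank_on(dev, pipe);
		break;
	case DRM_MODE_DPMS_STANDBY:
	case DRM_MODE_DPMS_SUSPEND:
	case DRM_MODE_DPMS_OFF:
		/* was: drm_vblank_pre_modeset(dev, pipe); */
		drm_vblank_off(dev, pipe);
		break;
	}
}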
commit 9f443bf53b
@@ -3710,7 +3710,7 @@ F: drivers/gpu/vga/
F: include/drm/
F: include/uapi/drm/

RADEON DRM DRIVERS
RADEON and AMDGPU DRM DRIVERS
M: Alex Deucher <alexander.deucher@amd.com>
M: Christian König <christian.koenig@amd.com>
L: dri-devel@lists.freedesktop.org
@@ -3718,6 +3718,8 @@ T: git git://people.freedesktop.org/~agd5f/linux
S: Supported
F: drivers/gpu/drm/radeon/
F: include/uapi/drm/radeon*
F: drivers/gpu/drm/amd/
F: include/uapi/drm/amdgpu*

DRM PANEL DRIVERS
M: Thierry Reding <thierry.reding@gmail.com>

@@ -287,9 +287,11 @@ struct amdgpu_ring_funcs {
struct amdgpu_ib *ib);
void (*emit_fence)(struct amdgpu_ring *ring, uint64_t addr,
uint64_t seq, unsigned flags);
void (*emit_pipeline_sync)(struct amdgpu_ring *ring);
void (*emit_vm_flush)(struct amdgpu_ring *ring, unsigned vm_id,
uint64_t pd_addr);
void (*emit_hdp_flush)(struct amdgpu_ring *ring);
void (*emit_hdp_invalidate)(struct amdgpu_ring *ring);
void (*emit_gds_switch)(struct amdgpu_ring *ring, uint32_t vmid,
uint32_t gds_base, uint32_t gds_size,
uint32_t gws_base, uint32_t gws_size,
@@ -369,9 +371,6 @@ struct amdgpu_fence {
struct amdgpu_ring *ring;
uint64_t seq;

/* filp or special value for fence creator */
void *owner;

wait_queue_t fence_wake;
};

@@ -392,8 +391,7 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
unsigned irq_type);
void amdgpu_fence_driver_suspend(struct amdgpu_device *adev);
void amdgpu_fence_driver_resume(struct amdgpu_device *adev);
int amdgpu_fence_emit(struct amdgpu_ring *ring, void *owner,
struct amdgpu_fence **fence);
int amdgpu_fence_emit(struct amdgpu_ring *ring, struct fence **fence);
void amdgpu_fence_process(struct amdgpu_ring *ring);
int amdgpu_fence_wait_next(struct amdgpu_ring *ring);
int amdgpu_fence_wait_empty(struct amdgpu_ring *ring);
@@ -434,6 +432,8 @@ struct amdgpu_bo_list_entry {
struct ttm_validate_buffer tv;
struct amdgpu_bo_va *bo_va;
uint32_t priority;
struct page **user_pages;
int user_invalidated;
};

struct amdgpu_bo_va_mapping {
@@ -445,7 +445,6 @@ struct amdgpu_bo_va_mapping {

/* bo virtual addresses in a specific vm */
struct amdgpu_bo_va {
struct mutex mutex;
/* protected by bo being reserved */
struct list_head bo_list;
struct fence *last_pt_update;
@@ -596,6 +595,8 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync);
int amdgpu_sync_wait(struct amdgpu_sync *sync);
void amdgpu_sync_free(struct amdgpu_sync *sync);
int amdgpu_sync_init(void);
void amdgpu_sync_fini(void);

/*
* GART structures, functions & helpers
@@ -726,7 +727,7 @@ struct amdgpu_ib {
uint32_t length_dw;
uint64_t gpu_addr;
uint32_t *ptr;
struct amdgpu_fence *fence;
struct fence *fence;
struct amdgpu_user_fence *user;
struct amdgpu_vm *vm;
unsigned vm_id;
@@ -845,7 +846,6 @@ struct amdgpu_vm_id {

struct amdgpu_vm {
/* tree of virtual addresses mapped */
spinlock_t it_lock;
struct rb_root va;

/* protecting invalidated */
@@ -882,6 +882,13 @@ struct amdgpu_vm_manager_id {
struct list_head list;
struct fence *active;
atomic_long_t owner;

uint32_t gds_base;
uint32_t gds_size;
uint32_t gws_base;
uint32_t gws_size;
uint32_t oa_base;
uint32_t oa_size;
};

struct amdgpu_vm_manager {
@@ -917,8 +924,11 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
struct amdgpu_sync *sync, struct fence *fence,
unsigned *vm_id, uint64_t *vm_pd_addr);
void amdgpu_vm_flush(struct amdgpu_ring *ring,
unsigned vmid,
uint64_t pd_addr);
unsigned vm_id, uint64_t pd_addr,
uint32_t gds_base, uint32_t gds_size,
uint32_t gws_base, uint32_t gws_size,
uint32_t oa_base, uint32_t oa_size);
void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id);
uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr);
int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
struct amdgpu_vm *vm);
@@ -1006,7 +1016,7 @@ struct amdgpu_bo_list {
struct amdgpu_bo *gds_obj;
struct amdgpu_bo *gws_obj;
struct amdgpu_bo *oa_obj;
bool has_userptr;
unsigned first_userptr;
unsigned num_entries;
struct amdgpu_bo_list_entry *array;
};
@@ -1135,8 +1145,7 @@ int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
unsigned size, struct amdgpu_ib *ib);
void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib);
int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
struct amdgpu_ib *ib, void *owner,
struct fence *last_vm_update,
struct amdgpu_ib *ib, struct fence *last_vm_update,
struct fence **f);
int amdgpu_ib_pool_init(struct amdgpu_device *adev);
void amdgpu_ib_pool_fini(struct amdgpu_device *adev);
@@ -2012,7 +2021,6 @@ struct amdgpu_device {
struct amdgpu_sdma sdma;

/* uvd */
bool has_uvd;
struct amdgpu_uvd uvd;

/* vce */
@@ -2186,10 +2194,12 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
#define amdgpu_ring_get_wptr(r) (r)->funcs->get_wptr((r))
#define amdgpu_ring_set_wptr(r) (r)->funcs->set_wptr((r))
#define amdgpu_ring_emit_ib(r, ib) (r)->funcs->emit_ib((r), (ib))
#define amdgpu_ring_emit_pipeline_sync(r) (r)->funcs->emit_pipeline_sync((r))
#define amdgpu_ring_emit_vm_flush(r, vmid, addr) (r)->funcs->emit_vm_flush((r), (vmid), (addr))
#define amdgpu_ring_emit_fence(r, addr, seq, flags) (r)->funcs->emit_fence((r), (addr), (seq), (flags))
#define amdgpu_ring_emit_gds_switch(r, v, db, ds, wb, ws, ab, as) (r)->funcs->emit_gds_switch((r), (v), (db), (ds), (wb), (ws), (ab), (as))
#define amdgpu_ring_emit_hdp_flush(r) (r)->funcs->emit_hdp_flush((r))
#define amdgpu_ring_emit_hdp_invalidate(r) (r)->funcs->emit_hdp_invalidate((r))
#define amdgpu_ring_pad_ib(r, ib) ((r)->funcs->pad_ib((r), (ib)))
#define amdgpu_ih_get_wptr(adev) (adev)->irq.ih_funcs->get_wptr((adev))
#define amdgpu_ih_decode_iv(adev, iv) (adev)->irq.ih_funcs->decode_iv((adev), (iv))
@@ -2314,12 +2324,15 @@ int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
struct amdgpu_ring **out_ring);
void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *rbo, u32 domain);
bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages);
int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
uint32_t flags);
bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm);
struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm);
bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
unsigned long end);
bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm,
int *last_invalidated);
bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm);
uint32_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
struct ttm_mem_reg *mem);

@@ -63,6 +63,10 @@ bool amdgpu_has_atpx(void) {
return amdgpu_atpx_priv.atpx_detected;
}

bool amdgpu_has_atpx_dgpu_power_cntl(void) {
return amdgpu_atpx_priv.atpx.functions.power_cntl;
}

/**
* amdgpu_atpx_call - call an ATPX method
*
@@ -142,10 +146,6 @@ static void amdgpu_atpx_parse_functions(struct amdgpu_atpx_functions *f, u32 mas
*/
static int amdgpu_atpx_validate(struct amdgpu_atpx *atpx)
{
/* make sure required functions are enabled */
/* dGPU power control is required */
atpx->functions.power_cntl = true;

if (atpx->functions.px_params) {
union acpi_object *info;
struct atpx_px_params output;

@@ -91,7 +91,7 @@ static int amdgpu_bo_list_set(struct amdgpu_device *adev,
struct amdgpu_bo *gws_obj = adev->gds.gws_gfx_bo;
struct amdgpu_bo *oa_obj = adev->gds.oa_gfx_bo;

bool has_userptr = false;
unsigned last_entry = 0, first_userptr = num_entries;
unsigned i;
int r;

@@ -101,8 +101,9 @@ static int amdgpu_bo_list_set(struct amdgpu_device *adev,
memset(array, 0, num_entries * sizeof(struct amdgpu_bo_list_entry));

for (i = 0; i < num_entries; ++i) {
struct amdgpu_bo_list_entry *entry = &array[i];
struct amdgpu_bo_list_entry *entry;
struct drm_gem_object *gobj;
struct amdgpu_bo *bo;
struct mm_struct *usermm;

gobj = drm_gem_object_lookup(adev->ddev, filp, info[i].bo_handle);
@@ -111,19 +112,24 @@ static int amdgpu_bo_list_set(struct amdgpu_device *adev,
goto error_free;
}

entry->robj = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
drm_gem_object_unreference_unlocked(gobj);
entry->priority = min(info[i].bo_priority,
AMDGPU_BO_LIST_MAX_PRIORITY);
usermm = amdgpu_ttm_tt_get_usermm(entry->robj->tbo.ttm);

usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm);
if (usermm) {
if (usermm != current->mm) {
amdgpu_bo_unref(&entry->robj);
amdgpu_bo_unref(&bo);
r = -EPERM;
goto error_free;
}
has_userptr = true;
entry = &array[--first_userptr];
} else {
entry = &array[last_entry++];
}

entry->robj = bo;
entry->priority = min(info[i].bo_priority,
AMDGPU_BO_LIST_MAX_PRIORITY);
entry->tv.bo = &entry->robj->tbo;
entry->tv.shared = true;

@@ -145,7 +151,7 @@ static int amdgpu_bo_list_set(struct amdgpu_device *adev,
list->gds_obj = gds_obj;
list->gws_obj = gws_obj;
list->oa_obj = oa_obj;
list->has_userptr = has_userptr;
list->first_userptr = first_userptr;
list->array = array;
list->num_entries = num_entries;

@@ -194,6 +200,7 @@ void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list,

list_add_tail(&list->array[i].tv.head,
&bucket[priority]);
list->array[i].user_pages = NULL;
}

/* Connect the sorted buckets in the output list. */

@@ -111,6 +111,7 @@ static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
p->uf_entry.priority = 0;
p->uf_entry.tv.bo = &p->uf_entry.robj->tbo;
p->uf_entry.tv.shared = true;
p->uf_entry.user_pages = NULL;

drm_gem_object_unreference_unlocked(gobj);
return 0;
@@ -297,6 +298,7 @@ int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,

list_for_each_entry(lobj, validated, tv.head) {
struct amdgpu_bo *bo = lobj->robj;
bool binding_userptr = false;
struct mm_struct *usermm;
uint32_t domain;

@@ -304,6 +306,15 @@ int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
if (usermm && usermm != current->mm)
return -EPERM;

/* Check if we have user pages and nobody bound the BO already */
if (lobj->user_pages && bo->tbo.ttm->state != tt_bound) {
size_t size = sizeof(struct page *);

size *= bo->tbo.ttm->num_pages;
memcpy(bo->tbo.ttm->pages, lobj->user_pages, size);
binding_userptr = true;
}

if (bo->pin_count)
continue;

@@ -334,6 +345,11 @@ int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
}
return r;
}

if (binding_userptr) {
drm_free_large(lobj->user_pages);
lobj->user_pages = NULL;
}
}
return 0;
}
@@ -342,15 +358,18 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
union drm_amdgpu_cs *cs)
{
struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
struct amdgpu_bo_list_entry *e;
struct list_head duplicates;
bool need_mmap_lock = false;
unsigned i, tries = 10;
int r;

INIT_LIST_HEAD(&p->validated);

p->bo_list = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle);
if (p->bo_list) {
need_mmap_lock = p->bo_list->has_userptr;
need_mmap_lock = p->bo_list->first_userptr !=
p->bo_list->num_entries;
amdgpu_bo_list_get_list(p->bo_list, &p->validated);
}

@@ -363,9 +382,81 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
if (need_mmap_lock)
down_read(&current->mm->mmap_sem);

r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true, &duplicates);
if (unlikely(r != 0))
goto error_reserve;
while (1) {
struct list_head need_pages;
unsigned i;

r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true,
&duplicates);
if (unlikely(r != 0))
goto error_free_pages;

/* Without a BO list we don't have userptr BOs */
if (!p->bo_list)
break;

INIT_LIST_HEAD(&need_pages);
for (i = p->bo_list->first_userptr;
i < p->bo_list->num_entries; ++i) {

e = &p->bo_list->array[i];

if (amdgpu_ttm_tt_userptr_invalidated(e->robj->tbo.ttm,
&e->user_invalidated) && e->user_pages) {

/* We acquired a page array, but somebody
* invalidated it. Free it an try again
*/
release_pages(e->user_pages,
e->robj->tbo.ttm->num_pages,
false);
drm_free_large(e->user_pages);
e->user_pages = NULL;
}

if (e->robj->tbo.ttm->state != tt_bound &&
!e->user_pages) {
list_del(&e->tv.head);
list_add(&e->tv.head, &need_pages);

amdgpu_bo_unreserve(e->robj);
}
}

if (list_empty(&need_pages))
break;

/* Unreserve everything again. */
ttm_eu_backoff_reservation(&p->ticket, &p->validated);

/* We tried to often, just abort */
if (!--tries) {
r = -EDEADLK;
goto error_free_pages;
}

/* Fill the page arrays for all useptrs. */
list_for_each_entry(e, &need_pages, tv.head) {
struct ttm_tt *ttm = e->robj->tbo.ttm;

e->user_pages = drm_calloc_large(ttm->num_pages,
sizeof(struct page*));
if (!e->user_pages) {
r = -ENOMEM;
goto error_free_pages;
}

r = amdgpu_ttm_tt_get_user_pages(ttm, e->user_pages);
if (r) {
drm_free_large(e->user_pages);
e->user_pages = NULL;
goto error_free_pages;
}
}

/* And try again. */
list_splice(&need_pages, &p->validated);
}

amdgpu_vm_get_pt_bos(&fpriv->vm, &duplicates);

@@ -397,10 +488,26 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
ttm_eu_backoff_reservation(&p->ticket, &p->validated);
}

error_reserve:
error_free_pages:

if (need_mmap_lock)
up_read(&current->mm->mmap_sem);

if (p->bo_list) {
for (i = p->bo_list->first_userptr;
i < p->bo_list->num_entries; ++i) {
e = &p->bo_list->array[i];

if (!e->user_pages)
continue;

release_pages(e->user_pages,
e->robj->tbo.ttm->num_pages,
false);
drm_free_large(e->user_pages);
}
}

return r;
}

@@ -62,6 +62,12 @@ static const char *amdgpu_asic_name[] = {
"LAST",
};

#if defined(CONFIG_VGA_SWITCHEROO)
bool amdgpu_has_atpx_dgpu_power_cntl(void);
#else
static inline bool amdgpu_has_atpx_dgpu_power_cntl(void) { return false; }
#endif

bool amdgpu_device_is_px(struct drm_device *dev)
{
struct amdgpu_device *adev = dev->dev_private;
@@ -1479,7 +1485,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,

if (amdgpu_runtime_pm == 1)
runtime = true;
if (amdgpu_device_is_px(ddev))
if (amdgpu_device_is_px(ddev) && amdgpu_has_atpx_dgpu_power_cntl())
runtime = true;
vga_switcheroo_register_client(adev->pdev, &amdgpu_switcheroo_ops, runtime);
if (runtime)

@@ -555,6 +555,7 @@ static struct pci_driver amdgpu_kms_pci_driver = {

static int __init amdgpu_init(void)
{
amdgpu_sync_init();
#ifdef CONFIG_VGA_CONSOLE
if (vgacon_text_force()) {
DRM_ERROR("VGACON disables amdgpu kernel modesetting.\n");
@@ -577,6 +578,7 @@ static void __exit amdgpu_exit(void)
amdgpu_amdkfd_fini();
drm_pci_exit(driver, pdriver);
amdgpu_unregister_atpx_handler();
amdgpu_sync_fini();
}

module_init(amdgpu_init);

|
|||
* amdgpu_fence_emit - emit a fence on the requested ring
|
||||
*
|
||||
* @ring: ring the fence is associated with
|
||||
* @owner: creator of the fence
|
||||
* @fence: amdgpu fence object
|
||||
* @f: resulting fence object
|
||||
*
|
||||
* Emits a fence command on the requested ring (all asics).
|
||||
* Returns 0 on success, -ENOMEM on failure.
|
||||
*/
|
||||
int amdgpu_fence_emit(struct amdgpu_ring *ring, void *owner,
|
||||
struct amdgpu_fence **fence)
|
||||
int amdgpu_fence_emit(struct amdgpu_ring *ring, struct fence **f)
|
||||
{
|
||||
struct amdgpu_device *adev = ring->adev;
|
||||
struct amdgpu_fence *fence;
|
||||
|
||||
/* we are protected by the ring emission mutex */
|
||||
*fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_KERNEL);
|
||||
if ((*fence) == NULL) {
|
||||
fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_KERNEL);
|
||||
if (fence == NULL)
|
||||
return -ENOMEM;
|
||||
}
|
||||
(*fence)->seq = ++ring->fence_drv.sync_seq;
|
||||
(*fence)->ring = ring;
|
||||
(*fence)->owner = owner;
|
||||
fence_init(&(*fence)->base, &amdgpu_fence_ops,
|
||||
&ring->fence_drv.fence_queue.lock,
|
||||
adev->fence_context + ring->idx,
|
||||
(*fence)->seq);
|
||||
|
||||
fence->seq = ++ring->fence_drv.sync_seq;
|
||||
fence->ring = ring;
|
||||
fence_init(&fence->base, &amdgpu_fence_ops,
|
||||
&ring->fence_drv.fence_queue.lock,
|
||||
adev->fence_context + ring->idx,
|
||||
fence->seq);
|
||||
amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
|
||||
(*fence)->seq,
|
||||
AMDGPU_FENCE_FLAG_INT);
|
||||
fence->seq, AMDGPU_FENCE_FLAG_INT);
|
||||
*f = &fence->base;
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@ -140,25 +140,40 @@ int amdgpu_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_pri
|
|||
void amdgpu_gem_object_close(struct drm_gem_object *obj,
|
||||
struct drm_file *file_priv)
|
||||
{
|
||||
struct amdgpu_bo *rbo = gem_to_amdgpu_bo(obj);
|
||||
struct amdgpu_device *adev = rbo->adev;
|
||||
struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
|
||||
struct amdgpu_device *adev = bo->adev;
|
||||
struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
|
||||
struct amdgpu_vm *vm = &fpriv->vm;
|
||||
|
||||
struct amdgpu_bo_list_entry vm_pd;
|
||||
struct list_head list, duplicates;
|
||||
struct ttm_validate_buffer tv;
|
||||
struct ww_acquire_ctx ticket;
|
||||
struct amdgpu_bo_va *bo_va;
|
||||
int r;
|
||||
r = amdgpu_bo_reserve(rbo, true);
|
||||
|
||||
INIT_LIST_HEAD(&list);
|
||||
INIT_LIST_HEAD(&duplicates);
|
||||
|
||||
tv.bo = &bo->tbo;
|
||||
tv.shared = true;
|
||||
list_add(&tv.head, &list);
|
||||
|
||||
amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);
|
||||
|
||||
r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
|
||||
if (r) {
|
||||
dev_err(adev->dev, "leaking bo va because "
|
||||
"we fail to reserve bo (%d)\n", r);
|
||||
return;
|
||||
}
|
||||
bo_va = amdgpu_vm_bo_find(vm, rbo);
|
||||
bo_va = amdgpu_vm_bo_find(vm, bo);
|
||||
if (bo_va) {
|
||||
if (--bo_va->ref_count == 0) {
|
||||
amdgpu_vm_bo_rmv(adev, bo_va);
|
||||
}
|
||||
}
|
||||
amdgpu_bo_unreserve(rbo);
|
||||
ttm_eu_backoff_reservation(&ticket, &list);
|
||||
}
|
||||
|
||||
static int amdgpu_gem_handle_lockup(struct amdgpu_device *adev, int r)
|
||||
|
@ -274,18 +289,23 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
|
|||
|
||||
if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) {
|
||||
down_read(¤t->mm->mmap_sem);
|
||||
|
||||
r = amdgpu_ttm_tt_get_user_pages(bo->tbo.ttm,
|
||||
bo->tbo.ttm->pages);
|
||||
if (r)
|
||||
goto unlock_mmap_sem;
|
||||
|
||||
r = amdgpu_bo_reserve(bo, true);
|
||||
if (r) {
|
||||
up_read(¤t->mm->mmap_sem);
|
||||
goto release_object;
|
||||
}
|
||||
if (r)
|
||||
goto free_pages;
|
||||
|
||||
amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
|
||||
r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
|
||||
amdgpu_bo_unreserve(bo);
|
||||
up_read(¤t->mm->mmap_sem);
|
||||
if (r)
|
||||
goto release_object;
|
||||
goto free_pages;
|
||||
|
||||
up_read(¤t->mm->mmap_sem);
|
||||
}
|
||||
|
||||
r = drm_gem_handle_create(filp, gobj, &handle);
|
||||
|
@ -297,6 +317,12 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
|
|||
args->handle = handle;
|
||||
return 0;
|
||||
|
||||
free_pages:
|
||||
release_pages(bo->tbo.ttm->pages, bo->tbo.ttm->num_pages, false);
|
||||
|
||||
unlock_mmap_sem:
|
||||
up_read(¤t->mm->mmap_sem);
|
||||
|
||||
release_object:
|
||||
drm_gem_object_unreference_unlocked(gobj);
|
||||
|
||||
|
@ -569,11 +595,10 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
|
|||
tv.shared = true;
|
||||
list_add(&tv.head, &list);
|
||||
|
||||
if (args->operation == AMDGPU_VA_OP_MAP) {
|
||||
tv_pd.bo = &fpriv->vm.page_directory->tbo;
|
||||
tv_pd.shared = true;
|
||||
list_add(&tv_pd.head, &list);
|
||||
}
|
||||
tv_pd.bo = &fpriv->vm.page_directory->tbo;
|
||||
tv_pd.shared = true;
|
||||
list_add(&tv_pd.head, &list);
|
||||
|
||||
r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
|
||||
if (r) {
|
||||
drm_gem_object_unreference_unlocked(gobj);
|
||||
|
|
|
@ -90,9 +90,8 @@ int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
|
|||
*/
|
||||
void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib)
|
||||
{
|
||||
amdgpu_sa_bo_free(adev, &ib->sa_bo, &ib->fence->base);
|
||||
if (ib->fence)
|
||||
fence_put(&ib->fence->base);
|
||||
amdgpu_sa_bo_free(adev, &ib->sa_bo, ib->fence);
|
||||
fence_put(ib->fence);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -101,7 +100,6 @@ void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib)
|
|||
* @adev: amdgpu_device pointer
|
||||
* @num_ibs: number of IBs to schedule
|
||||
* @ibs: IB objects to schedule
|
||||
* @owner: owner for creating the fences
|
||||
* @f: fence created during this submission
|
||||
*
|
||||
* Schedule an IB on the associated ring (all asics).
|
||||
|
@ -118,8 +116,7 @@ void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib)
|
|||
* to SI there was just a DE IB.
|
||||
*/
|
||||
int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
|
||||
struct amdgpu_ib *ibs, void *owner,
|
||||
struct fence *last_vm_update,
|
||||
struct amdgpu_ib *ibs, struct fence *last_vm_update,
|
||||
struct fence **f)
|
||||
{
|
||||
struct amdgpu_device *adev = ring->adev;
|
||||
|
@ -153,13 +150,10 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
|
|||
|
||||
if (vm) {
|
||||
/* do context switch */
|
||||
amdgpu_vm_flush(ring, ib->vm_id, ib->vm_pd_addr);
|
||||
|
||||
if (ring->funcs->emit_gds_switch)
|
||||
amdgpu_ring_emit_gds_switch(ring, ib->vm_id,
|
||||
ib->gds_base, ib->gds_size,
|
||||
ib->gws_base, ib->gws_size,
|
||||
ib->oa_base, ib->oa_size);
|
||||
amdgpu_vm_flush(ring, ib->vm_id, ib->vm_pd_addr,
|
||||
ib->gds_base, ib->gds_size,
|
||||
ib->gws_base, ib->gws_size,
|
||||
ib->oa_base, ib->oa_size);
|
||||
|
||||
if (ring->funcs->emit_hdp_flush)
|
||||
amdgpu_ring_emit_hdp_flush(ring);
|
||||
|
@ -171,6 +165,8 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
|
|||
|
||||
if (ib->ctx != ctx || ib->vm != vm) {
|
||||
ring->current_ctx = old_ctx;
|
||||
if (ib->vm_id)
|
||||
amdgpu_vm_reset_id(adev, ib->vm_id);
|
||||
amdgpu_ring_undo(ring);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
@ -178,10 +174,17 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
|
|||
ring->current_ctx = ctx;
|
||||
}
|
||||
|
||||
r = amdgpu_fence_emit(ring, owner, &ib->fence);
|
||||
if (vm) {
|
||||
if (ring->funcs->emit_hdp_invalidate)
|
||||
amdgpu_ring_emit_hdp_invalidate(ring);
|
||||
}
|
||||
|
||||
r = amdgpu_fence_emit(ring, &ib->fence);
|
||||
if (r) {
|
||||
dev_err(adev->dev, "failed to emit fence (%d)\n", r);
|
||||
ring->current_ctx = old_ctx;
|
||||
if (ib->vm_id)
|
||||
amdgpu_vm_reset_id(adev, ib->vm_id);
|
||||
amdgpu_ring_undo(ring);
|
||||
return r;
|
||||
}
|
||||
|
@ -195,7 +198,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
|
|||
}
|
||||
|
||||
if (f)
|
||||
*f = fence_get(&ib->fence->base);
|
||||
*f = fence_get(ib->fence);
|
||||
|
||||
amdgpu_ring_commit(ring);
|
||||
return 0;
|
||||
|
|
|
@ -148,7 +148,7 @@ static struct fence *amdgpu_job_run(struct amd_sched_job *sched_job)
|
|||
}
|
||||
|
||||
trace_amdgpu_sched_run_job(job);
|
||||
r = amdgpu_ib_schedule(job->ring, job->num_ibs, job->ibs, job->owner,
|
||||
r = amdgpu_ib_schedule(job->ring, job->num_ibs, job->ibs,
|
||||
job->sync.last_vm_update, &fence);
|
||||
if (r) {
|
||||
DRM_ERROR("Error scheduling IBs (%d)\n", r);
|
||||
|
|
|
@ -37,6 +37,8 @@ struct amdgpu_sync_entry {
|
|||
struct fence *fence;
|
||||
};
|
||||
|
||||
static struct kmem_cache *amdgpu_sync_slab;
|
||||
|
||||
/**
|
||||
* amdgpu_sync_create - zero init sync object
|
||||
*
|
||||
|
@ -50,14 +52,18 @@ void amdgpu_sync_create(struct amdgpu_sync *sync)
|
|||
sync->last_vm_update = NULL;
|
||||
}
|
||||
|
||||
/**
|
||||
* amdgpu_sync_same_dev - test if fence belong to us
|
||||
*
|
||||
* @adev: amdgpu device to use for the test
|
||||
* @f: fence to test
|
||||
*
|
||||
* Test if the fence was issued by us.
|
||||
*/
|
||||
static bool amdgpu_sync_same_dev(struct amdgpu_device *adev, struct fence *f)
|
||||
{
|
||||
struct amdgpu_fence *a_fence = to_amdgpu_fence(f);
|
||||
struct amd_sched_fence *s_fence = to_amd_sched_fence(f);
|
||||
|
||||
if (a_fence)
|
||||
return a_fence->ring->adev == adev;
|
||||
|
||||
if (s_fence) {
|
||||
struct amdgpu_ring *ring;
|
||||
|
||||
|
@ -68,17 +74,31 @@ static bool amdgpu_sync_same_dev(struct amdgpu_device *adev, struct fence *f)
|
|||
return false;
|
||||
}
|
||||
|
||||
static bool amdgpu_sync_test_owner(struct fence *f, void *owner)
|
||||
/**
|
||||
* amdgpu_sync_get_owner - extract the owner of a fence
|
||||
*
|
||||
* @fence: fence get the owner from
|
||||
*
|
||||
* Extract who originally created the fence.
|
||||
*/
|
||||
static void *amdgpu_sync_get_owner(struct fence *f)
|
||||
{
|
||||
struct amdgpu_fence *a_fence = to_amdgpu_fence(f);
|
||||
struct amd_sched_fence *s_fence = to_amd_sched_fence(f);
|
||||
|
||||
if (s_fence)
|
||||
return s_fence->owner == owner;
|
||||
if (a_fence)
|
||||
return a_fence->owner == owner;
|
||||
return false;
|
||||
return s_fence->owner;
|
||||
|
||||
return AMDGPU_FENCE_OWNER_UNDEFINED;
|
||||
}
|
||||
|
||||
/**
|
||||
* amdgpu_sync_keep_later - Keep the later fence
|
||||
*
|
||||
* @keep: existing fence to test
|
||||
* @fence: new fence
|
||||
*
|
||||
* Either keep the existing fence or the new one, depending which one is later.
|
||||
*/
|
||||
static void amdgpu_sync_keep_later(struct fence **keep, struct fence *fence)
|
||||
{
|
||||
if (*keep && fence_is_later(*keep, fence))
|
||||
|
@ -104,7 +124,7 @@ int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
|
|||
return 0;
|
||||
|
||||
if (amdgpu_sync_same_dev(adev, f) &&
|
||||
amdgpu_sync_test_owner(f, AMDGPU_FENCE_OWNER_VM))
|
||||
amdgpu_sync_get_owner(f) == AMDGPU_FENCE_OWNER_VM)
|
||||
amdgpu_sync_keep_later(&sync->last_vm_update, f);
|
||||
|
||||
hash_for_each_possible(sync->fences, e, node, f->context) {
|
||||
|
@ -115,7 +135,7 @@ int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
|
|||
return 0;
|
||||
}
|
||||
|
||||
e = kmalloc(sizeof(struct amdgpu_sync_entry), GFP_KERNEL);
|
||||
e = kmem_cache_alloc(amdgpu_sync_slab, GFP_KERNEL);
|
||||
if (!e)
|
||||
return -ENOMEM;
|
||||
|
||||
|
@ -124,18 +144,6 @@ int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
|
|||
return 0;
|
||||
}
|
||||
|
||||
static void *amdgpu_sync_get_owner(struct fence *f)
|
||||
{
|
||||
struct amdgpu_fence *a_fence = to_amdgpu_fence(f);
|
||||
struct amd_sched_fence *s_fence = to_amd_sched_fence(f);
|
||||
|
||||
if (s_fence)
|
||||
return s_fence->owner;
|
||||
else if (a_fence)
|
||||
return a_fence->owner;
|
||||
return AMDGPU_FENCE_OWNER_UNDEFINED;
|
||||
}
|
||||
|
||||
/**
|
||||
* amdgpu_sync_resv - sync to a reservation object
|
||||
*
|
||||
|
@ -208,7 +216,7 @@ struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync)
|
|||
f = e->fence;
|
||||
|
||||
hash_del(&e->node);
|
||||
kfree(e);
|
||||
kmem_cache_free(amdgpu_sync_slab, e);
|
||||
|
||||
if (!fence_is_signaled(f))
|
||||
return f;
|
||||
|
@ -231,7 +239,7 @@ int amdgpu_sync_wait(struct amdgpu_sync *sync)
|
|||
|
||||
hash_del(&e->node);
|
||||
fence_put(e->fence);
|
||||
kfree(e);
|
||||
kmem_cache_free(amdgpu_sync_slab, e);
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
@ -253,8 +261,34 @@ void amdgpu_sync_free(struct amdgpu_sync *sync)
|
|||
hash_for_each_safe(sync->fences, i, tmp, e, node) {
|
||||
hash_del(&e->node);
|
||||
fence_put(e->fence);
|
||||
kfree(e);
|
||||
kmem_cache_free(amdgpu_sync_slab, e);
|
||||
}
|
||||
|
||||
fence_put(sync->last_vm_update);
|
||||
}
|
||||
|
||||
/**
|
||||
* amdgpu_sync_init - init sync object subsystem
|
||||
*
|
||||
* Allocate the slab allocator.
|
||||
*/
|
||||
int amdgpu_sync_init(void)
|
||||
{
|
||||
amdgpu_sync_slab = kmem_cache_create(
|
||||
"amdgpu_sync", sizeof(struct amdgpu_sync_entry), 0,
|
||||
SLAB_HWCACHE_ALIGN, NULL);
|
||||
if (!amdgpu_sync_slab)
|
||||
return -ENOMEM;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* amdgpu_sync_fini - fini sync object subsystem
|
||||
*
|
||||
* Free the slab allocator.
|
||||
*/
|
||||
void amdgpu_sync_fini(void)
|
||||
{
|
||||
kmem_cache_destroy(amdgpu_sync_slab);
|
||||
}
|
||||
|
|
|
@ -494,29 +494,32 @@ static void amdgpu_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_re
|
|||
/*
|
||||
* TTM backend functions.
|
||||
*/
|
||||
struct amdgpu_ttm_tt {
|
||||
struct ttm_dma_tt ttm;
|
||||
struct amdgpu_device *adev;
|
||||
u64 offset;
|
||||
uint64_t userptr;
|
||||
struct mm_struct *usermm;
|
||||
uint32_t userflags;
|
||||
struct amdgpu_ttm_gup_task_list {
|
||||
struct list_head list;
|
||||
struct task_struct *task;
|
||||
};
|
||||
|
||||
/* prepare the sg table with the user pages */
|
||||
static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm)
|
||||
struct amdgpu_ttm_tt {
|
||||
struct ttm_dma_tt ttm;
|
||||
struct amdgpu_device *adev;
|
||||
u64 offset;
|
||||
uint64_t userptr;
|
||||
struct mm_struct *usermm;
|
||||
uint32_t userflags;
|
||||
spinlock_t guptasklock;
|
||||
struct list_head guptasks;
|
||||
atomic_t mmu_invalidations;
|
||||
};
|
||||
|
||||
int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
|
||||
{
|
||||
struct amdgpu_device *adev = amdgpu_get_adev(ttm->bdev);
|
||||
struct amdgpu_ttm_tt *gtt = (void *)ttm;
|
||||
unsigned pinned = 0, nents;
|
||||
int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
|
||||
unsigned pinned = 0;
|
||||
int r;
|
||||
|
||||
int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
|
||||
enum dma_data_direction direction = write ?
|
||||
DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
|
||||
|
||||
if (gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) {
|
||||
/* check that we only pin down anonymous memory
|
||||
/* check that we only use anonymous memory
|
||||
to prevent problems with writeback */
|
||||
unsigned long end = gtt->userptr + ttm->num_pages * PAGE_SIZE;
|
||||
struct vm_area_struct *vma;
|
||||
|
@ -529,10 +532,21 @@ static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm)
|
|||
do {
|
||||
unsigned num_pages = ttm->num_pages - pinned;
|
||||
uint64_t userptr = gtt->userptr + pinned * PAGE_SIZE;
|
||||
struct page **pages = ttm->pages + pinned;
|
||||
struct page **p = pages + pinned;
|
||||
struct amdgpu_ttm_gup_task_list guptask;
|
||||
|
||||
guptask.task = current;
|
||||
spin_lock(>t->guptasklock);
|
||||
list_add(&guptask.list, >t->guptasks);
|
||||
spin_unlock(>t->guptasklock);
|
||||
|
||||
r = get_user_pages(current, current->mm, userptr, num_pages,
|
||||
write, 0, pages, NULL);
|
||||
write, 0, p, NULL);
|
||||
|
||||
spin_lock(>t->guptasklock);
|
||||
list_del(&guptask.list);
|
||||
spin_unlock(>t->guptasklock);
|
||||
|
||||
if (r < 0)
|
||||
goto release_pages;
|
||||
|
||||
|
@ -540,6 +554,25 @@ static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm)
|
|||
|
||||
} while (pinned < ttm->num_pages);
|
||||
|
||||
return 0;
|
||||
|
||||
release_pages:
|
||||
release_pages(pages, pinned, 0);
|
||||
return r;
|
||||
}
|
||||
|
||||
/* prepare the sg table with the user pages */
|
||||
static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm)
|
||||
{
|
||||
struct amdgpu_device *adev = amdgpu_get_adev(ttm->bdev);
|
||||
struct amdgpu_ttm_tt *gtt = (void *)ttm;
|
||||
unsigned nents;
|
||||
int r;
|
||||
|
||||
int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
|
||||
enum dma_data_direction direction = write ?
|
||||
DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
|
||||
|
||||
r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0,
|
||||
ttm->num_pages << PAGE_SHIFT,
|
||||
GFP_KERNEL);
|
||||
|
@ -558,9 +591,6 @@ static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm)
|
|||
|
||||
release_sg:
|
||||
kfree(ttm->sg);
|
||||
|
||||
release_pages:
|
||||
release_pages(ttm->pages, pinned, 0);
|
||||
return r;
|
||||
}
|
||||
|
||||
|
@ -783,6 +813,10 @@ int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
|
|||
gtt->userptr = addr;
|
||||
gtt->usermm = current->mm;
|
||||
gtt->userflags = flags;
|
||||
spin_lock_init(>t->guptasklock);
|
||||
INIT_LIST_HEAD(>t->guptasks);
|
||||
atomic_set(>t->mmu_invalidations, 0);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -800,21 +834,40 @@ bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
|
|||
unsigned long end)
|
||||
{
|
||||
struct amdgpu_ttm_tt *gtt = (void *)ttm;
|
||||
struct amdgpu_ttm_gup_task_list *entry;
|
||||
unsigned long size;
|
||||
|
||||
if (gtt == NULL)
|
||||
return false;
|
||||
|
||||
if (gtt->ttm.ttm.state != tt_bound || !gtt->userptr)
|
||||
if (gtt == NULL || !gtt->userptr)
|
||||
return false;
|
||||
|
||||
size = (unsigned long)gtt->ttm.ttm.num_pages * PAGE_SIZE;
|
||||
if (gtt->userptr > end || gtt->userptr + size <= start)
|
||||
return false;
|
||||
|
||||
spin_lock(>t->guptasklock);
|
||||
list_for_each_entry(entry, >t->guptasks, list) {
|
||||
if (entry->task == current) {
|
||||
spin_unlock(>t->guptasklock);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
spin_unlock(>t->guptasklock);
|
||||
|
||||
atomic_inc(>t->mmu_invalidations);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm,
|
||||
int *last_invalidated)
|
||||
{
|
||||
struct amdgpu_ttm_tt *gtt = (void *)ttm;
|
||||
int prev_invalidated = *last_invalidated;
|
||||
|
||||
*last_invalidated = atomic_read(>t->mmu_invalidations);
|
||||
return prev_invalidated != *last_invalidated;
|
||||
}
|
||||
|
||||
bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm)
|
||||
{
|
||||
struct amdgpu_ttm_tt *gtt = (void *)ttm;
|
||||
|
|
|
@ -886,8 +886,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
|
|||
ib->length_dw = 16;
|
||||
|
||||
if (direct) {
|
||||
r = amdgpu_ib_schedule(ring, 1, ib,
|
||||
AMDGPU_FENCE_OWNER_UNDEFINED, NULL, &f);
|
||||
r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
|
||||
if (r)
|
||||
goto err_free;
|
||||
|
||||
|
|
|
@ -425,8 +425,7 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
|
|||
for (i = ib->length_dw; i < ib_size_dw; ++i)
|
||||
ib->ptr[i] = 0x0;
|
||||
|
||||
r = amdgpu_ib_schedule(ring, 1, ib, AMDGPU_FENCE_OWNER_UNDEFINED,
|
||||
NULL, &f);
|
||||
r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
|
||||
if (r)
|
||||
goto err;
|
||||
|
||||
|
@ -487,9 +486,7 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
|
|||
ib->ptr[i] = 0x0;
|
||||
|
||||
if (direct) {
|
||||
r = amdgpu_ib_schedule(ring, 1, ib,
|
||||
AMDGPU_FENCE_OWNER_UNDEFINED,
|
||||
NULL, &f);
|
||||
r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
|
||||
if (r)
|
||||
goto err;
|
||||
|
||||
|
|
|
@@ -95,6 +95,7 @@ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
entry->priority = 0;
entry->tv.bo = &vm->page_directory->tbo;
entry->tv.shared = true;
entry->user_pages = NULL;
list_add(&entry->tv.head, validated);
}

@@ -188,6 +189,13 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
if (!is_later && owner == (long)id &&
pd_addr == id->pd_gpu_addr) {

r = amdgpu_sync_fence(ring->adev, sync,
id->mgr_id->active);
if (r) {
mutex_unlock(&adev->vm_manager.lock);
return r;
}

fence_put(id->mgr_id->active);
id->mgr_id->active = fence_get(fence);

@@ -234,19 +242,68 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
* amdgpu_vm_flush - hardware flush the vm
*
* @ring: ring to use for flush
* @vmid: vmid number to use
* @vm_id: vmid number to use
* @pd_addr: address of the page directory
*
* Emit a VM flush when it is necessary.
*/
void amdgpu_vm_flush(struct amdgpu_ring *ring,
unsigned vmid,
uint64_t pd_addr)
unsigned vm_id, uint64_t pd_addr,
uint32_t gds_base, uint32_t gds_size,
uint32_t gws_base, uint32_t gws_size,
uint32_t oa_base, uint32_t oa_size)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_vm_manager_id *mgr_id = &adev->vm_manager.ids[vm_id];
bool gds_switch_needed = ring->funcs->emit_gds_switch && (
mgr_id->gds_base != gds_base ||
mgr_id->gds_size != gds_size ||
mgr_id->gws_base != gws_base ||
mgr_id->gws_size != gws_size ||
mgr_id->oa_base != oa_base ||
mgr_id->oa_size != oa_size);

if (ring->funcs->emit_pipeline_sync && (
pd_addr != AMDGPU_VM_NO_FLUSH || gds_switch_needed))
amdgpu_ring_emit_pipeline_sync(ring);

if (pd_addr != AMDGPU_VM_NO_FLUSH) {
trace_amdgpu_vm_flush(pd_addr, ring->idx, vmid);
amdgpu_ring_emit_vm_flush(ring, vmid, pd_addr);
trace_amdgpu_vm_flush(pd_addr, ring->idx, vm_id);
amdgpu_ring_emit_vm_flush(ring, vm_id, pd_addr);
}

if (gds_switch_needed) {
mgr_id->gds_base = gds_base;
mgr_id->gds_size = gds_size;
mgr_id->gws_base = gws_base;
mgr_id->gws_size = gws_size;
mgr_id->oa_base = oa_base;
mgr_id->oa_size = oa_size;
amdgpu_ring_emit_gds_switch(ring, vm_id,
gds_base, gds_size,
gws_base, gws_size,
oa_base, oa_size);
}
}

/**
* amdgpu_vm_reset_id - reset VMID to zero
*
* @adev: amdgpu device structure
* @vm_id: vmid number to use
*
* Reset saved GDW, GWS and OA to force switch on next flush.
*/
void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id)
{
struct amdgpu_vm_manager_id *mgr_id = &adev->vm_manager.ids[vm_id];

mgr_id->gds_base = 0;
mgr_id->gds_size = 0;
mgr_id->gws_base = 0;
mgr_id->gws_size = 0;
mgr_id->oa_base = 0;
mgr_id->oa_size = 0;
}

/**
@@ -810,7 +867,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
while (start != mapping->it.last + 1) {
uint64_t last;

last = min((uint64_t)mapping->it.last, start + max_size);
last = min((uint64_t)mapping->it.last, start + max_size - 1);
r = amdgpu_vm_bo_update_mapping(adev, gtt, gtt_flags, vm,
start, last, flags, addr,
fence);
@@ -818,7 +875,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
return r;

start = last + 1;
addr += max_size;
addr += max_size * AMDGPU_GPU_PAGE_SIZE;
}

return 0;
@@ -914,22 +971,18 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
struct amdgpu_bo_va_mapping *mapping;
int r;

spin_lock(&vm->freed_lock);
while (!list_empty(&vm->freed)) {
mapping = list_first_entry(&vm->freed,
struct amdgpu_bo_va_mapping, list);
list_del(&mapping->list);
spin_unlock(&vm->freed_lock);

r = amdgpu_vm_bo_split_mapping(adev, NULL, 0, vm, mapping,
0, NULL);
kfree(mapping);
if (r)
return r;

spin_lock(&vm->freed_lock);
}
spin_unlock(&vm->freed_lock);

return 0;

}
@@ -956,9 +1009,8 @@ int amdgpu_vm_clear_invalids(struct amdgpu_device *adev,
bo_va = list_first_entry(&vm->invalidated,
struct amdgpu_bo_va, vm_status);
spin_unlock(&vm->status_lock);
mutex_lock(&bo_va->mutex);

r = amdgpu_vm_bo_update(adev, bo_va, NULL);
mutex_unlock(&bo_va->mutex);
if (r)
return r;

@@ -1002,7 +1054,7 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
INIT_LIST_HEAD(&bo_va->valids);
INIT_LIST_HEAD(&bo_va->invalids);
INIT_LIST_HEAD(&bo_va->vm_status);
mutex_init(&bo_va->mutex);

list_add_tail(&bo_va->bo_list, &bo->va);

return bo_va;
@@ -1054,9 +1106,7 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
saddr /= AMDGPU_GPU_PAGE_SIZE;
eaddr /= AMDGPU_GPU_PAGE_SIZE;

spin_lock(&vm->it_lock);
it = interval_tree_iter_first(&vm->va, saddr, eaddr);
spin_unlock(&vm->it_lock);
if (it) {
struct amdgpu_bo_va_mapping *tmp;
tmp = container_of(it, struct amdgpu_bo_va_mapping, it);
@@ -1080,13 +1130,8 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
mapping->offset = offset;
mapping->flags = flags;

mutex_lock(&bo_va->mutex);
list_add(&mapping->list, &bo_va->invalids);
mutex_unlock(&bo_va->mutex);
spin_lock(&vm->it_lock);
interval_tree_insert(&mapping->it, &vm->va);
spin_unlock(&vm->it_lock);
trace_amdgpu_vm_bo_map(bo_va, mapping);

/* Make sure the page tables are allocated */
saddr >>= amdgpu_vm_block_size;
@@ -1130,6 +1175,7 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
entry->priority = 0;
entry->tv.bo = &entry->robj->tbo;
entry->tv.shared = true;
entry->user_pages = NULL;
vm->page_tables[pt_idx].addr = 0;
}

@@ -1137,9 +1183,7 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,

error_free:
list_del(&mapping->list);
spin_lock(&vm->it_lock);
interval_tree_remove(&mapping->it, &vm->va);
spin_unlock(&vm->it_lock);
trace_amdgpu_vm_bo_unmap(bo_va, mapping);
kfree(mapping);

@@ -1168,7 +1212,7 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
bool valid = true;

saddr /= AMDGPU_GPU_PAGE_SIZE;
mutex_lock(&bo_va->mutex);

list_for_each_entry(mapping, &bo_va->valids, list) {
if (mapping->it.start == saddr)
break;
@@ -1182,25 +1226,18 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
break;
}

if (&mapping->list == &bo_va->invalids) {
mutex_unlock(&bo_va->mutex);
if (&mapping->list == &bo_va->invalids)
return -ENOENT;
}
}
mutex_unlock(&bo_va->mutex);

list_del(&mapping->list);
spin_lock(&vm->it_lock);
interval_tree_remove(&mapping->it, &vm->va);
spin_unlock(&vm->it_lock);
trace_amdgpu_vm_bo_unmap(bo_va, mapping);

if (valid) {
spin_lock(&vm->freed_lock);
if (valid)
list_add(&mapping->list, &vm->freed);
spin_unlock(&vm->freed_lock);
} else {
else
kfree(mapping);
}

return 0;
}
@@ -1229,23 +1266,17 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,

list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
list_del(&mapping->list);
spin_lock(&vm->it_lock);
interval_tree_remove(&mapping->it, &vm->va);
spin_unlock(&vm->it_lock);
trace_amdgpu_vm_bo_unmap(bo_va, mapping);
spin_lock(&vm->freed_lock);
list_add(&mapping->list, &vm->freed);
spin_unlock(&vm->freed_lock);
}
list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
list_del(&mapping->list);
spin_lock(&vm->it_lock);
interval_tree_remove(&mapping->it, &vm->va);
spin_unlock(&vm->it_lock);
kfree(mapping);
}

fence_put(bo_va->last_pt_update);
mutex_destroy(&bo_va->mutex);
kfree(bo_va);
}

@@ -1298,8 +1329,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
INIT_LIST_HEAD(&vm->invalidated);
INIT_LIST_HEAD(&vm->cleared);
INIT_LIST_HEAD(&vm->freed);
spin_lock_init(&vm->it_lock);
spin_lock_init(&vm->freed_lock);

pd_size = amdgpu_vm_directory_size(adev);
pd_entries = amdgpu_vm_num_pdes(adev);

@@ -1386,6 +1416,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)

amdgpu_bo_unref(&vm->page_directory);
fence_put(vm->page_directory_fence);

for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_vm_id *id = &vm->ids[i];

@@ -1410,9 +1441,11 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev)
INIT_LIST_HEAD(&adev->vm_manager.ids_lru);

/* skip over VMID 0, since it is the system VM */
for (i = 1; i < adev->vm_manager.num_ids; ++i)
for (i = 1; i < adev->vm_manager.num_ids; ++i) {
amdgpu_vm_reset_id(adev, i);
list_add_tail(&adev->vm_manager.ids[i].list,
&adev->vm_manager.ids_lru);
}

atomic_set(&adev->vm_manager.vm_pte_next_ring, 0);
}

@@ -3017,7 +3017,6 @@ static int ci_populate_single_memory_level(struct amdgpu_device *adev,
&memory_level->MinVddcPhases);

memory_level->EnabledForThrottle = 1;
memory_level->EnabledForActivity = 1;
memory_level->UpH = 0;
memory_level->DownH = 100;
memory_level->VoltageDownH = 0;
@@ -3376,7 +3375,6 @@ static int ci_populate_single_graphic_level(struct amdgpu_device *adev,
graphic_level->SpllSpreadSpectrum2 = cpu_to_be32(graphic_level->SpllSpreadSpectrum2);
graphic_level->CcPwrDynRm = cpu_to_be32(graphic_level->CcPwrDynRm);
graphic_level->CcPwrDynRm1 = cpu_to_be32(graphic_level->CcPwrDynRm1);
graphic_level->EnabledForActivity = 1;

return 0;
}
@@ -3407,6 +3405,7 @@ static int ci_populate_all_graphic_levels(struct amdgpu_device *adev)
pi->smc_state_table.GraphicsLevel[i].DisplayWatermark =
PPSMC_DISPLAY_WATERMARK_HIGH;
}
pi->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1;

pi->smc_state_table.GraphicsDpmLevelCount = (u8)dpm_table->sclk_table.count;
pi->dpm_level_enable_mask.sclk_dpm_enable_mask =
@@ -3450,6 +3449,8 @@ static int ci_populate_all_memory_levels(struct amdgpu_device *adev)
return ret;
}

pi->smc_state_table.MemoryLevel[0].EnabledForActivity = 1;

if ((dpm_table->mclk_table.count >= 2) &&
((adev->pdev->device == 0x67B0) || (adev->pdev->device == 0x67B1))) {
pi->smc_state_table.MemoryLevel[1].MinVddc =
@@ -4381,26 +4382,6 @@ static int ci_dpm_force_performance_level(struct amdgpu_device *adev,
}
}
}
if ((!pi->pcie_dpm_key_disabled) &&
pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
levels = 0;
tmp = pi->dpm_level_enable_mask.pcie_dpm_enable_mask;
while (tmp >>= 1)
levels++;
if (levels) {
ret = ci_dpm_force_state_pcie(adev, level);
if (ret)
return ret;
for (i = 0; i < adev->usec_timeout; i++) {
tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX_1) &
TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX_MASK) >>
TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX__SHIFT;
if (tmp == levels)
break;
udelay(1);
}
}
}
} else if (level == AMDGPU_DPM_FORCED_LEVEL_LOW) {
if ((!pi->sclk_dpm_key_disabled) &&
pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
@@ -5395,30 +5376,6 @@ static int ci_dpm_enable(struct amdgpu_device *adev)

ci_update_current_ps(adev, boot_ps);

if (adev->irq.installed &&
amdgpu_is_internal_thermal_sensor(adev->pm.int_thermal_type)) {
#if 0
PPSMC_Result result;
#endif
ret = ci_thermal_set_temperature_range(adev, CISLANDS_TEMP_RANGE_MIN,
CISLANDS_TEMP_RANGE_MAX);
if (ret) {
DRM_ERROR("ci_thermal_set_temperature_range failed\n");
return ret;
}
amdgpu_irq_get(adev, &adev->pm.dpm.thermal.irq,
AMDGPU_THERMAL_IRQ_LOW_TO_HIGH);
amdgpu_irq_get(adev, &adev->pm.dpm.thermal.irq,
AMDGPU_THERMAL_IRQ_HIGH_TO_LOW);

#if 0
result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_EnableThermalInterrupt);

if (result != PPSMC_Result_OK)
DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
#endif
}

return 0;
}

@@ -2028,8 +2028,6 @@ static int cik_common_early_init(void *handle)

adev->asic_funcs = &cik_asic_funcs;

adev->has_uvd = true;

adev->rev_id = cik_get_rev_id(adev);
adev->external_rev_id = 0xFF;
switch (adev->asic_type) {

@@ -261,6 +261,13 @@ static void cik_sdma_ring_emit_hdp_flush(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, (0xfff << 16) | 10); /* retry count, poll interval */
}

static void cik_sdma_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
{
amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
amdgpu_ring_write(ring, mmHDP_DEBUG0);
amdgpu_ring_write(ring, 1);
}

/**
* cik_sdma_ring_emit_fence - emit a fence on the DMA ring
*
@@ -636,8 +643,7 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring)
ib.ptr[3] = 1;
ib.ptr[4] = 0xDEADBEEF;
ib.length_dw = 5;
r = amdgpu_ib_schedule(ring, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED,
NULL, &f);
r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
if (r)
goto err1;

@@ -815,6 +821,30 @@ static void cik_sdma_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0);
}

/**
* cik_sdma_ring_emit_pipeline_sync - sync the pipeline
*
* @ring: amdgpu_ring pointer
*
* Make sure all previous operations are completed (CIK).
*/
static void cik_sdma_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
uint32_t seq = ring->fence_drv.sync_seq;
uint64_t addr = ring->fence_drv.gpu_addr;

/* wait for idle */
amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0,
SDMA_POLL_REG_MEM_EXTRA_OP(0) |
SDMA_POLL_REG_MEM_EXTRA_FUNC(3) | /* equal */
SDMA_POLL_REG_MEM_EXTRA_M));
amdgpu_ring_write(ring, addr & 0xfffffffc);
amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
amdgpu_ring_write(ring, seq); /* reference */
amdgpu_ring_write(ring, 0xfffffff); /* mask */
amdgpu_ring_write(ring, (0xfff << 16) | 4); /* retry count, poll interval */
}

/**
* cik_sdma_ring_emit_vm_flush - cik vm flush using sDMA
*
@@ -1270,8 +1300,10 @@ static const struct amdgpu_ring_funcs cik_sdma_ring_funcs = {
.parse_cs = NULL,
.emit_ib = cik_sdma_ring_emit_ib,
.emit_fence = cik_sdma_ring_emit_fence,
.emit_pipeline_sync = cik_sdma_ring_emit_pipeline_sync,
.emit_vm_flush = cik_sdma_ring_emit_vm_flush,
.emit_hdp_flush = cik_sdma_ring_emit_hdp_flush,
.emit_hdp_invalidate = cik_sdma_ring_emit_hdp_invalidate,
.test_ring = cik_sdma_ring_test_ring,
.test_ib = cik_sdma_ring_test_ib,
.insert_nop = cik_sdma_ring_insert_nop,

|
|||
{
|
||||
int i;
|
||||
|
||||
if (!amdgpu_audio)
|
||||
return;
|
||||
|
||||
if (!adev->mode_info.audio.enabled)
|
||||
return;
|
||||
|
||||
|
@ -1973,7 +1976,7 @@ static void dce_v10_0_afmt_enable(struct drm_encoder *encoder, bool enable)
|
|||
enable ? "En" : "Dis", dig->afmt->offset, amdgpu_encoder->encoder_id);
|
||||
}
|
||||
|
||||
static void dce_v10_0_afmt_init(struct amdgpu_device *adev)
|
||||
static int dce_v10_0_afmt_init(struct amdgpu_device *adev)
|
||||
{
|
||||
int i;
|
||||
|
||||
|
@ -1986,8 +1989,16 @@ static void dce_v10_0_afmt_init(struct amdgpu_device *adev)
|
|||
if (adev->mode_info.afmt[i]) {
|
||||
adev->mode_info.afmt[i]->offset = dig_offsets[i];
|
||||
adev->mode_info.afmt[i]->id = i;
|
||||
} else {
|
||||
int j;
|
||||
for (j = 0; j < i; j++) {
|
||||
kfree(adev->mode_info.afmt[j]);
|
||||
adev->mode_info.afmt[j] = NULL;
|
||||
}
|
||||
return -ENOMEM;
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void dce_v10_0_afmt_fini(struct amdgpu_device *adev)
|
||||
|
@ -2064,8 +2075,7 @@ static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc,
|
|||
if (atomic) {
|
||||
amdgpu_fb = to_amdgpu_framebuffer(fb);
|
||||
target_fb = fb;
|
||||
}
|
||||
else {
|
||||
} else {
|
||||
amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
|
||||
target_fb = crtc->primary->fb;
|
||||
}
|
||||
|
@ -2079,9 +2089,9 @@ static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc,
|
|||
if (unlikely(r != 0))
|
||||
return r;
|
||||
|
||||
if (atomic)
|
||||
if (atomic) {
|
||||
fb_location = amdgpu_bo_gpu_offset(rbo);
|
||||
else {
|
||||
} else {
|
||||
r = amdgpu_bo_pin(rbo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
|
||||
if (unlikely(r != 0)) {
|
||||
amdgpu_bo_unreserve(rbo);
|
||||
|
@ -2700,13 +2710,13 @@ static void dce_v10_0_crtc_dpms(struct drm_crtc *crtc, int mode)
|
|||
type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
|
||||
amdgpu_irq_update(adev, &adev->crtc_irq, type);
|
||||
amdgpu_irq_update(adev, &adev->pageflip_irq, type);
|
||||
drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id);
|
||||
drm_vblank_on(dev, amdgpu_crtc->crtc_id);
|
||||
dce_v10_0_crtc_load_lut(crtc);
|
||||
break;
|
||||
case DRM_MODE_DPMS_STANDBY:
|
||||
case DRM_MODE_DPMS_SUSPEND:
|
||||
case DRM_MODE_DPMS_OFF:
|
||||
drm_vblank_pre_modeset(dev, amdgpu_crtc->crtc_id);
|
||||
drm_vblank_off(dev, amdgpu_crtc->crtc_id);
|
||||
if (amdgpu_crtc->enabled) {
|
||||
dce_v10_0_vga_enable(crtc, true);
|
||||
amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE);
|
||||
|
@ -2980,8 +2990,6 @@ static int dce_v10_0_sw_init(void *handle)
|
|||
if (r)
|
||||
return r;
|
||||
|
||||
adev->mode_info.mode_config_initialized = true;
|
||||
|
||||
adev->ddev->mode_config.funcs = &amdgpu_mode_funcs;
|
||||
|
||||
adev->ddev->mode_config.max_width = 16384;
|
||||
|
@ -3012,7 +3020,9 @@ static int dce_v10_0_sw_init(void *handle)
|
|||
return -EINVAL;
|
||||
|
||||
/* setup afmt */
|
||||
dce_v10_0_afmt_init(adev);
|
||||
r = dce_v10_0_afmt_init(adev);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
r = dce_v10_0_audio_init(adev);
|
||||
if (r)
|
||||
|
@ -3020,7 +3030,8 @@ static int dce_v10_0_sw_init(void *handle)
|
|||
|
||||
drm_kms_helper_poll_init(adev->ddev);
|
||||
|
||||
return r;
|
||||
adev->mode_info.mode_config_initialized = true;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int dce_v10_0_sw_fini(void *handle)
|
||||
|
|
|
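Aside: the afmt_init hunks above turn a silent allocation failure into an error return that frees whatever was already allocated before reporting -ENOMEM. A small self-contained sketch of that partial-unwind pattern in plain C (generic, not the driver code itself):

	#include <errno.h>
	#include <stdlib.h>

	#define NUM_SLOTS 7

	/* On the first failed allocation, free slots 0..i-1 and report
	 * -ENOMEM, mirroring the cleanup added in the afmt_init hunks. */
	static int init_slots(void *slots[], size_t count, size_t size)
	{
		size_t i;

		for (i = 0; i < count; i++) {
			slots[i] = calloc(1, size);
			if (!slots[i]) {
				size_t j;

				for (j = 0; j < i; j++) {
					free(slots[j]);
					slots[j] = NULL;
				}
				return -ENOMEM;
			}
		}
		return 0;
	}

	int main(void)
	{
		void *slots[NUM_SLOTS] = { 0 };
		int r = init_slots(slots, NUM_SLOTS, 64);

		if (r == 0) {
			size_t i;
			for (i = 0; i < NUM_SLOTS; i++)
				free(slots[i]);
		}
		return r ? 1 : 0;
	}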
@ -1658,6 +1658,9 @@ static void dce_v11_0_audio_fini(struct amdgpu_device *adev)
{
int i;

if (!amdgpu_audio)
return;

if (!adev->mode_info.audio.enabled)
return;

@ -1963,7 +1966,7 @@ static void dce_v11_0_afmt_enable(struct drm_encoder *encoder, bool enable)
enable ? "En" : "Dis", dig->afmt->offset, amdgpu_encoder->encoder_id);
}

static void dce_v11_0_afmt_init(struct amdgpu_device *adev)
static int dce_v11_0_afmt_init(struct amdgpu_device *adev)
{
int i;

@ -1976,8 +1979,16 @@ static void dce_v11_0_afmt_init(struct amdgpu_device *adev)
if (adev->mode_info.afmt[i]) {
adev->mode_info.afmt[i]->offset = dig_offsets[i];
adev->mode_info.afmt[i]->id = i;
} else {
int j;
for (j = 0; j < i; j++) {
kfree(adev->mode_info.afmt[j]);
adev->mode_info.afmt[j] = NULL;
}
return -ENOMEM;
}
}
return 0;
}

static void dce_v11_0_afmt_fini(struct amdgpu_device *adev)

@ -2054,8 +2065,7 @@ static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc,
if (atomic) {
amdgpu_fb = to_amdgpu_framebuffer(fb);
target_fb = fb;
}
else {
} else {
amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
target_fb = crtc->primary->fb;
}

@ -2069,9 +2079,9 @@ static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc,
if (unlikely(r != 0))
return r;

if (atomic)
if (atomic) {
fb_location = amdgpu_bo_gpu_offset(rbo);
else {
} else {
r = amdgpu_bo_pin(rbo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
if (unlikely(r != 0)) {
amdgpu_bo_unreserve(rbo);

@ -2691,13 +2701,13 @@ static void dce_v11_0_crtc_dpms(struct drm_crtc *crtc, int mode)
type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
amdgpu_irq_update(adev, &adev->crtc_irq, type);
amdgpu_irq_update(adev, &adev->pageflip_irq, type);
drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id);
drm_vblank_on(dev, amdgpu_crtc->crtc_id);
dce_v11_0_crtc_load_lut(crtc);
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
drm_vblank_pre_modeset(dev, amdgpu_crtc->crtc_id);
drm_vblank_off(dev, amdgpu_crtc->crtc_id);
if (amdgpu_crtc->enabled) {
dce_v11_0_vga_enable(crtc, true);
amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE);

@ -2961,7 +2971,7 @@ static int dce_v11_0_sw_init(void *handle)
for (i = 0; i < adev->mode_info.num_crtc; i++) {
r = amdgpu_irq_add_id(adev, i + 1, &adev->crtc_irq);
if (r)
return r;
return r;
}

for (i = 8; i < 20; i += 2) {

@ -2973,9 +2983,7 @@ static int dce_v11_0_sw_init(void *handle)
/* HPD hotplug */
r = amdgpu_irq_add_id(adev, 42, &adev->hpd_irq);
if (r)
return r;

adev->mode_info.mode_config_initialized = true;
return r;

adev->ddev->mode_config.funcs = &amdgpu_mode_funcs;

@ -2994,6 +3002,7 @@ static int dce_v11_0_sw_init(void *handle)
adev->ddev->mode_config.max_width = 16384;
adev->ddev->mode_config.max_height = 16384;

/* allocate crtcs */
for (i = 0; i < adev->mode_info.num_crtc; i++) {
r = dce_v11_0_crtc_init(adev, i);

@ -3007,7 +3016,9 @@ static int dce_v11_0_sw_init(void *handle)
return -EINVAL;

/* setup afmt */
dce_v11_0_afmt_init(adev);
r = dce_v11_0_afmt_init(adev);
if (r)
return r;

r = dce_v11_0_audio_init(adev);
if (r)

@ -3015,7 +3026,8 @@ static int dce_v11_0_sw_init(void *handle)

drm_kms_helper_poll_init(adev->ddev);

return r;
adev->mode_info.mode_config_initialized = true;
return 0;
}

static int dce_v11_0_sw_fini(void *handle)
@ -1639,6 +1639,9 @@ static void dce_v8_0_audio_fini(struct amdgpu_device *adev)
{
int i;

if (!amdgpu_audio)
return;

if (!adev->mode_info.audio.enabled)
return;

@ -1910,7 +1913,7 @@ static void dce_v8_0_afmt_enable(struct drm_encoder *encoder, bool enable)
enable ? "En" : "Dis", dig->afmt->offset, amdgpu_encoder->encoder_id);
}

static void dce_v8_0_afmt_init(struct amdgpu_device *adev)
static int dce_v8_0_afmt_init(struct amdgpu_device *adev)
{
int i;

@ -1923,8 +1926,16 @@ static void dce_v8_0_afmt_init(struct amdgpu_device *adev)
if (adev->mode_info.afmt[i]) {
adev->mode_info.afmt[i]->offset = dig_offsets[i];
adev->mode_info.afmt[i]->id = i;
} else {
int j;
for (j = 0; j < i; j++) {
kfree(adev->mode_info.afmt[j]);
adev->mode_info.afmt[j] = NULL;
}
return -ENOMEM;
}
}
return 0;
}

static void dce_v8_0_afmt_fini(struct amdgpu_device *adev)

@ -2001,8 +2012,7 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc,
if (atomic) {
amdgpu_fb = to_amdgpu_framebuffer(fb);
target_fb = fb;
}
else {
} else {
amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
target_fb = crtc->primary->fb;
}

@ -2016,9 +2026,9 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc,
if (unlikely(r != 0))
return r;

if (atomic)
if (atomic) {
fb_location = amdgpu_bo_gpu_offset(rbo);
else {
} else {
r = amdgpu_bo_pin(rbo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
if (unlikely(r != 0)) {
amdgpu_bo_unreserve(rbo);

@ -2612,13 +2622,13 @@ static void dce_v8_0_crtc_dpms(struct drm_crtc *crtc, int mode)
type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
amdgpu_irq_update(adev, &adev->crtc_irq, type);
amdgpu_irq_update(adev, &adev->pageflip_irq, type);
drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id);
drm_vblank_on(dev, amdgpu_crtc->crtc_id);
dce_v8_0_crtc_load_lut(crtc);
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
drm_vblank_pre_modeset(dev, amdgpu_crtc->crtc_id);
drm_vblank_off(dev, amdgpu_crtc->crtc_id);
if (amdgpu_crtc->enabled) {
dce_v8_0_vga_enable(crtc, true);
amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE);

@ -2890,8 +2900,6 @@ static int dce_v8_0_sw_init(void *handle)
if (r)
return r;

adev->mode_info.mode_config_initialized = true;

adev->ddev->mode_config.funcs = &amdgpu_mode_funcs;

adev->ddev->mode_config.max_width = 16384;

@ -2922,7 +2930,9 @@ static int dce_v8_0_sw_init(void *handle)
return -EINVAL;

/* setup afmt */
dce_v8_0_afmt_init(adev);
r = dce_v8_0_afmt_init(adev);
if (r)
return r;

r = dce_v8_0_audio_init(adev);
if (r)

@ -2930,7 +2940,8 @@ static int dce_v8_0_sw_init(void *handle)

drm_kms_helper_poll_init(adev->ddev);

return r;
adev->mode_info.mode_config_initialized = true;
return 0;
}

static int dce_v8_0_sw_fini(void *handle)
@ -1924,6 +1924,25 @@ static void gfx_v7_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, 0x20); /* poll interval */
}

/**
* gfx_v7_0_ring_emit_hdp_invalidate - emit an hdp invalidate on the cp
*
* @adev: amdgpu_device pointer
* @ridx: amdgpu ring index
*
* Emits an hdp invalidate on the cp.
*/
static void gfx_v7_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
{
amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
WRITE_DATA_DST_SEL(0) |
WR_CONFIRM));
amdgpu_ring_write(ring, mmHDP_DEBUG0);
amdgpu_ring_write(ring, 0);
amdgpu_ring_write(ring, 1);
}

/**
* gfx_v7_0_ring_emit_fence_gfx - emit a fence on the gfx ring
*

@ -2117,8 +2136,7 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring)
ib.ptr[2] = 0xDEADBEEF;
ib.length_dw = 3;

r = amdgpu_ib_schedule(ring, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED,
NULL, &f);
r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
if (r)
goto err2;

@ -3023,6 +3041,26 @@ static int gfx_v7_0_cp_resume(struct amdgpu_device *adev)
return 0;
}

/**
* gfx_v7_0_ring_emit_vm_flush - cik vm flush using the CP
*
* @ring: the ring to emmit the commands to
*
* Sync the command pipeline with the PFP. E.g. wait for everything
* to be completed.
*/
static void gfx_v7_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
if (usepfp) {
/* synce CE with ME to prevent CE fetch CEIB before context switch done */
amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
amdgpu_ring_write(ring, 0);
amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
amdgpu_ring_write(ring, 0);
}
}

/*
* vm
* VMID 0 is the physical GPU addresses as used by the kernel.

@ -3054,14 +3092,6 @@ static void gfx_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, 0xffffffff);
amdgpu_ring_write(ring, 4); /* poll interval */

if (usepfp) {
/* synce CE with ME to prevent CE fetch CEIB before context switch done */
amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
amdgpu_ring_write(ring, 0);
amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
amdgpu_ring_write(ring, 0);
}

amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
WRITE_DATA_DST_SEL(0)));

@ -5142,9 +5172,11 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = {
.parse_cs = NULL,
.emit_ib = gfx_v7_0_ring_emit_ib_gfx,
.emit_fence = gfx_v7_0_ring_emit_fence_gfx,
.emit_pipeline_sync = gfx_v7_0_ring_emit_pipeline_sync,
.emit_vm_flush = gfx_v7_0_ring_emit_vm_flush,
.emit_gds_switch = gfx_v7_0_ring_emit_gds_switch,
.emit_hdp_flush = gfx_v7_0_ring_emit_hdp_flush,
.emit_hdp_invalidate = gfx_v7_0_ring_emit_hdp_invalidate,
.test_ring = gfx_v7_0_ring_test_ring,
.test_ib = gfx_v7_0_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,

@ -5158,9 +5190,11 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = {
.parse_cs = NULL,
.emit_ib = gfx_v7_0_ring_emit_ib_compute,
.emit_fence = gfx_v7_0_ring_emit_fence_compute,
.emit_pipeline_sync = gfx_v7_0_ring_emit_pipeline_sync,
.emit_vm_flush = gfx_v7_0_ring_emit_vm_flush,
.emit_gds_switch = gfx_v7_0_ring_emit_gds_switch,
.emit_hdp_flush = gfx_v7_0_ring_emit_hdp_flush,
.emit_hdp_invalidate = gfx_v7_0_ring_emit_hdp_invalidate,
.test_ring = gfx_v7_0_ring_test_ring,
.test_ib = gfx_v7_0_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
@ -706,8 +706,7 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring)
ib.ptr[2] = 0xDEADBEEF;
ib.length_dw = 3;

r = amdgpu_ib_schedule(ring, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED,
NULL, &f);
r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
if (r)
goto err2;

@ -1262,8 +1261,7 @@ static int gfx_v8_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4);

/* shedule the ib on the ring */
r = amdgpu_ib_schedule(ring, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED,
NULL, &f);
r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
if (r) {
DRM_ERROR("amdgpu: ib submit failed (%d).\n", r);
goto fail;

@ -4589,6 +4587,18 @@ static void gfx_v8_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, 0x20); /* poll interval */
}

static void gfx_v8_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
{
amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
WRITE_DATA_DST_SEL(0) |
WR_CONFIRM));
amdgpu_ring_write(ring, mmHDP_DEBUG0);
amdgpu_ring_write(ring, 0);
amdgpu_ring_write(ring, 1);

}

static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
struct amdgpu_ib *ib)
{

@ -4682,8 +4692,7 @@ static void gfx_v8_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr,

}

static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
unsigned vm_id, uint64_t pd_addr)
static void gfx_v8_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
uint32_t seq = ring->fence_drv.sync_seq;

@ -4706,6 +4715,12 @@ static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
amdgpu_ring_write(ring, 0);
}
}

static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
unsigned vm_id, uint64_t pd_addr)
{
int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);

amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |

@ -5028,9 +5043,11 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = {
.parse_cs = NULL,
.emit_ib = gfx_v8_0_ring_emit_ib_gfx,
.emit_fence = gfx_v8_0_ring_emit_fence_gfx,
.emit_pipeline_sync = gfx_v8_0_ring_emit_pipeline_sync,
.emit_vm_flush = gfx_v8_0_ring_emit_vm_flush,
.emit_gds_switch = gfx_v8_0_ring_emit_gds_switch,
.emit_hdp_flush = gfx_v8_0_ring_emit_hdp_flush,
.emit_hdp_invalidate = gfx_v8_0_ring_emit_hdp_invalidate,
.test_ring = gfx_v8_0_ring_test_ring,
.test_ib = gfx_v8_0_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,

@ -5044,9 +5061,11 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
.parse_cs = NULL,
.emit_ib = gfx_v8_0_ring_emit_ib_compute,
.emit_fence = gfx_v8_0_ring_emit_fence_compute,
.emit_pipeline_sync = gfx_v8_0_ring_emit_pipeline_sync,
.emit_vm_flush = gfx_v8_0_ring_emit_vm_flush,
.emit_gds_switch = gfx_v8_0_ring_emit_gds_switch,
.emit_hdp_flush = gfx_v8_0_ring_emit_hdp_flush,
.emit_hdp_invalidate = gfx_v8_0_ring_emit_hdp_invalidate,
.test_ring = gfx_v8_0_ring_test_ring,
.test_ib = gfx_v8_0_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
@ -339,7 +339,7 @@ static void gmc_v7_0_mc_program(struct amdgpu_device *adev)
WREG32(mmBIF_FB_EN, BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK);

tmp = RREG32(mmHDP_MISC_CNTL);
tmp = REG_SET_FIELD(tmp, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 1);
tmp = REG_SET_FIELD(tmp, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 0);
WREG32(mmHDP_MISC_CNTL, tmp);

tmp = RREG32(mmHDP_HOST_PATH_CNTL);

@ -386,7 +386,7 @@ static void gmc_v8_0_mc_program(struct amdgpu_device *adev)
WREG32(mmBIF_FB_EN, BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK);

tmp = RREG32(mmHDP_MISC_CNTL);
tmp = REG_SET_FIELD(tmp, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 1);
tmp = REG_SET_FIELD(tmp, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 0);
WREG32(mmHDP_MISC_CNTL, tmp);

tmp = RREG32(mmHDP_HOST_PATH_CNTL);
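Aside: both gmc hunks above flip the FLUSH_INVALIDATE_CACHE field from 1 to 0 with a read-modify-write of HDP_MISC_CNTL. A hedged sketch of the mask/shift update such a field helper performs; the macro and the bit position below are simplified stand-ins for illustration, not the kernel's REG_SET_FIELD definition or the real register layout:

	#include <stdint.h>
	#include <stdio.h>

	/* Simplified stand-in for a register field helper: a field is
	 * described by its shift and the mask at that shift. */
	#define FIELD_SET(reg, shift, mask, val) \
		(((reg) & ~(mask)) | (((uint32_t)(val) << (shift)) & (mask)))

	/* Hypothetical layout: treat FLUSH_INVALIDATE_CACHE as bit 3. */
	#define FLUSH_INVALIDATE_CACHE_SHIFT 3
	#define FLUSH_INVALIDATE_CACHE_MASK  (1u << FLUSH_INVALIDATE_CACHE_SHIFT)

	int main(void)
	{
		uint32_t hdp_misc_cntl = 0x0000000Cu; /* pretend register read */

		/* Clear the field, as the new code in the hunks does. */
		hdp_misc_cntl = FIELD_SET(hdp_misc_cntl,
					  FLUSH_INVALIDATE_CACHE_SHIFT,
					  FLUSH_INVALIDATE_CACHE_MASK, 0);

		printf("HDP_MISC_CNTL = 0x%08x\n", (unsigned)hdp_misc_cntl);
		return 0;
	}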
@ -300,6 +300,13 @@ static void sdma_v2_4_ring_emit_hdp_flush(struct amdgpu_ring *ring)
SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
}

static void sdma_v2_4_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
{
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
amdgpu_ring_write(ring, mmHDP_DEBUG0);
amdgpu_ring_write(ring, 1);
}
/**
* sdma_v2_4_ring_emit_fence - emit a fence on the DMA ring
*

@ -694,8 +701,7 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring)
ib.ptr[7] = SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
ib.length_dw = 8;

r = amdgpu_ib_schedule(ring, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED,
NULL, &f);
r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
if (r)
goto err1;

@ -873,6 +879,31 @@ static void sdma_v2_4_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib
SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
}

/**
* sdma_v2_4_ring_emit_pipeline_sync - sync the pipeline
*
* @ring: amdgpu_ring pointer
*
* Make sure all previous operations are completed (CIK).
*/
static void sdma_v2_4_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
uint32_t seq = ring->fence_drv.sync_seq;
uint64_t addr = ring->fence_drv.gpu_addr;

/* wait for idle */
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) |
SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3) | /* equal */
SDMA_PKT_POLL_REGMEM_HEADER_MEM_POLL(1));
amdgpu_ring_write(ring, addr & 0xfffffffc);
amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
amdgpu_ring_write(ring, seq); /* reference */
amdgpu_ring_write(ring, 0xfffffff); /* mask */
amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(4)); /* retry count, poll interval */
}

/**
* sdma_v2_4_ring_emit_vm_flush - cik vm flush using sDMA
*

@ -1274,8 +1305,10 @@ static const struct amdgpu_ring_funcs sdma_v2_4_ring_funcs = {
.parse_cs = NULL,
.emit_ib = sdma_v2_4_ring_emit_ib,
.emit_fence = sdma_v2_4_ring_emit_fence,
.emit_pipeline_sync = sdma_v2_4_ring_emit_pipeline_sync,
.emit_vm_flush = sdma_v2_4_ring_emit_vm_flush,
.emit_hdp_flush = sdma_v2_4_ring_emit_hdp_flush,
.emit_hdp_invalidate = sdma_v2_4_ring_emit_hdp_invalidate,
.test_ring = sdma_v2_4_ring_test_ring,
.test_ib = sdma_v2_4_ring_test_ib,
.insert_nop = sdma_v2_4_ring_insert_nop,
@ -410,6 +410,14 @@ static void sdma_v3_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
}

static void sdma_v3_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
{
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
amdgpu_ring_write(ring, mmHDP_DEBUG0);
amdgpu_ring_write(ring, 1);
}

/**
* sdma_v3_0_ring_emit_fence - emit a fence on the DMA ring
*

@ -845,8 +853,7 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring)
ib.ptr[7] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
ib.length_dw = 8;

r = amdgpu_ib_schedule(ring, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED,
NULL, &f);
r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
if (r)
goto err1;

@ -1023,6 +1030,31 @@ static void sdma_v3_0_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib
SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
}

/**
* sdma_v3_0_ring_emit_pipeline_sync - sync the pipeline
*
* @ring: amdgpu_ring pointer
*
* Make sure all previous operations are completed (CIK).
*/
static void sdma_v3_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
uint32_t seq = ring->fence_drv.sync_seq;
uint64_t addr = ring->fence_drv.gpu_addr;

/* wait for idle */
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) |
SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3) | /* equal */
SDMA_PKT_POLL_REGMEM_HEADER_MEM_POLL(1));
amdgpu_ring_write(ring, addr & 0xfffffffc);
amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
amdgpu_ring_write(ring, seq); /* reference */
amdgpu_ring_write(ring, 0xfffffff); /* mask */
amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(4)); /* retry count, poll interval */
}

/**
* sdma_v3_0_ring_emit_vm_flush - cik vm flush using sDMA
*

@ -1541,8 +1573,10 @@ static const struct amdgpu_ring_funcs sdma_v3_0_ring_funcs = {
.parse_cs = NULL,
.emit_ib = sdma_v3_0_ring_emit_ib,
.emit_fence = sdma_v3_0_ring_emit_fence,
.emit_pipeline_sync = sdma_v3_0_ring_emit_pipeline_sync,
.emit_vm_flush = sdma_v3_0_ring_emit_vm_flush,
.emit_hdp_flush = sdma_v3_0_ring_emit_hdp_flush,
.emit_hdp_invalidate = sdma_v3_0_ring_emit_hdp_invalidate,
.test_ring = sdma_v3_0_ring_test_ring,
.test_ib = sdma_v3_0_ring_test_ib,
.insert_nop = sdma_v3_0_ring_insert_nop,
@ -1071,26 +1071,22 @@ static int vi_common_early_init(void *handle)
adev->external_rev_id = 0xFF;
switch (adev->asic_type) {
case CHIP_TOPAZ:
adev->has_uvd = false;
adev->cg_flags = 0;
adev->pg_flags = 0;
adev->external_rev_id = 0x1;
break;
case CHIP_FIJI:
adev->has_uvd = true;
adev->cg_flags = 0;
adev->pg_flags = 0;
adev->external_rev_id = adev->rev_id + 0x3c;
break;
case CHIP_TONGA:
adev->has_uvd = true;
adev->cg_flags = 0;
adev->pg_flags = 0;
adev->external_rev_id = adev->rev_id + 0x14;
break;
case CHIP_CARRIZO:
case CHIP_STONEY:
adev->has_uvd = true;
adev->cg_flags = 0;
adev->pg_flags = 0;
adev->external_rev_id = adev->rev_id + 0x1;
@ -744,8 +744,9 @@ static int cz_tf_update_sclk_limit(struct pp_hwmgr *hwmgr,
cz_hwmgr->sclk_dpm.soft_max_clk = table->entries[table->count - 1].clk;

clock = hwmgr->display_config.min_core_set_clock;
;
if (clock == 0)
printk(KERN_ERR "[ powerplay ] min_core_set_clock not set\n");
printk(KERN_INFO "[ powerplay ] min_core_set_clock not set\n");

if (cz_hwmgr->sclk_dpm.hard_min_clk != clock) {
cz_hwmgr->sclk_dpm.hard_min_clk = clock;
@ -34,6 +34,11 @@ static int phm_run_table(struct pp_hwmgr *hwmgr,
int result = 0;
phm_table_function *function;

if (rt_table->function_list == NULL) {
printk(KERN_INFO "[ powerplay ] this function not implement!\n");
return 0;
}

for (function = rt_table->function_list; NULL != *function; function++) {
int tmp = (*function)(hwmgr, input, output, temp_storage, result);

@ -57,9 +62,9 @@ int phm_dispatch_table(struct pp_hwmgr *hwmgr,
int result = 0;
void *temp_storage = NULL;

if (hwmgr == NULL || rt_table == NULL || rt_table->function_list == NULL) {
if (hwmgr == NULL || rt_table == NULL) {
printk(KERN_ERR "[ powerplay ] Invalid Parameter!\n");
return 0; /*temp return ture because some function not implement on some asic */
return -EINVAL;
}

if (0 != rt_table->storage_size) {
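Aside: the powerplay hunks above separate the two cases: NULL hwmgr or table is now a hard -EINVAL, while an empty function list is merely "not implemented" and still succeeds. A small self-contained C sketch of that dispatch pattern, with hypothetical names (dispatch_table_like, run_table_like) rather than the real powerplay types:

	#include <errno.h>
	#include <stdio.h>

	typedef int (*table_fn)(void *hwmgr, void *input, void *output);

	struct run_table_like {
		table_fn *function_list; /* NULL-terminated array, may be NULL */
	};

	/* Invalid arguments are an error; an empty table merely reports
	 * "not implemented" and succeeds, as in the hunks above. */
	static int dispatch_table_like(void *hwmgr, struct run_table_like *rt)
	{
		table_fn *fn;
		int result = 0;

		if (hwmgr == NULL || rt == NULL)
			return -EINVAL;

		if (rt->function_list == NULL) {
			printf("table not implemented, nothing to run\n");
			return 0;
		}

		for (fn = rt->function_list; *fn != NULL; fn++) {
			result = (*fn)(hwmgr, NULL, NULL);
			if (result)
				break;
		}
		return result;
	}

	static int say_hello(void *hwmgr, void *input, void *output)
	{
		(void)hwmgr; (void)input; (void)output;
		printf("hello from a table entry\n");
		return 0;
	}

	int main(void)
	{
		int dummy_hwmgr;
		table_fn list[] = { say_hello, NULL };
		struct run_table_like rt = { .function_list = list };

		return dispatch_table_like(&dummy_hwmgr, &rt) ? 1 : 0;
	}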
@ -275,13 +275,13 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
if (ASIC_IS_DCE3(rdev) && !ASIC_IS_DCE6(rdev))
atombios_enable_crtc_memreq(crtc, ATOM_ENABLE);
atombios_blank_crtc(crtc, ATOM_DISABLE);
drm_vblank_post_modeset(dev, radeon_crtc->crtc_id);
drm_vblank_on(dev, radeon_crtc->crtc_id);
radeon_crtc_load_lut(crtc);
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id);
drm_vblank_off(dev, radeon_crtc->crtc_id);
if (radeon_crtc->enabled)
atombios_blank_crtc(crtc, ATOM_ENABLE);
if (ASIC_IS_DCE3(rdev) && !ASIC_IS_DCE6(rdev))
@ -892,8 +892,6 @@ atombios_dig_encoder_setup2(struct drm_encoder *encoder, int action, int panel_m
else
args.v1.ucLaneNum = 4;

if (ENCODER_MODE_IS_DP(args.v1.ucEncoderMode) && (dp_clock == 270000))
args.v1.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER1;

@ -910,6 +908,10 @@ atombios_dig_encoder_setup2(struct drm_encoder *encoder, int action, int panel_m
args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKB;
else
args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKA;

if (ENCODER_MODE_IS_DP(args.v1.ucEncoderMode) && (dp_clock == 270000))
args.v1.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;

break;
case 2:
case 3:
@ -62,6 +62,10 @@ bool radeon_has_atpx(void) {
return radeon_atpx_priv.atpx_detected;
}

bool radeon_has_atpx_dgpu_power_cntl(void) {
return radeon_atpx_priv.atpx.functions.power_cntl;
}

/**
* radeon_atpx_call - call an ATPX method
*

@ -141,10 +145,6 @@ static void radeon_atpx_parse_functions(struct radeon_atpx_functions *f, u32 mas
*/
static int radeon_atpx_validate(struct radeon_atpx *atpx)
{
/* make sure required functions are enabled */
/* dGPU power control is required */
atpx->functions.power_cntl = true;

if (atpx->functions.px_params) {
union acpi_object *info;
struct atpx_px_params output;
@ -103,6 +103,12 @@ static const char radeon_family_name[][16] = {
"LAST",
};

#if defined(CONFIG_VGA_SWITCHEROO)
bool radeon_has_atpx_dgpu_power_cntl(void);
#else
static inline bool radeon_has_atpx_dgpu_power_cntl(void) { return false; }
#endif

#define RADEON_PX_QUIRK_DISABLE_PX (1 << 0)
#define RADEON_PX_QUIRK_LONG_WAKEUP (1 << 1)

@ -1433,7 +1439,7 @@ int radeon_device_init(struct radeon_device *rdev,
* ignore it */
vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);

if (rdev->flags & RADEON_IS_PX)
if ((rdev->flags & RADEON_IS_PX) && radeon_has_atpx_dgpu_power_cntl())
runtime = true;
vga_switcheroo_register_client(rdev->pdev, &radeon_switcheroo_ops, runtime);
if (runtime)
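Aside: with the hunk above, runtime PM on PX laptops is only enabled when the ACPI ATPX method also exposes dGPU power control. A trivial hedged sketch of that two-condition gate in standalone C, using made-up names (FLAG_IS_PX, want_runtime_pm) rather than the radeon symbols:

	#include <stdbool.h>
	#include <stdio.h>

	#define FLAG_IS_PX (1u << 0) /* hypothetical stand-in for RADEON_IS_PX */

	/* Stand-in for radeon_has_atpx_dgpu_power_cntl(): reports whether
	 * the platform's ATPX method can actually power the dGPU off. */
	static bool has_dgpu_power_cntl(bool atpx_power_cntl)
	{
		return atpx_power_cntl;
	}

	/* Runtime PM is only worth enabling when both conditions hold. */
	static bool want_runtime_pm(unsigned flags, bool atpx_power_cntl)
	{
		return (flags & FLAG_IS_PX) && has_dgpu_power_cntl(atpx_power_cntl);
	}

	int main(void)
	{
		printf("PX with power_cntl    -> %d\n",
		       want_runtime_pm(FLAG_IS_PX, true));
		printf("PX without power_cntl -> %d\n",
		       want_runtime_pm(FLAG_IS_PX, false));
		return 0;
	}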
@ -331,13 +331,13 @@ static void radeon_crtc_dpms(struct drm_crtc *crtc, int mode)
RADEON_CRTC_DISP_REQ_EN_B));
WREG32_P(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl, ~(mask | crtc_ext_cntl));
}
drm_vblank_post_modeset(dev, radeon_crtc->crtc_id);
drm_vblank_on(dev, radeon_crtc->crtc_id);
radeon_crtc_load_lut(crtc);
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id);
drm_vblank_off(dev, radeon_crtc->crtc_id);
if (radeon_crtc->crtc_id)
WREG32_P(RADEON_CRTC2_GEN_CNTL, mask, ~(RADEON_CRTC2_EN | mask));
else {