Merge branch 'drm-next-4.15' of git://people.freedesktop.org/~agd5f/linux into drm-next
A few more fixes for 4.15.

* 'drm-next-4.15' of git://people.freedesktop.org/~agd5f/linux:
  drm/amdgpu: use irq-safe lock for kiq->ring_lock
  drm/amdgpu: bypass lru touch for KIQ ring submission
  drm/amdgpu: Potential uninitialized variable in amdgpu_vm_update_directories()
  drm/amdgpu: potential uninitialized variable in amdgpu_vce_ring_parse_cs()
  drm/amd/powerplay: initialize a variable before using it
  drm/amd/powerplay: suppress KASAN out of bounds warning in vega10_populate_all_memory_levels
  drm/amd/amdgpu: fix evicted VRAM bo adjudgement condition
commit 086711708b
@@ -136,7 +136,8 @@ void amdgpu_ring_commit(struct amdgpu_ring *ring)
 	if (ring->funcs->end_use)
 		ring->funcs->end_use(ring);
 
-	amdgpu_ring_lru_touch(ring->adev, ring);
+	if (ring->funcs->type != AMDGPU_RING_TYPE_KIQ)
+		amdgpu_ring_lru_touch(ring->adev, ring);
 }
 
 /**
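The hunk above makes amdgpu_ring_commit() skip the shared ring-LRU bookkeeping when the ring being committed is the KIQ ring ("bypass lru touch for KIQ ring submission"). A minimal, compilable sketch of that exempt-one-ring-type pattern follows; the types and names in it are invented, not amdgpu's.

/* Minimal sketch (invented types, not amdgpu code): exempt one internal
 * ring type from LRU bookkeeping at commit time. */
#include <stdio.h>

enum ring_type { RING_TYPE_GFX, RING_TYPE_COMPUTE, RING_TYPE_KIQ };

struct ring {
	enum ring_type type;
	const char *name;
};

/* Stand-in for amdgpu_ring_lru_touch(): move the ring to the LRU front. */
static void lru_touch(struct ring *ring)
{
	printf("LRU touch: %s\n", ring->name);
}

/* Stand-in for amdgpu_ring_commit(): a KIQ-style ring never reaches the
 * shared LRU path. */
static void ring_commit(struct ring *ring)
{
	if (ring->type != RING_TYPE_KIQ)
		lru_touch(ring);
}

int main(void)
{
	struct ring gfx = { RING_TYPE_GFX, "gfx" };
	struct ring kiq = { RING_TYPE_KIQ, "kiq" };

	ring_commit(&gfx);	/* touches the LRU */
	ring_commit(&kiq);	/* bypasses it */
	return 0;
}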
@@ -1193,9 +1193,6 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
 	unsigned long num_pages = bo->mem.num_pages;
 	struct drm_mm_node *node = bo->mem.mm_node;
 
-	if (bo->mem.start != AMDGPU_BO_INVALID_OFFSET)
-		return ttm_bo_eviction_valuable(bo, place);
-
 	switch (bo->mem.mem_type) {
 	case TTM_PL_TT:
 		return true;
@@ -1210,7 +1207,7 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
 			num_pages -= node->size;
 			++node;
 		}
-		break;
+		return false;
 
 	default:
 		break;
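Together, the two hunks above change how a VRAM buffer is judged worth evicting: the early bail-out through ttm_bo_eviction_valuable() is removed, and when the per-node scan finds no drm_mm node overlapping the requested placement range the function now returns false instead of breaking out of the switch. A compilable sketch of that scan-and-decide pattern, using toy types rather than the TTM/amdgpu structures:

/* Minimal sketch (toy types, not TTM): only report a buffer as worth
 * evicting if at least one of its memory nodes overlaps the wanted
 * page range; otherwise say no instead of falling through. */
#include <stdbool.h>
#include <stdio.h>

struct mm_node {
	unsigned long start;	/* first page of this node */
	unsigned long size;	/* number of pages in this node */
};

static bool eviction_valuable(const struct mm_node *node,
			      unsigned long num_pages,
			      unsigned long fpfn, unsigned long lpfn)
{
	while (num_pages) {
		/* Node intersects [fpfn, lpfn)? Then evicting helps. */
		if (fpfn < node->start + node->size &&
		    !(lpfn && lpfn <= node->start))
			return true;

		num_pages -= node->size;
		++node;
	}

	/* No node overlaps the requested range: eviction gains nothing. */
	return false;
}

int main(void)
{
	struct mm_node nodes[] = { { 0, 16 }, { 64, 16 } };

	printf("%d\n", eviction_valuable(nodes, 32, 8, 12));	/* 1 */
	printf("%d\n", eviction_valuable(nodes, 32, 32, 48));	/* 0 */
	return 0;
}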
@@ -648,7 +648,7 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
 	uint32_t allocated = 0;
 	uint32_t tmp, handle = 0;
 	uint32_t *size = &tmp;
-	int i, r, idx = 0;
+	int i, r = 0, idx = 0;
 
 	p->job->vm = NULL;
 	ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);
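This hunk, like the amdgpu_vm_update_directories() hunk further down, initializes the return code so that a path which never reaches an assignment inside the parsing loop still returns a defined value. A compilable sketch of the hazard, with made-up names:

/* Minimal sketch (made-up names): if the loop body is the only place
 * that assigns the return code, an empty input leaves it undefined
 * unless it is initialized up front. */
#include <stdio.h>

static int process_item(int item)
{
	return item < 0 ? -1 : 0;	/* stand-in for real work */
}

static int parse_all(const int *items, int count)
{
	int i, r = 0;	/* r = 0: defined even when count == 0 */

	for (i = 0; i < count; i++) {
		r = process_item(items[i]);
		if (r)
			break;
	}

	return r;	/* without the initializer, count == 0 returns garbage */
}

int main(void)
{
	printf("%d\n", parse_all(NULL, 0));	/* 0, not an indeterminate value */
	return 0;
}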
@@ -114,18 +114,19 @@ void amdgpu_virt_init_setting(struct amdgpu_device *adev)
 uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
 {
 	signed long r;
+	unsigned long flags;
 	uint32_t val, seq;
 	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
 	struct amdgpu_ring *ring = &kiq->ring;
 
 	BUG_ON(!ring->funcs->emit_rreg);
 
-	spin_lock(&kiq->ring_lock);
+	spin_lock_irqsave(&kiq->ring_lock, flags);
 	amdgpu_ring_alloc(ring, 32);
 	amdgpu_ring_emit_rreg(ring, reg);
 	amdgpu_fence_emit_polling(ring, &seq);
 	amdgpu_ring_commit(ring);
-	spin_unlock(&kiq->ring_lock);
+	spin_unlock_irqrestore(&kiq->ring_lock, flags);
 
 	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
 	if (r < 1) {
@@ -140,18 +141,19 @@ uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
 void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
 {
 	signed long r;
+	unsigned long flags;
 	uint32_t seq;
 	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
 	struct amdgpu_ring *ring = &kiq->ring;
 
 	BUG_ON(!ring->funcs->emit_wreg);
 
-	spin_lock(&kiq->ring_lock);
+	spin_lock_irqsave(&kiq->ring_lock, flags);
 	amdgpu_ring_alloc(ring, 32);
 	amdgpu_ring_emit_wreg(ring, reg, v);
 	amdgpu_fence_emit_polling(ring, &seq);
 	amdgpu_ring_commit(ring);
-	spin_unlock(&kiq->ring_lock);
+	spin_unlock_irqrestore(&kiq->ring_lock, flags);
 
 	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
 	if (r < 1)
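Both KIQ register helpers above switch from spin_lock()/spin_unlock() to the irq-saving variants, so kiq->ring_lock can be taken safely regardless of whether the caller runs with interrupts enabled. A minimal kernel-style sketch of that pattern; the my_dev structure is hypothetical and this is not the amdgpu code:

/* Minimal kernel-style sketch (hypothetical my_dev, not amdgpu code):
 * spin_lock_irqsave() disables local interrupts and remembers their
 * previous state in 'flags'; spin_unlock_irqrestore() puts that state
 * back, so the pattern works whether or not the caller already has
 * interrupts disabled. */
#include <linux/spinlock.h>
#include <linux/types.h>

struct my_dev {
	spinlock_t ring_lock;
	u32 last_value;
};

static u32 my_dev_read(struct my_dev *dev)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&dev->ring_lock, flags);
	val = dev->last_value;		/* critical section */
	spin_unlock_irqrestore(&dev->ring_lock, flags);

	return val;
}

static void my_dev_write(struct my_dev *dev, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->ring_lock, flags);
	dev->last_value = v;		/* critical section */
	spin_unlock_irqrestore(&dev->ring_lock, flags);
}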
@@ -1244,7 +1244,7 @@ static void amdgpu_vm_invalidate_level(struct amdgpu_vm *vm,
 int amdgpu_vm_update_directories(struct amdgpu_device *adev,
 				 struct amdgpu_vm *vm)
 {
-	int r;
+	int r = 0;
 
 	spin_lock(&vm->status_lock);
 	while (!list_empty(&vm->relocated)) {
@@ -1807,6 +1807,10 @@ static int vega10_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
 	mem_channels = (cgs_read_register(hwmgr->device, reg) &
 			DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK) >>
 			DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT;
+	PP_ASSERT_WITH_CODE(mem_channels < ARRAY_SIZE(channel_number),
+			"Mem Channel Index Exceeded maximum!",
+			return -1);
+
 	pp_table->NumMemoryChannels = cpu_to_le16(mem_channels);
 	pp_table->MemoryChannelWidth =
 			cpu_to_le16(HBM_MEMORY_CHANNEL_WIDTH *
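The added PP_ASSERT_WITH_CODE() rejects an interleave-channel index read back from hardware before it is used to subscript channel_number[], which is the out-of-bounds access KASAN was warning about. A compilable sketch of the same bounds-check-before-index pattern, with a made-up lookup table:

/* Minimal sketch (made-up table, not the powerplay code): validate an
 * index that came from hardware before using it as an array subscript. */
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const unsigned int channel_map[] = { 1, 2, 0, 4, 0, 8 };

static int lookup_channels(unsigned int hw_index, unsigned int *out)
{
	/* Reject out-of-range values instead of reading past the table. */
	if (hw_index >= ARRAY_SIZE(channel_map)) {
		fprintf(stderr, "channel index %u exceeds maximum\n", hw_index);
		return -1;
	}

	*out = channel_map[hw_index];
	return 0;
}

int main(void)
{
	unsigned int channels;

	if (lookup_channels(3, &channels) == 0)
		printf("channels = %u\n", channels);	/* 4 */

	lookup_channels(42, &channels);			/* rejected */
	return 0;
}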
@@ -3134,6 +3138,8 @@ static int vega10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
 		minimum_clocks.memoryClock = hwmgr->display_config.min_mem_set_clock;
 
 	if (PP_CAP(PHM_PlatformCaps_StablePState)) {
+		stable_pstate_sclk_dpm_percentage =
+			data->registry_data.stable_pstate_sclk_dpm_percentage;
 		PP_ASSERT_WITH_CODE(
 			data->registry_data.stable_pstate_sclk_dpm_percentage >= 1 &&
 			data->registry_data.stable_pstate_sclk_dpm_percentage <= 100,
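Here the local stable_pstate_sclk_dpm_percentage is loaded from the registry data up front, next to the range assertion, instead of being consumed later while still uninitialized. A compilable sketch of that load-validate-use shape; the names and the clock formula below are invented for illustration:

/* Minimal sketch (invented names and formula, not the vega10 code):
 * copy a tunable into a local and validate it before it feeds a
 * calculation, instead of consuming an uninitialized value. */
#include <stdio.h>

struct registry_data {
	unsigned int stable_pstate_sclk_dpm_percentage;	/* expected 1..100 */
};

static int stable_pstate_sclk(const struct registry_data *reg,
			      unsigned int max_sclk, unsigned int *out)
{
	unsigned int percentage = reg->stable_pstate_sclk_dpm_percentage;

	if (percentage < 1 || percentage > 100) {
		fprintf(stderr, "percentage must be within [1, 100]\n");
		return -1;
	}

	*out = max_sclk / 100 * percentage;	/* scaled target clock */
	return 0;
}

int main(void)
{
	struct registry_data reg = { .stable_pstate_sclk_dpm_percentage = 75 };
	unsigned int sclk;

	if (stable_pstate_sclk(&reg, 150000, &sclk) == 0)
		printf("stable pstate sclk = %u\n", sclk);	/* 112500 */
	return 0;
}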