mirror of https://gitee.com/openkylin/linux.git
Merge branch 'drm-next-4.6' of git://people.freedesktop.org/~agd5f/linux into drm-next
Some more radeon and amdgpu stuff for drm-next. Mostly just bug fixes for new features and cleanups.

* 'drm-next-4.6' of git://people.freedesktop.org/~agd5f/linux:
  drm/amdgpu: fix rb bitmap & cu bitmap calculation
  drm/amdgpu: trace the pd_addr in vm_grab_id as well
  drm/amdgpu: fix VM faults caused by vm_grab_id() v4
  drm/amdgpu: update radeon acpi header
  drm/radeon: update radeon acpi header
  drm/amd: cleanup get_mfd_cell_dev()
  drm/amdgpu: fix error handling in amdgpu_bo_list_set
  drm/amd/powerplay: fix code style warning.
  drm/amd: Do not make DRM_AMD_ACP default to y
  drm/amdgpu/gfx: fix off by one in rb rework (v2)
commit 550e3b23a5
@@ -2,7 +2,6 @@ menu "ACP Configuration"

 config DRM_AMD_ACP
     bool "Enable ACP IP support"
-    default y
     select MFD_CORE
     select PM_GENERIC_DOMAINS if PM
     help
@@ -769,8 +769,9 @@ struct amdgpu_ib {
     uint32_t                 *ptr;
     struct amdgpu_fence      *fence;
     struct amdgpu_user_fence *user;
-    bool                     grabbed_vmid;
     struct amdgpu_vm         *vm;
+    unsigned                 vm_id;
+    uint64_t                 vm_pd_addr;
     struct amdgpu_ctx        *ctx;
     uint32_t                 gds_base, gds_size;
     uint32_t                 gws_base, gws_size;
@@ -877,10 +878,10 @@ struct amdgpu_vm_pt {
 };

 struct amdgpu_vm_id {
-    unsigned                    id;
-    uint64_t                    pd_gpu_addr;
+    struct amdgpu_vm_manager_id *mgr_id;
+    uint64_t                    pd_gpu_addr;
     /* last flushed PD/PT update */
-    struct fence                *flushed_updates;
+    struct fence                *flushed_updates;
 };

 struct amdgpu_vm {
@@ -954,10 +955,11 @@ void amdgpu_vm_get_pt_bos(struct amdgpu_vm *vm, struct list_head *duplicates);
 void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
                                   struct amdgpu_vm *vm);
 int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
-                      struct amdgpu_sync *sync, struct fence *fence);
+                      struct amdgpu_sync *sync, struct fence *fence,
+                      unsigned *vm_id, uint64_t *vm_pd_addr);
 void amdgpu_vm_flush(struct amdgpu_ring *ring,
-                     struct amdgpu_vm *vm,
-                     struct fence *updates);
+                     unsigned vmid,
+                     uint64_t pd_addr);
 uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr);
 int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
                                     struct amdgpu_vm *vm);
@@ -240,12 +240,10 @@ static int acp_poweron(struct generic_pm_domain *genpd)
 static struct device *get_mfd_cell_dev(const char *device_name, int r)
 {
     char auto_dev_name[25];
-    char buf[8];
     struct device *dev;

-    sprintf(buf, ".%d.auto", r);
-    strcpy(auto_dev_name, device_name);
-    strcat(auto_dev_name, buf);
+    snprintf(auto_dev_name, sizeof(auto_dev_name),
+             "%s.%d.auto", device_name, r);
     dev = bus_find_device_by_name(&platform_bus_type, NULL, auto_dev_name);
     dev_info(dev, "device %s added to pm domain\n", auto_dev_name);
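The cleanup above collapses an unchecked sprintf()/strcpy()/strcat() sequence into a single length-bounded snprintf(). A minimal user-space sketch of the same pattern (the device name and index below are made-up example values, not taken from the driver):

#include <stdio.h>

int main(void)
{
    char auto_dev_name[25];

    /* One bounded call replaces sprintf()+strcpy()+strcat(): the
     * result is truncated instead of overflowing the buffer. */
    snprintf(auto_dev_name, sizeof(auto_dev_name),
             "%s.%d.auto", "example_cell", 0);

    printf("%s\n", auto_dev_name);    /* "example_cell.0.auto" */
    return 0;
}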
@@ -118,6 +118,7 @@ static int amdgpu_bo_list_set(struct amdgpu_device *adev,
         usermm = amdgpu_ttm_tt_get_usermm(entry->robj->tbo.ttm);
         if (usermm) {
             if (usermm != current->mm) {
+                amdgpu_bo_unref(&entry->robj);
                 r = -EPERM;
                 goto error_free;
             }

@@ -151,6 +152,8 @@ static int amdgpu_bo_list_set(struct amdgpu_device *adev,
     return 0;

 error_free:
+    while (i--)
+        amdgpu_bo_unref(&array[i].robj);
     drm_free_large(array);
     return r;
 }
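Two things change in the error handling here: the entry that fails the usermm check now drops its own reference before bailing out, and the shared error_free label unwinds every buffer object already added. The `while (i--)` idiom walks indices i-1 down to 0, releasing exactly the references taken so far and skipping the failed slot. A self-contained sketch of that unwind pattern (struct ref and its helpers are hypothetical stand-ins for amdgpu_bo and its refcounting):

#include <stdio.h>

/* Hypothetical refcounted object, standing in for amdgpu_bo. */
struct ref { int count; };

static int get_ref(struct ref *r, int fail)
{
    if (fail)
        return -1;    /* simulate e.g. the -EPERM path */
    r->count++;
    return 0;
}

static void put_ref(struct ref *r)
{
    r->count--;
}

static int fill_all(struct ref *array, int n, int fail_at)
{
    int i;

    for (i = 0; i < n; ++i)
        if (get_ref(&array[i], i == fail_at))
            goto error_free;
    return 0;

error_free:
    /* visits i-1 .. 0: only the slots that actually took a ref */
    while (i--)
        put_ref(&array[i]);
    return -1;
}

int main(void)
{
    struct ref a[4] = { 0 };

    fill_all(a, 4, 2);    /* fails at slot 2 */
    printf("%d %d %d %d\n", a[0].count, a[1].count, a[2].count, a[3].count);
    /* prints "0 0 0 0": everything taken so far was unwound */
    return 0;
}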
@@ -75,6 +75,7 @@ int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
     }

     ib->vm = vm;
+    ib->vm_id = 0;

     return 0;
 }
@@ -139,7 +140,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
         return -EINVAL;
     }

-    if (vm && !ibs->grabbed_vmid) {
+    if (vm && !ibs->vm_id) {
         dev_err(adev->dev, "VM IB without ID\n");
         return -EINVAL;
     }
@@ -152,10 +153,10 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,

     if (vm) {
         /* do context switch */
-        amdgpu_vm_flush(ring, vm, last_vm_update);
+        amdgpu_vm_flush(ring, ib->vm_id, ib->vm_pd_addr);

         if (ring->funcs->emit_gds_switch)
-            amdgpu_ring_emit_gds_switch(ring, ib->vm->ids[ring->idx].id,
+            amdgpu_ring_emit_gds_switch(ring, ib->vm_id,
                                         ib->gds_base, ib->gds_size,
                                         ib->gws_base, ib->gws_size,
                                         ib->oa_base, ib->oa_size);
@@ -105,16 +105,23 @@ static struct fence *amdgpu_job_dependency(struct amd_sched_job *sched_job)

     struct fence *fence = amdgpu_sync_get_fence(&job->sync);

-    if (fence == NULL && vm && !job->ibs->grabbed_vmid) {
+    if (fence == NULL && vm && !job->ibs->vm_id) {
         struct amdgpu_ring *ring = job->ring;
+        unsigned i, vm_id;
+        uint64_t vm_pd_addr;
         int r;

         r = amdgpu_vm_grab_id(vm, ring, &job->sync,
-                              &job->base.s_fence->base);
+                              &job->base.s_fence->base,
+                              &vm_id, &vm_pd_addr);
         if (r)
             DRM_ERROR("Error getting VM ID (%d)\n", r);
-        else
-            job->ibs->grabbed_vmid = true;
+        else {
+            for (i = 0; i < job->num_ibs; ++i) {
+                job->ibs[i].vm_id = vm_id;
+                job->ibs[i].vm_pd_addr = vm_pd_addr;
+            }
+        }

         fence = amdgpu_sync_get_fence(&job->sync);
     }
@@ -100,21 +100,24 @@ TRACE_EVENT(amdgpu_sched_run_job,


 TRACE_EVENT(amdgpu_vm_grab_id,
-        TP_PROTO(struct amdgpu_vm *vm, unsigned vmid, int ring),
-        TP_ARGS(vm, vmid, ring),
+        TP_PROTO(struct amdgpu_vm *vm, int ring, unsigned vmid,
+                 uint64_t pd_addr),
+        TP_ARGS(vm, ring, vmid, pd_addr),
         TP_STRUCT__entry(
                  __field(struct amdgpu_vm *, vm)
-                 __field(u32, vmid)
                  __field(u32, ring)
+                 __field(u32, vmid)
+                 __field(u64, pd_addr)
                  ),

         TP_fast_assign(
                  __entry->vm = vm;
-                 __entry->vmid = vmid;
                  __entry->ring = ring;
+                 __entry->vmid = vmid;
+                 __entry->pd_addr = pd_addr;
                  ),
-        TP_printk("vm=%p, id=%u, ring=%u", __entry->vm, __entry->vmid,
-                  __entry->ring)
+        TP_printk("vm=%p, ring=%u, id=%u, pd_addr=%010Lx", __entry->vm,
+                  __entry->ring, __entry->vmid, __entry->pd_addr)
 );

 TRACE_EVENT(amdgpu_vm_bo_map,
@@ -231,8 +234,8 @@ TRACE_EVENT(amdgpu_vm_flush,
                  __entry->ring = ring;
                  __entry->id = id;
                  ),
-        TP_printk("pd_addr=%010Lx, ring=%u, id=%u",
-                  __entry->pd_addr, __entry->ring, __entry->id)
+        TP_printk("ring=%u, id=%u, pd_addr=%010Lx",
+                  __entry->ring, __entry->id, __entry->pd_addr)
 );

 TRACE_EVENT(amdgpu_bo_list_set,
@@ -50,6 +50,9 @@
  * SI supports 16.
  */

+/* Special value that no flush is necessary */
+#define AMDGPU_VM_NO_FLUSH (~0ll)
+
 /**
  * amdgpu_vm_num_pde - return the number of page directory entries
  *
@@ -157,50 +160,70 @@ void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
  * Allocate an id for the vm, adding fences to the sync obj as necessary.
  */
 int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
-                      struct amdgpu_sync *sync, struct fence *fence)
+                      struct amdgpu_sync *sync, struct fence *fence,
+                      unsigned *vm_id, uint64_t *vm_pd_addr)
 {
-    struct amdgpu_vm_id *vm_id = &vm->ids[ring->idx];
+    uint64_t pd_addr = amdgpu_bo_gpu_offset(vm->page_directory);
     struct amdgpu_device *adev = ring->adev;
-    struct amdgpu_vm_manager_id *id;
+    struct amdgpu_vm_id *id = &vm->ids[ring->idx];
+    struct fence *updates = sync->last_vm_update;
     int r;

     mutex_lock(&adev->vm_manager.lock);

     /* check if the id is still valid */
-    if (vm_id->id) {
+    if (id->mgr_id) {
+        struct fence *flushed = id->flushed_updates;
+        bool is_later;
         long owner;

-        id = &adev->vm_manager.ids[vm_id->id];
-        owner = atomic_long_read(&id->owner);
-        if (owner == (long)vm) {
-            list_move_tail(&id->list, &adev->vm_manager.ids_lru);
-            trace_amdgpu_vm_grab_id(vm, vm_id->id, ring->idx);
+        if (!flushed)
+            is_later = true;
+        else if (!updates)
+            is_later = false;
+        else
+            is_later = fence_is_later(updates, flushed);

-            fence_put(id->active);
-            id->active = fence_get(fence);
+        owner = atomic_long_read(&id->mgr_id->owner);
+        if (!is_later && owner == (long)id &&
+            pd_addr == id->pd_gpu_addr) {
+
+            fence_put(id->mgr_id->active);
+            id->mgr_id->active = fence_get(fence);
+
+            list_move_tail(&id->mgr_id->list,
+                           &adev->vm_manager.ids_lru);
+
+            *vm_id = id->mgr_id - adev->vm_manager.ids;
+            *vm_pd_addr = AMDGPU_VM_NO_FLUSH;
+            trace_amdgpu_vm_grab_id(vm, ring->idx, *vm_id,
+                                    *vm_pd_addr);

             mutex_unlock(&adev->vm_manager.lock);
             return 0;
         }
     }

-    /* we definately need to flush */
-    vm_id->pd_gpu_addr = ~0ll;
-
-    id = list_first_entry(&adev->vm_manager.ids_lru,
-                          struct amdgpu_vm_manager_id,
-                          list);
-    list_move_tail(&id->list, &adev->vm_manager.ids_lru);
-    atomic_long_set(&id->owner, (long)vm);
-
-    vm_id->id = id - adev->vm_manager.ids;
-    trace_amdgpu_vm_grab_id(vm, vm_id->id, ring->idx);
-
-    r = amdgpu_sync_fence(ring->adev, sync, id->active);
+    id->mgr_id = list_first_entry(&adev->vm_manager.ids_lru,
+                                  struct amdgpu_vm_manager_id,
+                                  list);
+
+    r = amdgpu_sync_fence(ring->adev, sync, id->mgr_id->active);
     if (!r) {
-        fence_put(id->active);
-        id->active = fence_get(fence);
+        fence_put(id->mgr_id->active);
+        id->mgr_id->active = fence_get(fence);
+
+        fence_put(id->flushed_updates);
+        id->flushed_updates = fence_get(updates);
+
+        id->pd_gpu_addr = pd_addr;
+
+        list_move_tail(&id->mgr_id->list, &adev->vm_manager.ids_lru);
+        atomic_long_set(&id->mgr_id->owner, (long)id);
+
+        *vm_id = id->mgr_id - adev->vm_manager.ids;
+        *vm_pd_addr = pd_addr;
+        trace_amdgpu_vm_grab_id(vm, ring->idx, *vm_id, *vm_pd_addr);
     }

     mutex_unlock(&adev->vm_manager.lock);
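The reuse path above only skips the flush when three conditions line up: no page-table update has landed since the last flush, the manager slot still belongs to this VM ID, and the page-directory address is unchanged. The three-way fence test is the subtle part; a standalone sketch of its logic (struct fence is reduced here to a bare sequence number, a simplification of the kernel's per-context fence ordering):

#include <stdbool.h>
#include <stdint.h>

/* Simplified stand-in for struct fence: seqno ordering only. */
struct fence { uint64_t seqno; };

static bool fence_is_later(const struct fence *a, const struct fence *b)
{
    return a->seqno > b->seqno;    /* real check is per fence context */
}

/* Mirrors the reuse test in amdgpu_vm_grab_id(): the cached VMID can
 * only be reused without a flush if nothing newer than the last
 * flushed PD/PT update is pending. */
static bool needs_flush(const struct fence *updates,
                        const struct fence *flushed)
{
    if (!flushed)    /* never flushed for this VM: must flush */
        return true;
    if (!updates)    /* no pending PD/PT updates: nothing newer */
        return false;
    return fence_is_later(updates, flushed);
}

When the predicate allows reuse, amdgpu_vm_grab_id() hands back AMDGPU_VM_NO_FLUSH instead of a page-directory address, and the later amdgpu_vm_flush() call becomes a no-op.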
@@ -211,35 +234,18 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
  * amdgpu_vm_flush - hardware flush the vm
  *
  * @ring: ring to use for flush
- * @vm: vm we want to flush
- * @updates: last vm update that we waited for
+ * @vmid: vmid number to use
+ * @pd_addr: address of the page directory
  *
- * Flush the vm.
+ * Emit a VM flush when it is necessary.
  */
 void amdgpu_vm_flush(struct amdgpu_ring *ring,
-                     struct amdgpu_vm *vm,
-                     struct fence *updates)
+                     unsigned vmid,
+                     uint64_t pd_addr)
 {
-    uint64_t pd_addr = amdgpu_bo_gpu_offset(vm->page_directory);
-    struct amdgpu_vm_id *vm_id = &vm->ids[ring->idx];
-    struct fence *flushed_updates = vm_id->flushed_updates;
-    bool is_later;
-
-    if (!flushed_updates)
-        is_later = true;
-    else if (!updates)
-        is_later = false;
-    else
-        is_later = fence_is_later(updates, flushed_updates);
-
-    if (pd_addr != vm_id->pd_gpu_addr || is_later) {
-        trace_amdgpu_vm_flush(pd_addr, ring->idx, vm_id->id);
-        if (is_later) {
-            vm_id->flushed_updates = fence_get(updates);
-            fence_put(flushed_updates);
-        }
-        vm_id->pd_gpu_addr = pd_addr;
-        amdgpu_ring_emit_vm_flush(ring, vm_id->id, vm_id->pd_gpu_addr);
+    if (pd_addr != AMDGPU_VM_NO_FLUSH) {
+        trace_amdgpu_vm_flush(pd_addr, ring->idx, vmid);
+        amdgpu_ring_emit_vm_flush(ring, vmid, pd_addr);
     }
 }
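After this change the pd_addr argument doubles as a control channel: an all-ones value is not a valid page-directory address, so it is safe to reserve as a "no flush required" sentinel, and the decision moves from flush time to grab time. A compilable sketch of the convention (names simplified from the driver's):

#include <stdint.h>
#include <stdio.h>

#define VM_NO_FLUSH (~0ULL)    /* sentinel: never a valid PD address */

/* Sketch of the calling convention: the ID-grabbing step hands back
 * either a real page-directory address (flush required) or the
 * sentinel (cached VMID reused, flush skipped). */
static void flush_if_needed(unsigned vmid, uint64_t pd_addr)
{
    if (pd_addr != VM_NO_FLUSH)
        printf("flush vmid %u, pd 0x%llx\n", vmid,
               (unsigned long long)pd_addr);
}

int main(void)
{
    flush_if_needed(3, 0x100000);      /* emits a flush */
    flush_if_needed(3, VM_NO_FLUSH);   /* no-op */
    return 0;
}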
@@ -1284,7 +1290,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
     int i, r;

     for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
-        vm->ids[i].id = 0;
+        vm->ids[i].mgr_id = NULL;
         vm->ids[i].flushed_updates = NULL;
     }
     vm->va = RB_ROOT;
@@ -1381,13 +1387,13 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
     amdgpu_bo_unref(&vm->page_directory);
     fence_put(vm->page_directory_fence);
     for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
-        unsigned id = vm->ids[i].id;
+        struct amdgpu_vm_id *id = &vm->ids[i];

-        atomic_long_cmpxchg(&adev->vm_manager.ids[id].owner,
-                            (long)vm, 0);
-        fence_put(vm->ids[i].flushed_updates);
+        if (id->mgr_id)
+            atomic_long_cmpxchg(&id->mgr_id->owner,
+                                (long)id, 0);
+        fence_put(id->flushed_updates);
     }

 }

 /**
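Teardown must not clear an owner field that has since been handed to someone else: another VM may already have grabbed the same manager slot. atomic_long_cmpxchg() makes the check-and-clear a single atomic step. A C11 user-space sketch of the idiom (a stand-in for the kernel primitive, not the kernel API itself):

#include <stdatomic.h>
#include <stdio.h>

/* Clear *owner only if it still holds our token; if somebody else has
 * claimed the slot in the meantime, leave it untouched. */
static void release_if_owner(atomic_long *owner, long me)
{
    long expected = me;

    atomic_compare_exchange_strong(owner, &expected, 0);
}

int main(void)
{
    atomic_long owner;

    atomic_store(&owner, 42);
    release_if_owner(&owner, 7);              /* not ours: no effect */
    printf("%ld\n", atomic_load(&owner));     /* 42 */
    release_if_owner(&owner, 42);             /* ours: cleared */
    printf("%ld\n", atomic_load(&owner));     /* 0 */
    return 0;
}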
@@ -212,7 +212,7 @@ static void cik_sdma_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
 static void cik_sdma_ring_emit_ib(struct amdgpu_ring *ring,
                                   struct amdgpu_ib *ib)
 {
-    u32 extra_bits = (ib->vm ? ib->vm->ids[ring->idx].id : 0) & 0xf;
+    u32 extra_bits = ib->vm_id & 0xf;
     u32 next_rptr = ring->wptr + 5;

     while ((next_rptr & 7) != 4)
@@ -46,9 +46,6 @@
 #define BONAIRE_GB_ADDR_CONFIG_GOLDEN 0x12010001
 #define HAWAII_GB_ADDR_CONFIG_GOLDEN 0x12011003

-#define CIK_RB_BITMAP_WIDTH_PER_SH 2
-#define HAWAII_RB_BITMAP_WIDTH_PER_SH 4
-
 #define AMDGPU_NUM_OF_VMIDS 8

 #define PIPEID(x) ((x) << 0)
@@ -1635,30 +1635,25 @@ static u32 gfx_v7_0_get_rb_active_bitmap(struct amdgpu_device *adev)
 static void gfx_v7_0_setup_rb(struct amdgpu_device *adev)
 {
     int i, j;
-    u32 data, tmp, num_rbs = 0;
+    u32 data;
     u32 active_rbs = 0;
+    u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se /
+                                 adev->gfx.config.max_sh_per_se;

     mutex_lock(&adev->grbm_idx_mutex);
     for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
         for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
             gfx_v7_0_select_se_sh(adev, i, j);
             data = gfx_v7_0_get_rb_active_bitmap(adev);
-            if (adev->asic_type == CHIP_HAWAII)
-                active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
-                                       HAWAII_RB_BITMAP_WIDTH_PER_SH);
-            else
-                active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
-                                       CIK_RB_BITMAP_WIDTH_PER_SH);
+            active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
+                                   rb_bitmap_width_per_sh);
         }
     }
     gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
     mutex_unlock(&adev->grbm_idx_mutex);

     adev->gfx.config.backend_enable_mask = active_rbs;
-    tmp = active_rbs;
-    while (tmp >>= 1)
-        num_rbs++;
-    adev->gfx.config.num_rbs = num_rbs;
+    adev->gfx.config.num_rbs = hweight32(active_rbs);
 }

 /**
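The deleted loop is the actual bug behind the "fix rb bitmap & cu bitmap calculation" patch: `while (tmp >>= 1) num_rbs++;` computes the index of the highest set bit, i.e. floor(log2(x)), not the number of set bits. For a bitmap like 0x9 (render backends 0 and 3 active) it reports 3 instead of 2. hweight32() is the kernel's 32-bit population count; a portable user-space demonstration:

#include <stdint.h>
#include <stdio.h>

/* Portable stand-in for the kernel's hweight32() population count. */
static unsigned hweight32(uint32_t w)
{
    unsigned n = 0;

    while (w) {
        w &= w - 1;    /* clear the lowest set bit */
        n++;
    }
    return n;
}

int main(void)
{
    uint32_t active_rbs = 0x9;    /* bits 0 and 3 set: 2 RBs active */
    uint32_t tmp = active_rbs;
    unsigned num_rbs = 0;

    while (tmp >>= 1)    /* old code: highest bit index, not a count */
        num_rbs++;

    printf("old: %u, fixed: %u\n", num_rbs, hweight32(active_rbs));
    /* prints "old: 3, fixed: 2" */
    return 0;
}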
@@ -2046,8 +2041,7 @@ static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
     else
         header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);

-    control |= ib->length_dw |
-               (ib->vm ? (ib->vm->ids[ring->idx].id << 24) : 0);
+    control |= ib->length_dw | (ib->vm_id << 24);

     amdgpu_ring_write(ring, header);
     amdgpu_ring_write(ring,
@@ -2075,8 +2069,7 @@ static void gfx_v7_0_ring_emit_ib_compute(struct amdgpu_ring *ring,

     header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);

-    control |= ib->length_dw |
-               (ib->vm ? (ib->vm->ids[ring->idx].id << 24) : 0);
+    control |= ib->length_dw | (ib->vm_id << 24);

     amdgpu_ring_write(ring, header);
     amdgpu_ring_write(ring,
@@ -3825,8 +3818,7 @@ static u32 gfx_v7_0_get_cu_active_bitmap(struct amdgpu_device *adev)
     data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
     data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;

-    mask = gfx_v7_0_create_bitmask(adev->gfx.config.max_backends_per_se /
-                                   adev->gfx.config.max_sh_per_se);
+    mask = gfx_v7_0_create_bitmask(adev->gfx.config.max_cu_per_sh);

     return (~data) & mask;
 }
@@ -5237,6 +5229,8 @@ int gfx_v7_0_get_cu_info(struct amdgpu_device *adev,
     if (!adev || !cu_info)
         return -EINVAL;

+    memset(cu_info, 0, sizeof(*cu_info));
+
     mutex_lock(&adev->grbm_idx_mutex);
     for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
         for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
@@ -2613,8 +2613,10 @@ static u32 gfx_v8_0_get_rb_active_bitmap(struct amdgpu_device *adev)
 static void gfx_v8_0_setup_rb(struct amdgpu_device *adev)
 {
     int i, j;
-    u32 data, tmp, num_rbs = 0;
+    u32 data;
     u32 active_rbs = 0;
+    u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se /
+                                 adev->gfx.config.max_sh_per_se;

     mutex_lock(&adev->grbm_idx_mutex);
     for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
@@ -2622,17 +2624,14 @@ static void gfx_v8_0_setup_rb(struct amdgpu_device *adev)
             gfx_v8_0_select_se_sh(adev, i, j);
             data = gfx_v8_0_get_rb_active_bitmap(adev);
             active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
-                                   RB_BITMAP_WIDTH_PER_SH);
+                                   rb_bitmap_width_per_sh);
         }
     }
     gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
     mutex_unlock(&adev->grbm_idx_mutex);

     adev->gfx.config.backend_enable_mask = active_rbs;
-    tmp = active_rbs;
-    while (tmp >>= 1)
-        num_rbs++;
-    adev->gfx.config.num_rbs = num_rbs;
+    adev->gfx.config.num_rbs = hweight32(active_rbs);
 }

 /**
@@ -4622,8 +4621,7 @@ static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
     else
         header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);

-    control |= ib->length_dw |
-               (ib->vm ? (ib->vm->ids[ring->idx].id << 24) : 0);
+    control |= ib->length_dw | (ib->vm_id << 24);

     amdgpu_ring_write(ring, header);
     amdgpu_ring_write(ring,
@@ -4652,8 +4650,7 @@ static void gfx_v8_0_ring_emit_ib_compute(struct amdgpu_ring *ring,

     header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);

-    control |= ib->length_dw |
-               (ib->vm ? (ib->vm->ids[ring->idx].id << 24) : 0);
+    control |= ib->length_dw | (ib->vm_id << 24);

     amdgpu_ring_write(ring, header);
     amdgpu_ring_write(ring,
@@ -5131,8 +5128,7 @@ static u32 gfx_v8_0_get_cu_active_bitmap(struct amdgpu_device *adev)
     data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
     data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;

-    mask = gfx_v8_0_create_bitmask(adev->gfx.config.max_backends_per_se /
-                                   adev->gfx.config.max_sh_per_se);
+    mask = gfx_v8_0_create_bitmask(adev->gfx.config.max_cu_per_sh);

     return (~data) & mask;
 }
@@ -5146,6 +5142,8 @@ int gfx_v8_0_get_cu_info(struct amdgpu_device *adev,
     if (!adev || !cu_info)
         return -EINVAL;

+    memset(cu_info, 0, sizeof(*cu_info));
+
     mutex_lock(&adev->grbm_idx_mutex);
     for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
         for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
@@ -244,7 +244,7 @@ static void sdma_v2_4_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
 static void sdma_v2_4_ring_emit_ib(struct amdgpu_ring *ring,
                                    struct amdgpu_ib *ib)
 {
-    u32 vmid = (ib->vm ? ib->vm->ids[ring->idx].id : 0) & 0xf;
+    u32 vmid = ib->vm_id & 0xf;
     u32 next_rptr = ring->wptr + 5;

     while ((next_rptr & 7) != 2)
@@ -355,7 +355,7 @@ static void sdma_v3_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
 static void sdma_v3_0_ring_emit_ib(struct amdgpu_ring *ring,
                                    struct amdgpu_ib *ib)
 {
-    u32 vmid = (ib->vm ? ib->vm->ids[ring->idx].id : 0) & 0xf;
+    u32 vmid = ib->vm_id & 0xf;
    u32 next_rptr = ring->wptr + 5;

     while ((next_rptr & 7) != 2)
@@ -71,8 +71,6 @@
 #define VMID(x) ((x) << 4)
 #define QUEUEID(x) ((x) << 8)

-#define RB_BITMAP_WIDTH_PER_SH 2
-
 #define MC_SEQ_MISC0__MT__MASK 0xf0000000
 #define MC_SEQ_MISC0__MT__GDDR1 0x10000000
 #define MC_SEQ_MISC0__MT__DDR2 0x20000000
@@ -340,6 +340,8 @@ struct atcs_pref_req_output {
 # define ATPX_FIXED_NOT_SUPPORTED (1 << 9)
 # define ATPX_DYNAMIC_DGPU_POWER_OFF_SUPPORTED (1 << 10)
 # define ATPX_DGPU_REQ_POWER_FOR_DISPLAYS (1 << 11)
+# define ATPX_DGPU_CAN_DRIVE_DISPLAYS (1 << 12)
+# define ATPX_MS_HYBRID_GFX_SUPPORTED (1 << 14)
 #define ATPX_FUNCTION_POWER_CONTROL 0x2
 /* ARG0: ATPX_FUNCTION_POWER_CONTROL
  * ARG1:
@@ -606,7 +606,7 @@ static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size)

     if (hwmgr == NULL || hwmgr->hwmgr_func == NULL ||
         hwmgr->hwmgr_func->set_pp_table == NULL)
-            return -EINVAL;
+        return -EINVAL;

     return hwmgr->hwmgr_func->set_pp_table(hwmgr, buf, size);
 }
@@ -623,7 +623,7 @@ static int pp_dpm_force_clock_level(void *handle,

     if (hwmgr == NULL || hwmgr->hwmgr_func == NULL ||
         hwmgr->hwmgr_func->force_clock_level == NULL)
-            return -EINVAL;
+        return -EINVAL;

     return hwmgr->hwmgr_func->force_clock_level(hwmgr, type, level);
 }
@@ -291,6 +291,8 @@ int radeon_atif_handler(struct radeon_device *rdev,
 # define ATPX_FIXED_NOT_SUPPORTED (1 << 9)
 # define ATPX_DYNAMIC_DGPU_POWER_OFF_SUPPORTED (1 << 10)
 # define ATPX_DGPU_REQ_POWER_FOR_DISPLAYS (1 << 11)
+# define ATPX_DGPU_CAN_DRIVE_DISPLAYS (1 << 12)
+# define ATPX_MS_HYBRID_GFX_SUPPORTED (1 << 14)
 #define ATPX_FUNCTION_POWER_CONTROL 0x2
 /* ARG0: ATPX_FUNCTION_POWER_CONTROL
  * ARG1: