drm/amdgpu: Optimize a function called by every IB scheduling

Move several if statements and a loop statement from run time to
initialization time.

Signed-off-by: Alex Xie <AlexBin.Xie@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Alex Xie 2017-05-30 17:10:16 -04:00 committed by Alex Deucher
parent 1410f64651
commit dd684d313e
3 changed files with 40 additions and 27 deletions
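
As a rough sketch of the pattern this commit applies (the types and names below are illustrative stand-ins, not the actual amdgpu driver code): the GFX IP version and MEC firmware checks run once when a ring is initialized, the result is cached in a boolean on the ring, and the per-IB hot path only reads that cached flag.

/* Illustrative sketch only -- simplified types, not the real amdgpu driver code. */
#include <stdbool.h>
#include <stdio.h>

enum ring_type { RING_TYPE_GFX, RING_TYPE_COMPUTE };

struct ring {
	enum ring_type type;
	unsigned int gfx_major;       /* GFX IP block major version */
	unsigned int mec_fw_version;  /* compute (MEC) firmware version */
	bool has_compute_vm_bug;      /* cached once at init time */
};

/* Init time: evaluate the checks once and cache the result. */
static void ring_check_compute_vm_bug(struct ring *ring)
{
	ring->has_compute_vm_bug = false;

	if (ring->type != RING_TYPE_COMPUTE)
		return;	/* only compute rings are affected */

	if (ring->gfx_major <= 7)
		ring->has_compute_vm_bug = true;
	else if (ring->gfx_major == 8 && ring->mec_fw_version < 673)
		ring->has_compute_vm_bug = true;
}

/* Hot path (per IB): a plain field read, no re-checking of IP/firmware state. */
static inline bool ring_has_compute_vm_bug(const struct ring *ring)
{
	return ring->has_compute_vm_bug;
}

int main(void)
{
	struct ring ring = {
		.type = RING_TYPE_COMPUTE,
		.gfx_major = 8,
		.mec_fw_version = 600,
	};

	ring_check_compute_vm_bug(&ring);	/* once, at initialization */
	printf("needs pipeline sync workaround: %d\n",
	       ring_has_compute_vm_bug(&ring));	/* cheap per-IB query */
	return 0;
}

In the patch itself, amdgpu_ring_init() calls the new amdgpu_ring_check_compute_vm_bug() once per ring, and amdgpu_vm_need_pipeline_sync() reads the cached flag through the inline helper amdgpu_ring_has_compute_vm_bug() instead of re-evaluating the checks for every IB.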

drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c

@@ -152,6 +152,36 @@ void amdgpu_ring_undo(struct amdgpu_ring *ring)
 		ring->funcs->end_use(ring);
 }
 
+/**
+ * amdgpu_ring_check_compute_vm_bug - check whether this ring has compute vm bug
+ *
+ * @adev: amdgpu_device pointer
+ * @ring: amdgpu_ring structure holding ring information
+ */
+static void amdgpu_ring_check_compute_vm_bug(struct amdgpu_device *adev,
+					     struct amdgpu_ring *ring)
+{
+	const struct amdgpu_ip_block *ip_block;
+
+	ring->has_compute_vm_bug = false;
+
+	if (ring->funcs->type != AMDGPU_RING_TYPE_COMPUTE)
+		/* only compute rings */
+		return;
+
+	ip_block = amdgpu_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
+	if (!ip_block)
+		return;
+
+	/* Compute ring has a VM bug for GFX version <= 7.
+	   And compute ring has a VM bug for GFX 8 MEC firmware version < 673. */
+	if (ip_block->version->major <= 7) {
+		ring->has_compute_vm_bug = true;
+	} else if (ip_block->version->major == 8)
+		if (adev->gfx.mec_fw_version < 673)
+			ring->has_compute_vm_bug = true;
+}
+
 /**
  * amdgpu_ring_init - init driver ring struct.
  *
@@ -257,6 +287,9 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
 	if (amdgpu_debugfs_ring_init(adev, ring)) {
 		DRM_ERROR("Failed to register debugfs file for rings !\n");
 	}
+
+	amdgpu_ring_check_compute_vm_bug(adev, ring);
+
 	return 0;
 }

drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h

@@ -185,6 +185,7 @@ struct amdgpu_ring {
 	u64			cond_exe_gpu_addr;
 	volatile u32		*cond_exe_cpu_addr;
 	unsigned		vm_inv_eng;
+	bool			has_compute_vm_bug;
 #if defined(CONFIG_DEBUG_FS)
 	struct dentry *ent;
 #endif
@@ -207,4 +208,9 @@ static inline void amdgpu_ring_clear_ring(struct amdgpu_ring *ring)
 }
 
+static inline bool amdgpu_ring_has_compute_vm_bug(struct amdgpu_ring *ring)
+{
+	return ring->has_compute_vm_bug;
+}
+
 #endif

drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c

@@ -656,32 +656,6 @@ static int amdgpu_vm_alloc_reserved_vmid(struct amdgpu_device *adev,
 	return r;
 }
 
-static bool amdgpu_vm_ring_has_compute_vm_bug(struct amdgpu_ring *ring)
-{
-	struct amdgpu_device *adev = ring->adev;
-	const struct amdgpu_ip_block *ip_block;
-
-	if (ring->funcs->type != AMDGPU_RING_TYPE_COMPUTE)
-		/* only compute rings */
-		return false;
-
-	ip_block = amdgpu_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
-	if (!ip_block)
-		return false;
-
-	if (ip_block->version->major <= 7) {
-		/* gfx7 has no workaround */
-		return true;
-	} else if (ip_block->version->major == 8) {
-		if (adev->gfx.mec_fw_version >= 673)
-			/* gfx8 is fixed in MEC firmware 673 */
-			return false;
-		else
-			return true;
-	}
-
-	return false;
-}
-
 bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
 				  struct amdgpu_job *job)
 {
@@ -691,7 +665,7 @@ bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
 	struct amdgpu_vm_id *id;
 	bool gds_switch_needed;
 	bool vm_flush_needed = job->vm_needs_flush ||
-		amdgpu_vm_ring_has_compute_vm_bug(ring);
+		amdgpu_ring_has_compute_vm_bug(ring);
 
 	if (job->vm_id == 0)
 		return false;