mirror of https://gitee.com/openkylin/linux.git
drm/scheduler: Scheduler priority fixes (v2)
Remove DRM_SCHED_PRIORITY_LOW, as it was used in only one place.

Rename DRM_SCHED_PRIORITY_MAX to DRM_SCHED_PRIORITY_COUNT and separate it from the real priorities by a blank line, as it represents the (total) count of said priorities and is used as such in loops throughout the code. (With 0-based indexing, the value one past the last priority is the count.)

Remove the redundant word HIGH in priority names, and rename *KERNEL* to *HIGH*, as that is what it really means: high.

v2: Add back KERNEL and remove SW and HW, in lieu of a single HIGH between NORMAL and KERNEL.

Signed-off-by: Luben Tuikov <luben.tuikov@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent c80e966b54
commit e2d732fdb7
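The diff below hinges on one idiom: because enum drm_sched_priority is 0-based, an entry placed one past the last real priority doubles as the total count, so it can size arrays and bound loops without a separate define. As a rough illustration only (a standalone sketch with made-up PRIO_* names, not the kernel code), the pattern looks like this:

/* Illustrative sketch of the count-sentinel idiom; names are hypothetical. */
#include <stdio.h>

enum prio {
	PRIO_MIN,	/* 0 */
	PRIO_NORMAL,	/* 1 */
	PRIO_HIGH,	/* 2 */
	PRIO_KERNEL,	/* 3 */

	PRIO_COUNT	/* 4 == number of real priorities above */
};

int main(void)
{
	int num_jobs[PRIO_COUNT] = { 0 };	/* one counter per priority */
	int i;

	/* walk from highest to lowest priority, as the scheduler loops do */
	for (i = PRIO_COUNT - 1; i >= PRIO_MIN; i--)
		printf("priority %d: %d jobs\n", i, num_jobs[i]);

	return 0;
}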
@@ -46,7 +46,7 @@ const unsigned int amdgpu_ctx_num_entities[AMDGPU_HW_IP_NUM] = {
 static int amdgpu_ctx_priority_permit(struct drm_file *filp,
 				      enum drm_sched_priority priority)
 {
-	if (priority < 0 || priority >= DRM_SCHED_PRIORITY_MAX)
+	if (priority < 0 || priority >= DRM_SCHED_PRIORITY_COUNT)
 		return -EINVAL;
 
 	/* NORMAL and below are accessible by everyone */
@@ -65,7 +65,7 @@ static int amdgpu_ctx_priority_permit(struct drm_file *filp,
 static enum gfx_pipe_priority amdgpu_ctx_sched_prio_to_compute_prio(enum drm_sched_priority prio)
 {
 	switch (prio) {
-	case DRM_SCHED_PRIORITY_HIGH_HW:
+	case DRM_SCHED_PRIORITY_HIGH:
 	case DRM_SCHED_PRIORITY_KERNEL:
 		return AMDGPU_GFX_PIPE_PRIO_HIGH;
 	default:
@@ -251,7 +251,7 @@ void amdgpu_job_stop_all_jobs_on_sched(struct drm_gpu_scheduler *sched)
 	int i;
 
 	/* Signal all jobs not yet scheduled */
-	for (i = DRM_SCHED_PRIORITY_MAX - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
+	for (i = DRM_SCHED_PRIORITY_COUNT - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
 		struct drm_sched_rq *rq = &sched->sched_rq[i];
 
 		if (!rq)
@@ -267,7 +267,7 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
 			&ring->sched;
 	}
 
-	for (i = 0; i < DRM_SCHED_PRIORITY_MAX; ++i)
+	for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_COUNT; ++i)
 		atomic_set(&ring->num_jobs[i], 0);
 
 	return 0;
@@ -243,7 +243,7 @@ struct amdgpu_ring {
 	bool has_compute_vm_bug;
 	bool no_scheduler;
 
-	atomic_t num_jobs[DRM_SCHED_PRIORITY_MAX];
+	atomic_t num_jobs[DRM_SCHED_PRIORITY_COUNT];
 	struct mutex priority_mutex;
 	/* protected by priority_mutex */
 	int priority;
@@ -36,14 +36,14 @@ enum drm_sched_priority amdgpu_to_sched_priority(int amdgpu_priority)
 {
 	switch (amdgpu_priority) {
 	case AMDGPU_CTX_PRIORITY_VERY_HIGH:
-		return DRM_SCHED_PRIORITY_HIGH_HW;
+		return DRM_SCHED_PRIORITY_HIGH;
 	case AMDGPU_CTX_PRIORITY_HIGH:
-		return DRM_SCHED_PRIORITY_HIGH_SW;
+		return DRM_SCHED_PRIORITY_HIGH;
 	case AMDGPU_CTX_PRIORITY_NORMAL:
 		return DRM_SCHED_PRIORITY_NORMAL;
 	case AMDGPU_CTX_PRIORITY_LOW:
 	case AMDGPU_CTX_PRIORITY_VERY_LOW:
-		return DRM_SCHED_PRIORITY_LOW;
+		return DRM_SCHED_PRIORITY_MIN;
 	case AMDGPU_CTX_PRIORITY_UNSET:
 		return DRM_SCHED_PRIORITY_UNSET;
 	default:
@@ -2109,7 +2109,7 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
 		ring = adev->mman.buffer_funcs_ring;
 		sched = &ring->sched;
 		r = drm_sched_entity_init(&adev->mman.entity,
-					  DRM_SCHED_PRIORITY_KERNEL, &sched,
+					  DRM_SCHED_PRIORITY_KERNEL, &sched,
 					  1, NULL);
 		if (r) {
 			DRM_ERROR("Failed setting up TTM BO move entity (%d)\n",
@@ -623,7 +623,7 @@ drm_sched_select_entity(struct drm_gpu_scheduler *sched)
 		return NULL;
 
 	/* Kernel run queue has higher priority than normal run queue*/
-	for (i = DRM_SCHED_PRIORITY_MAX - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
+	for (i = DRM_SCHED_PRIORITY_COUNT - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
 		entity = drm_sched_rq_select_entity(&sched->sched_rq[i]);
 		if (entity)
 			break;
@@ -851,7 +851,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
 	sched->name = name;
 	sched->timeout = timeout;
 	sched->hang_limit = hang_limit;
-	for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_MAX; i++)
+	for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_COUNT; i++)
 		drm_sched_rq_init(sched, &sched->sched_rq[i]);
 
 	init_waitqueue_head(&sched->wake_up_worker);
@@ -33,14 +33,16 @@
 struct drm_gpu_scheduler;
 struct drm_sched_rq;
 
+/* These are often used as an (initial) index
+ * to an array, and as such should start at 0.
+ */
 enum drm_sched_priority {
 	DRM_SCHED_PRIORITY_MIN,
-	DRM_SCHED_PRIORITY_LOW = DRM_SCHED_PRIORITY_MIN,
 	DRM_SCHED_PRIORITY_NORMAL,
-	DRM_SCHED_PRIORITY_HIGH_SW,
-	DRM_SCHED_PRIORITY_HIGH_HW,
+	DRM_SCHED_PRIORITY_HIGH,
 	DRM_SCHED_PRIORITY_KERNEL,
-	DRM_SCHED_PRIORITY_MAX,
+
+	DRM_SCHED_PRIORITY_COUNT,
 	DRM_SCHED_PRIORITY_INVALID = -1,
 	DRM_SCHED_PRIORITY_UNSET = -2
 };
@@ -274,7 +276,7 @@ struct drm_gpu_scheduler {
 	uint32_t hw_submission_limit;
 	long timeout;
 	const char *name;
-	struct drm_sched_rq sched_rq[DRM_SCHED_PRIORITY_MAX];
+	struct drm_sched_rq sched_rq[DRM_SCHED_PRIORITY_COUNT];
 	wait_queue_head_t wake_up_worker;
 	wait_queue_head_t job_scheduled;
 	atomic_t hw_rq_count;