From 0e89d0c16b9446a094215e71734e583c438bf83d Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Christian=20K=C3=B6nig?=
Date: Tue, 4 Aug 2015 16:58:36 +0200
Subject: [PATCH] drm/amdgpu: stop leaking the ctx id into the scheduler v2
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Id's are for the IOCTL ABI only.

v2: remove tgid as well

Signed-off-by: Christian König
Reviewed-by: Chunming Zhou
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c       | 11 ++++-------
 drivers/gpu/drm/amd/scheduler/gpu_scheduler.c | 10 +++-------
 drivers/gpu/drm/amd/scheduler/gpu_scheduler.h |  6 ------
 3 files changed, 7 insertions(+), 20 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 3c353375b228..c2290ae20312 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -50,8 +50,7 @@ static void amdgpu_ctx_do_release(struct kref *ref)
 
 static void amdgpu_ctx_init(struct amdgpu_device *adev,
 			    struct amdgpu_fpriv *fpriv,
-			    struct amdgpu_ctx *ctx,
-			    uint32_t id)
+			    struct amdgpu_ctx *ctx)
 {
 	int i;
 	memset(ctx, 0, sizeof(*ctx));
@@ -81,7 +80,7 @@ int amdgpu_ctx_alloc(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv,
 			return r;
 		}
 		*id = (uint32_t)r;
-		amdgpu_ctx_init(adev, fpriv, ctx, *id);
+		amdgpu_ctx_init(adev, fpriv, ctx);
 		mutex_unlock(&mgr->lock);
 	} else {
 		if (adev->kernel_ctx) {
@@ -89,8 +88,7 @@ int amdgpu_ctx_alloc(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv,
 			kfree(ctx);
 			return 0;
 		}
-		*id = AMD_KERNEL_CONTEXT_ID;
-		amdgpu_ctx_init(adev, fpriv, ctx, *id);
+		amdgpu_ctx_init(adev, fpriv, ctx);
 		adev->kernel_ctx = ctx;
 	}
 
@@ -105,8 +103,7 @@ int amdgpu_ctx_alloc(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv,
 			rq = &adev->rings[i]->scheduler->kernel_rq;
 			r = amd_context_entity_init(adev->rings[i]->scheduler,
 						    &ctx->rings[i].c_entity,
-						    NULL, rq, *id,
-						    amdgpu_sched_jobs);
+						    NULL, rq, amdgpu_sched_jobs);
 			if (r)
 				break;
 		}
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index 4ad1825e713e..b9aa572980d2 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -172,7 +172,7 @@ static struct amd_context_entity *select_context(
  * @entity	The pointer to a valid amd_context_entity
  * @parent	The parent entity of this amd_context_entity
  * @rq		The run queue this entity belongs
- * @context_id	The context id for this entity
+ * @kernel	If this is an entity for the kernel
  * @jobs	The max number of jobs in the job queue
  *
  * return 0 if succeed. negative error code on failure
@@ -181,7 +181,6 @@ int amd_context_entity_init(struct amd_gpu_scheduler *sched,
 			    struct amd_context_entity *entity,
 			    struct amd_sched_entity *parent,
 			    struct amd_run_queue *rq,
-			    uint32_t context_id,
 			    uint32_t jobs)
 {
 	uint64_t seq_ring = 0;
@@ -203,9 +202,6 @@ int amd_context_entity_init(struct amd_gpu_scheduler *sched,
 		return -EINVAL;
 
 	spin_lock_init(&entity->queue_lock);
-	entity->tgid = (context_id == AMD_KERNEL_CONTEXT_ID) ?
-				AMD_KERNEL_PROCESS_ID : current->tgid;
-	entity->context_id = context_id;
 	atomic64_set(&entity->last_emitted_v_seq, seq_ring);
 	atomic64_set(&entity->last_queued_v_seq, seq_ring);
 
@@ -275,9 +271,9 @@ int amd_context_entity_fini(struct amd_gpu_scheduler *sched,
 
 	if (r) {
 		if (entity->is_pending)
-			DRM_INFO("Entity %u is in waiting state during fini,\
+			DRM_INFO("Entity %p is in waiting state during fini,\
 				all pending ibs will be canceled.\n",
-				 entity->context_id);
+				 entity);
 	}
 
 	mutex_lock(&rq->lock);
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
index fd6d699d42e1..c46d0854ab75 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
@@ -26,9 +26,6 @@
 
 #include <linux/kfifo.h>
 
-#define AMD_KERNEL_CONTEXT_ID		0
-#define AMD_KERNEL_PROCESS_ID		0
-
 #define AMD_GPU_WAIT_IDLE_TIMEOUT_IN_MS	3000
 
 struct amd_gpu_scheduler;
@@ -74,8 +71,6 @@ struct amd_context_entity {
 	/* the virtual_seq is unique per context per ring */
 	atomic64_t		last_queued_v_seq;
 	atomic64_t		last_emitted_v_seq;
-	pid_t			tgid;
-	uint32_t		context_id;
 	/* the job_queue maintains the jobs submitted by clients */
 	struct kfifo		job_queue;
 	spinlock_t		queue_lock;
@@ -148,7 +143,6 @@ int amd_context_entity_init(struct amd_gpu_scheduler *sched,
 			    struct amd_context_entity *entity,
 			    struct amd_sched_entity *parent,
 			    struct amd_run_queue *rq,
-			    uint32_t context_id,
 			    uint32_t jobs);
 
 void amd_sched_emit(struct amd_context_entity *c_entity, uint64_t seq);
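
Not part of the patch: a minimal, self-contained C sketch of the idea behind the change, using hypothetical userspace stand-ins (sched_entity_sketch, ctx_sketch, ctx_alloc) rather than the kernel structures. The numeric id lives only in the IOCTL layer's id-to-context map, and the scheduler-side entity carries no context_id or tgid.

/* Illustrative sketch only -- hypothetical names, not kernel code. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct sched_entity_sketch {
	/* scheduler-side state only: no context_id, no tgid */
	uint64_t last_queued_seq;
	uint64_t last_emitted_seq;
};

struct ctx_sketch {
	struct sched_entity_sketch entity;
};

#define MAX_CTX 16

/* IOCTL-layer map: the only place the numeric id exists. */
static struct ctx_sketch *ctx_table[MAX_CTX];

static int ctx_alloc(uint32_t *id, struct ctx_sketch *ctx)
{
	for (uint32_t i = 1; i < MAX_CTX; i++) {	/* 0 left unused */
		if (!ctx_table[i]) {
			ctx_table[i] = ctx;
			*id = i;	/* id is handed back to userspace... */
			/* ...while entity init never sees it */
			memset(&ctx->entity, 0, sizeof(ctx->entity));
			return 0;
		}
	}
	return -1;
}

int main(void)
{
	struct ctx_sketch ctx;
	uint32_t id;

	if (ctx_alloc(&id, &ctx) == 0)
		printf("ctx id %u maps to %p; the entity stores no id\n",
		       id, (void *)&ctx);
	return 0;
}

Printing the pointer mirrors the DRM_INFO change in the patch: once the id is gone from the entity, the entity pointer is the natural handle for scheduler-side diagnostics.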