mirror of https://gitee.com/openkylin/linux.git
drm/etnaviv: move dependency handling to scheduler
Move the fence dependency handling to the scheduler where it belongs. Jobs with unsignaled dependencies just get to sit in the scheduler queue without holding any locks. Signed-off-by: Lucas Stach <l.stach@pengutronix.de>
This commit is contained in:
parent
e93b6deeb4
commit
683da226f8
|
@ -94,6 +94,9 @@ struct etnaviv_gem_submit_bo {
|
|||
u32 flags;
|
||||
struct etnaviv_gem_object *obj;
|
||||
struct etnaviv_vram_mapping *mapping;
|
||||
struct dma_fence *excl;
|
||||
unsigned int nr_shared;
|
||||
struct dma_fence **shared;
|
||||
};
|
||||
|
||||
/* Created per submit-ioctl, to track bo's and cmdstream bufs, etc,
|
||||
|
|
|
@ -170,29 +170,33 @@ static int submit_lock_objects(struct etnaviv_gem_submit *submit,
|
|||
return ret;
|
||||
}
|
||||
|
||||
static int submit_fence_sync(const struct etnaviv_gem_submit *submit)
|
||||
static int submit_fence_sync(struct etnaviv_gem_submit *submit)
|
||||
{
|
||||
unsigned int context = submit->gpu->fence_context;
|
||||
int i, ret = 0;
|
||||
|
||||
for (i = 0; i < submit->nr_bos; i++) {
|
||||
struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;
|
||||
bool write = submit->bos[i].flags & ETNA_SUBMIT_BO_WRITE;
|
||||
bool explicit = !!(submit->flags & ETNA_SUBMIT_NO_IMPLICIT);
|
||||
struct etnaviv_gem_submit_bo *bo = &submit->bos[i];
|
||||
struct reservation_object *robj = bo->obj->resv;
|
||||
|
||||
ret = etnaviv_gpu_fence_sync_obj(etnaviv_obj, context, write,
|
||||
explicit);
|
||||
if (ret)
|
||||
break;
|
||||
}
|
||||
if (!(bo->flags & ETNA_SUBMIT_BO_WRITE)) {
|
||||
ret = reservation_object_reserve_shared(robj);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (submit->flags & ETNA_SUBMIT_NO_IMPLICIT)
|
||||
continue;
|
||||
|
||||
if (bo->flags & ETNA_SUBMIT_BO_WRITE) {
|
||||
ret = reservation_object_get_fences_rcu(robj, &bo->excl,
|
||||
&bo->nr_shared,
|
||||
&bo->shared);
|
||||
if (ret)
|
||||
return ret;
|
||||
} else {
|
||||
bo->excl = reservation_object_get_excl_rcu(robj);
|
||||
}
|
||||
|
||||
if (submit->flags & ETNA_SUBMIT_FENCE_FD_IN) {
|
||||
/*
|
||||
* Wait if the fence is from a foreign context, or if the fence
|
||||
* array contains any fence from a foreign context.
|
||||
*/
|
||||
if (!dma_fence_match_context(submit->in_fence, context))
|
||||
ret = dma_fence_wait(submit->in_fence, true);
|
||||
}
|
||||
|
||||
return ret;
|
||||
|
|
|
@ -1084,54 +1084,6 @@ static struct dma_fence *etnaviv_gpu_fence_alloc(struct etnaviv_gpu *gpu)
|
|||
return &f->base;
|
||||
}
|
||||
|
||||
/*
 * Synchronise a GEM object's reservation fences with this submit.
 *
 * @etnaviv_obj: object whose reservation object is inspected
 * @context:     fence context of the submitting GPU; fences from this
 *               context are skipped (they are ordered by the ring anyway)
 * @exclusive:   true if the submit writes the object (must wait on all
 *               shared fences too), false for read-only access
 * @explicit:    true when userspace handles synchronisation itself; only
 *               the shared-slot reservation is performed, no waiting
 *
 * Blocks (interruptibly) until foreign fences signal. Returns 0 on
 * success or a negative errno (e.g. -ERESTARTSYS from an interrupted
 * wait). The reservation object is presumably held by the caller —
 * rcu_dereference_protected() below asserts exactly that.
 */
int etnaviv_gpu_fence_sync_obj(struct etnaviv_gem_object *etnaviv_obj,
	unsigned int context, bool exclusive, bool explicit)
{
	struct reservation_object *robj = etnaviv_obj->resv;
	struct reservation_object_list *fobj;
	struct dma_fence *fence;
	int i, ret;

	/* Readers will add a shared fence later; reserve the slot now. */
	if (!exclusive) {
		ret = reservation_object_reserve_shared(robj);
		if (ret)
			return ret;
	}

	/* Explicit sync: userspace takes care of ordering, nothing to wait on. */
	if (explicit)
		return 0;

	/*
	 * If we have any shared fences, then the exclusive fence
	 * should be ignored as it will already have been signalled.
	 */
	fobj = reservation_object_get_list(robj);
	if (!fobj || fobj->shared_count == 0) {
		/* Wait on any existing exclusive fence which isn't our own */
		fence = reservation_object_get_excl(robj);
		if (fence && fence->context != context) {
			ret = dma_fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	/* Readers need not wait on other readers' shared fences. */
	if (!exclusive || !fobj)
		return 0;

	/* Writer: wait for every foreign shared fence to signal. */
	for (i = 0; i < fobj->shared_count; i++) {
		fence = rcu_dereference_protected(fobj->shared[i],
						reservation_object_held(robj));
		if (fence->context != context) {
			ret = dma_fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	return 0;
}
|
||||
|
||||
/*
|
||||
* event management:
|
||||
*/
|
||||
|
|
|
@ -188,9 +188,6 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu);
|
|||
int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m);
|
||||
#endif
|
||||
|
||||
int etnaviv_gpu_fence_sync_obj(struct etnaviv_gem_object *etnaviv_obj,
|
||||
unsigned int context, bool exclusive, bool implicit);
|
||||
|
||||
void etnaviv_gpu_retire(struct etnaviv_gpu *gpu);
|
||||
int etnaviv_gpu_wait_fence_interruptible(struct etnaviv_gpu *gpu,
|
||||
u32 fence, struct timespec *timeout);
|
||||
|
|
|
@ -35,6 +35,51 @@ struct etnaviv_gem_submit *to_etnaviv_submit(struct drm_sched_job *sched_job)
|
|||
/*
 * drm_sched dependency callback: hand the scheduler the next unsignaled
 * fence this job must wait for, or NULL when the job is ready to run.
 *
 * The scheduler calls this repeatedly; each call consumes fences that
 * were collected at submit time (submit->in_fence, bo->excl, bo->shared).
 * Ownership of a returned fence transfers to the scheduler, which is why
 * each slot is NULL-ed before the signaled-check — a later call must not
 * see it again. Signaled fences are dropped here with dma_fence_put().
 */
struct dma_fence *etnaviv_sched_dependency(struct drm_sched_job *sched_job,
					   struct drm_sched_entity *entity)
{
	struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
	struct dma_fence *fence;
	int i;

	/* Explicit in-fence from userspace, if any, is checked first. */
	if (unlikely(submit->in_fence)) {
		fence = submit->in_fence;
		submit->in_fence = NULL;

		if (!dma_fence_is_signaled(fence))
			return fence;

		dma_fence_put(fence);
	}

	for (i = 0; i < submit->nr_bos; i++) {
		struct etnaviv_gem_submit_bo *bo = &submit->bos[i];
		int j;

		/* Exclusive (writer) fence collected for this BO. */
		if (bo->excl) {
			fence = bo->excl;
			bo->excl = NULL;

			if (!dma_fence_is_signaled(fence))
				return fence;

			dma_fence_put(fence);
		}

		/* Shared (reader) fences; entries already handed out are NULL. */
		for (j = 0; j < bo->nr_shared; j++) {
			if (!bo->shared[j])
				continue;

			fence = bo->shared[j];
			bo->shared[j] = NULL;

			if (!dma_fence_is_signaled(fence))
				return fence;

			dma_fence_put(fence);
		}
		/* All shared fences consumed: free the array exactly once. */
		kfree(bo->shared);
		bo->nr_shared = 0;
		bo->shared = NULL;
	}

	/* No pending dependencies left — job can be scheduled. */
	return NULL;
}
|
||||
|
||||
|
|
Loading…
Reference in New Issue