drm/i915: Forgo last_fence active request tracking

We were using last_fence to track the last request that used this
vma in a way that might be interpreted by a fence register, and forced
ourselves to wait for that request before modifying any fence register
that overlapped our vma. Due to the requirement that we track any XY_BLT
command, linear or tiled, this in effect meant that we had to track the
vma for its whole active lifespan anyway, so we can forgo the explicit
last_fence tracking and just use the whole vma->active.

Another solution would be to pipeline the register updates, which would
also help resolve some long-running stalls on gen3 (but only gen2 and gen3!)

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190812174804.26180-1-chris@chris-wilson.co.uk
Author: Chris Wilson <chris@chris-wilson.co.uk>
Date:   2019-08-12 18:48:03 +01:00
commit 3d6792cf0a (parent cf1c97dcb9)
5 changed files with 3 additions and 22 deletions
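
The argument in the message above is that any request which might be read
through a fence register is, by construction, already tracked on vma->active,
so a single wait on the whole active tracker subsumes the dedicated last_fence
wait (at the cost of occasionally waiting for unrelated work on the same vma).
As a rough illustration only, and not part of the patch, here is a minimal
user-space model of that subset argument; the toy_* names are invented for
this sketch and do not correspond to the kernel's request/active machinery:

/* Toy user-space model (not kernel code): a "vma" tracks every request that
 * uses it on an active list; the old scheme additionally remembered the last
 * request that went through a fence register.  Because fence users are a
 * subset of active users, waiting for the active list to drain also waits
 * out the last fence user. */
#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct toy_request {
	int seqno;
	bool completed;
};

struct toy_vma {
	struct toy_request *active[8];  /* all requests using this vma */
	size_t nactive;
	struct toy_request *last_fence; /* old scheme: last fenced user */
};

/* New scheme: wait for every request on the vma, fenced or not. */
static void wait_vma_active(struct toy_vma *vma)
{
	for (size_t i = 0; i < vma->nactive; i++)
		vma->active[i]->completed = true; /* stand-in for a real wait */
	vma->nactive = 0;
}

int main(void)
{
	struct toy_request tiled_blt = { .seqno = 1 }; /* uses a fence */
	struct toy_request render = { .seqno = 2 };    /* does not */
	struct toy_vma vma = {
		.active = { &tiled_blt, &render },
		.nactive = 2,
		.last_fence = &tiled_blt, /* always a member of active[] */
	};

	wait_vma_active(&vma);

	/* The aggregate wait covered the fence user as well, so the explicit
	 * last_fence bookkeeping adds nothing. */
	assert(vma.last_fence->completed);
	printf("fence user retired: %s\n", vma.last_fence->completed ? "yes" : "no");
	return 0;
}

The trade-off is also visible in the model: the aggregate wait may wait out
requests the old scheme would have ignored, which is the kind of stall the
message's alternative of pipelining the register updates would aim to avoid.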

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c

@@ -212,9 +212,7 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
                         }
                 }
                 if (vma->fence)
-                        seq_printf(m, " , fence: %d%s",
-                                   vma->fence->id,
-                                   i915_active_request_isset(&vma->last_fence) ? "*" : "");
+                        seq_printf(m, " , fence: %d", vma->fence->id);
                 seq_puts(m, ")");
 
                 spin_lock(&obj->vma.lock);

diff --git a/drivers/gpu/drm/i915/i915_gem_fence_reg.c b/drivers/gpu/drm/i915/i915_gem_fence_reg.c

@@ -230,16 +230,14 @@ static int fence_update(struct i915_fence_reg *fence,
                              i915_gem_object_get_tiling(vma->obj)))
                         return -EINVAL;
 
-                ret = i915_active_request_retire(&vma->last_fence,
-                                                 &vma->obj->base.dev->struct_mutex);
+                ret = i915_active_wait(&vma->active);
                 if (ret)
                         return ret;
         }
 
         old = xchg(&fence->vma, NULL);
         if (old) {
-                ret = i915_active_request_retire(&old->last_fence,
-                                                 &old->obj->base.dev->struct_mutex);
+                ret = i915_active_wait(&old->active);
                 if (ret) {
                         fence->vma = old;
                         return ret;

diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c

@@ -1867,7 +1867,6 @@ static struct i915_vma *pd_vma_create(struct gen6_ppgtt *ppgtt, int size)
                 return ERR_PTR(-ENOMEM);
 
         i915_active_init(i915, &vma->active, NULL, NULL);
-        INIT_ACTIVE_REQUEST(&vma->last_fence);
 
         vma->vm = &ggtt->vm;
         vma->ops = &pd_vma_ops;

diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c

@@ -120,7 +120,6 @@ vma_create(struct drm_i915_gem_object *obj,
 
         i915_active_init(vm->i915, &vma->active,
                          __i915_vma_active, __i915_vma_retire);
-        INIT_ACTIVE_REQUEST(&vma->last_fence);
 
         /* Declare ourselves safe for use inside shrinkers */
         if (IS_ENABLED(CONFIG_LOCKDEP)) {
@@ -802,8 +801,6 @@ static void __i915_vma_destroy(struct i915_vma *vma)
         GEM_BUG_ON(vma->node.allocated);
         GEM_BUG_ON(vma->fence);
 
-        GEM_BUG_ON(i915_active_request_isset(&vma->last_fence));
-
         mutex_lock(&vma->vm->mutex);
         list_del(&vma->vm_link);
         mutex_unlock(&vma->vm->mutex);
@@ -928,9 +925,6 @@ int i915_vma_move_to_active(struct i915_vma *vma,
         obj->read_domains |= I915_GEM_GPU_DOMAINS;
         obj->mm.dirty = true;
 
-        if (flags & EXEC_OBJECT_NEEDS_FENCE)
-                __i915_active_request_set(&vma->last_fence, rq);
-
         GEM_BUG_ON(!i915_vma_is_active(vma));
         return 0;
 }
@@ -961,14 +955,7 @@ int i915_vma_unbind(struct i915_vma *vma)
          * before we are finished).
          */
         __i915_vma_pin(vma);
-
         ret = i915_active_wait(&vma->active);
-        if (ret)
-                goto unpin;
-
-        ret = i915_active_request_retire(&vma->last_fence,
-                                         &vma->vm->i915->drm.struct_mutex);
-unpin:
         __i915_vma_unpin(vma);
         if (ret)
                 return ret;

diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h

@@ -111,7 +111,6 @@ struct i915_vma {
 #define I915_VMA_GGTT_WRITE BIT(14)
 
         struct i915_active active;
-        struct i915_active_request last_fence;
 
         /**
          * Support different GGTT views into the same object.