drm/i915: Refactor the wait_rendering completion into a common routine

Harmonise the completion logic between the non-blocking and normal
wait_rendering paths, and move that logic into a common function.

In the process, we note that the last_write_seqno is, by definition, the
earlier of the object's two (read/write) seqnos, so any successful wait
will have passed the last_write_seqno as well. We can therefore
unconditionally clear the write seqno and the GPU write domains in the
completion logic.
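
To make the ordering argument concrete, here is a minimal standalone
sketch (not part of the patch). i915_seqno_passed() is reproduced with
the wraparound-safe comparison i915 uses; check_tail_invariant() is a
hypothetical helper spelling out why passing the read seqno implies
passing the write seqno.

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Wraparound-safe seqno comparison, matching the i915_seqno_passed()
 * helper in i915_drv.h: true if seq1 is at or after seq2, assuming all
 * live seqnos lie within 2^31 of each other. */
static inline bool i915_seqno_passed(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) >= 0;
}

/* Hypothetical checker, not in the driver: a write request is issued no
 * later than the final read request on an object, so last_write_seqno
 * never lies ahead of last_read_seqno. Given that, any wait that has
 * passed the read seqno has necessarily passed the write seqno, which
 * is what lets the common tail clear the write state unconditionally. */
static void check_tail_invariant(uint32_t completed,
				 uint32_t last_read_seqno,
				 uint32_t last_write_seqno)
{
	assert(i915_seqno_passed(last_read_seqno, last_write_seqno));
	if (i915_seqno_passed(completed, last_read_seqno))
		assert(i915_seqno_passed(completed, last_write_seqno));
}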

v2: Add the missing ring parameter, because sometimes it is good to have
things compile.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
commit d26e3af842 (parent daa13e1ca5)
Author: Chris Wilson, 2013-06-29 22:05:26 +01:00; committed by Daniel Vetter
1 changed file with 23 additions and 25 deletions

@@ -1087,6 +1087,25 @@ i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
 			    interruptible, NULL);
 }
 
+static int
+i915_gem_object_wait_rendering__tail(struct drm_i915_gem_object *obj,
+				     struct intel_ring_buffer *ring)
+{
+	i915_gem_retire_requests_ring(ring);
+
+	/* Manually manage the write flush as we may have not yet
+	 * retired the buffer.
+	 *
+	 * Note that the last_write_seqno is always the earlier of
+	 * the two (read/write) seqno, so if we have successfully waited,
+	 * we know we have passed the last write.
+	 */
+	obj->last_write_seqno = 0;
+	obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
+
+	return 0;
+}
+
 /**
  * Ensures that all rendering to the object has completed and the object is
  * safe to unbind from the GTT or access from the CPU.
@@ -1107,18 +1126,7 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
 	if (ret)
 		return ret;
 
-	i915_gem_retire_requests_ring(ring);
-
-	/* Manually manage the write flush as we may have not yet
-	 * retired the buffer.
-	 */
-	if (obj->last_write_seqno &&
-	    i915_seqno_passed(seqno, obj->last_write_seqno)) {
-		obj->last_write_seqno = 0;
-		obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
-	}
-
-	return 0;
+	return i915_gem_object_wait_rendering__tail(obj, ring);
 }
 
 /* A nonblocking variant of the above wait. This is a highly dangerous routine
@@ -1154,20 +1162,10 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
 	mutex_unlock(&dev->struct_mutex);
 	ret = __wait_seqno(ring, seqno, reset_counter, true, NULL);
 	mutex_lock(&dev->struct_mutex);
+	if (ret)
+		return ret;
 
-	i915_gem_retire_requests_ring(ring);
-
-	/* Manually manage the write flush as we may have not yet
-	 * retired the buffer.
-	 */
-	if (ret == 0 &&
-	    obj->last_write_seqno &&
-	    i915_seqno_passed(seqno, obj->last_write_seqno)) {
-		obj->last_write_seqno = 0;
-		obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
-	}
-
-	return ret;
+	return i915_gem_object_wait_rendering__tail(obj, ring);
 }
 
 /**
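
For reference, after this patch the blocking path reduces to the shape
below. This is a sketch reconstructed around the hunk context, assuming
the pre-wait logic (seqno selection, early return) is untouched by the
patch; annotations such as __must_check on the real function are elided.

static int
i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
			       bool readonly)
{
	struct intel_ring_buffer *ring = obj->ring;
	u32 seqno;
	int ret;

	/* For read-only access it suffices to wait for the last write;
	 * otherwise wait for the last read as well. */
	seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
	if (seqno == 0)
		return 0;

	ret = i915_wait_seqno(ring, seqno);
	if (ret)
		return ret;

	/* Both the blocking and nonblocking paths now funnel through
	 * the shared completion tail added by this patch. */
	return i915_gem_object_wait_rendering__tail(obj, ring);
}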