drm/i915: Flush request queue when waiting for ring space

During the review of

commit 1f70999f90
Author: Chris Wilson <chris@chris-wilson.co.uk>
Date:   Mon Jan 27 22:43:07 2014 +0000

    drm/i915: Prevent recursion by retiring requests when the ring is full

Ville raised the point that our interaction with request->tail was
likely to foul up other uses elsewhere (such as hang check comparing
ACTHD against requests).

However, we also need to restore the implicit retire-requests that certain
test cases depend upon (e.g. igt/gem_exec_lut_handle); this raises the
spectre that the ppgtt will randomly call i915_gpu_idle() and recurse
back into intel_ring_begin(). The compromise taken here is to flush the
request queue explicitly whilst waiting for ring space: pick a request
whose retirement frees enough space, wait for it, retire up to it, and
derive the new ring->head from last_retired_head rather than from
request->tail.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=78023
Reviewed-by: Brad Volkin <bradley.d.volkin@intel.com>
[danvet: Remove now unused 'tail' variable as spotted by Brad.]
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Chris Wilson authored on 2014-05-05 09:07:33 +01:00; committed by Daniel Vetter
commit 1cf0ba1474, parent dcfe050659
3 changed files with 17 additions and 28 deletions
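The hazard the message warns about is easiest to see as a call chain. The
following is an illustration pieced together from the names in the message,
not a captured trace; the middle steps are assumptions about how a retire
can reach the ppgtt:

intel_ring_begin()
  -> wait for ring space
       -> implicit i915_gem_retire_requests_ring()
            -> retiring the last user of a ppgtt tears the ppgtt down
                 -> ppgtt teardown calls i915_gpu_idle()
                      -> idling submits requests: intel_ring_begin() again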

drivers/gpu/drm/i915/i915_drv.h

@@ -2215,6 +2215,7 @@ struct drm_i915_gem_request *
 i915_gem_find_active_request(struct intel_ring_buffer *ring);
 bool i915_gem_retire_requests(struct drm_device *dev);
+void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring);
 int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
 				      bool interruptible);
 static inline bool i915_reset_in_progress(struct i915_gpu_error *error)

drivers/gpu/drm/i915/i915_gem.c

@@ -64,7 +64,6 @@ static unsigned long i915_gem_inactive_scan(struct shrinker *shrinker,
 static unsigned long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
 static unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
 static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
-static void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring);
 
 static bool cpu_cache_is_coherent(struct drm_device *dev,
 				  enum i915_cache_level level)
@@ -2448,7 +2447,7 @@ void i915_gem_reset(struct drm_device *dev)
 /**
  * This function clears the request list as sequence numbers are passed.
  */
-static void
+void
 i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
 {
 	uint32_t seqno;

drivers/gpu/drm/i915/intel_ringbuffer.c

@@ -40,12 +40,17 @@
  */
 #define CACHELINE_BYTES 64
 
+static inline int __ring_space(int head, int tail, int size)
+{
+	int space = head - (tail + I915_RING_FREE_SPACE);
+	if (space < 0)
+		space += size;
+	return space;
+}
+
 static inline int ring_space(struct intel_ring_buffer *ring)
 {
-	int space = (ring->head & HEAD_ADDR) - (ring->tail + I915_RING_FREE_SPACE);
-	if (space < 0)
-		space += ring->size;
-	return space;
+	return __ring_space(ring->head & HEAD_ADDR, ring->tail, ring->size);
 }
 
 static bool intel_ring_stopped(struct intel_ring_buffer *ring)
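The new __ring_space() helper is the standard circular-buffer free-space
calculation, with I915_RING_FREE_SPACE bytes kept in reserve so a completely
full ring is never mistaken for an empty one. A minimal userspace sketch of
the same arithmetic (the reserve value of 64 is illustrative, not read from
the driver headers):

#include <assert.h>

#define RING_FREE_SPACE 64	/* stand-in for I915_RING_FREE_SPACE */

/* Free bytes between tail and head on a ring of `size` bytes,
 * minus the reserved slack -- the same arithmetic as __ring_space(). */
static int ring_space_demo(int head, int tail, int size)
{
	int space = head - (tail + RING_FREE_SPACE);
	if (space < 0)		/* free region wraps around the ring end */
		space += size;
	return space;
}

int main(void)
{
	/* Head ahead of tail: free space is contiguous, no wrap. */
	assert(ring_space_demo(8192, 4096, 16384) == 4032);
	/* Tail has wrapped past head: free space crosses the ring end. */
	assert(ring_space_demo(1024, 12288, 16384) == 5056);
	return 0;
}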
@@ -1482,7 +1487,7 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
 static int intel_ring_wait_request(struct intel_ring_buffer *ring, int n)
 {
 	struct drm_i915_gem_request *request;
-	u32 seqno = 0, tail;
+	u32 seqno = 0;
 	int ret;
 
 	if (ring->last_retired_head != -1) {
@@ -1495,26 +1500,10 @@ static int intel_ring_wait_request(struct intel_ring_buffer *ring, int n)
 	}
 
 	list_for_each_entry(request, &ring->request_list, list) {
-		int space;
-
-		if (request->tail == -1)
-			continue;
-
-		space = request->tail - (ring->tail + I915_RING_FREE_SPACE);
-		if (space < 0)
-			space += ring->size;
-		if (space >= n) {
+		if (__ring_space(request->tail, ring->tail, ring->size) >= n) {
 			seqno = request->seqno;
-			tail = request->tail;
 			break;
 		}
-
-		/* Consume this request in case we need more space than
-		 * is available and so need to prevent a race between
-		 * updating last_retired_head and direct reads of
-		 * I915_RING_HEAD. It also provides a nice sanity check.
-		 */
-		request->tail = -1;
 	}
 
 	if (seqno == 0)
@@ -1524,11 +1513,11 @@ static int intel_ring_wait_request(struct intel_ring_buffer *ring, int n)
 	if (ret)
 		return ret;
 
-	ring->head = tail;
-	ring->space = ring_space(ring);
-	if (WARN_ON(ring->space < n))
-		return -ENOSPC;
+	i915_gem_retire_requests_ring(ring);
+	ring->head = ring->last_retired_head;
+	ring->last_retired_head = -1;
 
+	ring->space = ring_space(ring);
 	return 0;
 }
 
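Read end to end, the reworked intel_ring_wait_request() now has the shape
below. This is a condensed sketch assembled from the hunks above, not the
verbatim resulting function: the i915_wait_seqno() call bridging the last
two hunks and the -ENOSPC fallback are assumed from this era of the driver,
and the last_retired_head fast path at the top is elided.

static int intel_ring_wait_request(struct intel_ring_buffer *ring, int n)
{
	struct drm_i915_gem_request *request;
	u32 seqno = 0;
	int ret;

	/* (fast path reusing a previously recorded last_retired_head elided) */

	/* request->tail records where the ring tail stood when the request
	 * was emitted, so __ring_space() measures how much space retiring
	 * up to that request would make available. */
	list_for_each_entry(request, &ring->request_list, list) {
		if (__ring_space(request->tail, ring->tail, ring->size) >= n) {
			seqno = request->seqno;
			break;
		}
	}
	if (seqno == 0)			/* no single request frees enough */
		return -ENOSPC;

	ret = i915_wait_seqno(ring, seqno);	/* assumed wait primitive */
	if (ret)
		return ret;

	/* The explicit flush this patch adds: retire the queue up to that
	 * request, then adopt the head position the retire recorded instead
	 * of trusting request->tail directly. */
	i915_gem_retire_requests_ring(ring);
	ring->head = ring->last_retired_head;
	ring->last_retired_head = -1;

	ring->space = ring_space(ring);
	return 0;
}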