drm/i915: Make 'i915_gem_check_olr' actually check by request not seqno

Updated the _check_olr() function so that it takes a request object and compares
it directly against the OLR, rather than extracting seqnos and comparing those.

Note that there is one use case where the request object being processed is no
longer available at that point in the call stack. Hence a temporary copy of the
original function is still present (but called _check_ols() instead). This will
be removed in a subsequent patch.

Also downgraded a BUG_ON to a WARN_ON, since BUG_ON is discouraged in shipping
code (it kills the machine rather than just logging the problem).

For: VIZ-4377
Signed-off-by: John Harrison <John.C.Harrison@Intel.com>
Reviewed-by: Thomas Daniel <Thomas.Daniel@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
This commit is contained in:
John Harrison 2014-11-24 18:49:30 +00:00 committed by Daniel Vetter
parent 6259cead57
commit b6660d59f6
2 changed files with 28 additions and 18 deletions

View File

@ -2577,7 +2577,7 @@ bool i915_gem_retire_requests(struct drm_device *dev);
void i915_gem_retire_requests_ring(struct intel_engine_cs *ring); void i915_gem_retire_requests_ring(struct intel_engine_cs *ring);
int __must_check i915_gem_check_wedge(struct i915_gpu_error *error, int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
bool interruptible); bool interruptible);
int __must_check i915_gem_check_olr(struct intel_engine_cs *ring, u32 seqno); int __must_check i915_gem_check_olr(struct drm_i915_gem_request *req);
static inline bool i915_reset_in_progress(struct i915_gpu_error *error) static inline bool i915_reset_in_progress(struct i915_gpu_error *error)
{ {
@ -3117,4 +3117,20 @@ wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms)
} }
} }
/* XXX: Temporary solution to be removed later in patch series. */
/*
 * Compare an explicit seqno against the ring's outstanding lazy request and
 * emit that request on a match. This is a stop-gap copy of the old
 * i915_gem_check_olr() for the one caller that no longer has a request
 * object in scope; it will be deleted later in the series.
 */
static inline int __must_check i915_gem_check_ols(
		struct intel_engine_cs *ring, u32 seqno)
{
	/* Caller must hold struct_mutex; warn (not BUG) if it does not. */
	WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));

	if (seqno == i915_gem_request_get_seqno(ring->outstanding_lazy_request))
		return i915_add_request(ring, NULL);

	return 0;
}
/* XXX: Temporary solution to be removed later in patch series. */
#endif #endif

View File

@ -1153,19 +1153,18 @@ i915_gem_check_wedge(struct i915_gpu_error *error,
} }
/* /*
* Compare seqno against outstanding lazy request. Emit a request if they are * Compare arbitrary request against outstanding lazy request. Emit on match.
* equal.
*/ */
int int
i915_gem_check_olr(struct intel_engine_cs *ring, u32 seqno) i915_gem_check_olr(struct drm_i915_gem_request *req)
{ {
int ret; int ret;
BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex)); WARN_ON(!mutex_is_locked(&req->ring->dev->struct_mutex));
ret = 0; ret = 0;
if (seqno == i915_gem_request_get_seqno(ring->outstanding_lazy_request)) if (req == req->ring->outstanding_lazy_request)
ret = i915_add_request(ring, NULL); ret = i915_add_request(req->ring, NULL);
return ret; return ret;
} }
@ -1328,7 +1327,7 @@ i915_wait_seqno(struct intel_engine_cs *ring, uint32_t seqno)
if (ret) if (ret)
return ret; return ret;
ret = i915_gem_check_olr(ring, seqno); ret = i915_gem_check_ols(ring, seqno);
if (ret) if (ret)
return ret; return ret;
@ -1395,7 +1394,6 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring = obj->ring; struct intel_engine_cs *ring = obj->ring;
unsigned reset_counter; unsigned reset_counter;
u32 seqno;
int ret; int ret;
BUG_ON(!mutex_is_locked(&dev->struct_mutex)); BUG_ON(!mutex_is_locked(&dev->struct_mutex));
@ -1405,22 +1403,19 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
if (!req) if (!req)
return 0; return 0;
seqno = i915_gem_request_get_seqno(req);
WARN_ON(seqno == 0);
ret = i915_gem_check_wedge(&dev_priv->gpu_error, true); ret = i915_gem_check_wedge(&dev_priv->gpu_error, true);
if (ret) if (ret)
return ret; return ret;
ret = i915_gem_check_olr(ring, seqno); ret = i915_gem_check_olr(req);
if (ret) if (ret)
return ret; return ret;
reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter); reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
i915_gem_request_reference(req); i915_gem_request_reference(req);
mutex_unlock(&dev->struct_mutex); mutex_unlock(&dev->struct_mutex);
ret = __i915_wait_seqno(ring, seqno, reset_counter, true, NULL, ret = __i915_wait_seqno(ring, i915_gem_request_get_seqno(req),
file_priv); reset_counter, true, NULL, file_priv);
mutex_lock(&dev->struct_mutex); mutex_lock(&dev->struct_mutex);
i915_gem_request_unreference(req); i915_gem_request_unreference(req);
if (ret) if (ret)
@ -2880,8 +2875,7 @@ i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
int ret; int ret;
if (obj->active) { if (obj->active) {
ret = i915_gem_check_olr(obj->ring, ret = i915_gem_check_olr(obj->last_read_req);
i915_gem_request_get_seqno(obj->last_read_req));
if (ret) if (ret)
return ret; return ret;
@ -3011,7 +3005,7 @@ i915_gem_object_sync(struct drm_i915_gem_object *obj,
if (seqno <= from->semaphore.sync_seqno[idx]) if (seqno <= from->semaphore.sync_seqno[idx])
return 0; return 0;
ret = i915_gem_check_olr(obj->ring, seqno); ret = i915_gem_check_olr(obj->last_read_req);
if (ret) if (ret)
return ret; return ret;