drm/i915: Update add_request() to take a request structure

Now that all callers of i915_add_request() have a request pointer to hand,
the add-request function can be updated to take that request pointer
directly rather than pulling it out of the ring's outstanding lazy
request (OLR).

For: VIZ-5115
Signed-off-by: John Harrison <John.C.Harrison@Intel.com>
Reviewed-by: Tomas Elf <tomas.elf@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Author:    John Harrison <John.C.Harrison@Intel.com>  2015-05-29 17:43:49 +01:00
Committer: Daniel Vetter
commit 75289874e4
parent 6258fbe23f
7 files changed, 23 insertions(+), 22 deletions(-)

--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h

@@ -2890,14 +2890,14 @@ void i915_gem_init_swizzling(struct drm_device *dev);
 void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
 int __must_check i915_gpu_idle(struct drm_device *dev);
 int __must_check i915_gem_suspend(struct drm_device *dev);
-void __i915_add_request(struct intel_engine_cs *ring,
+void __i915_add_request(struct drm_i915_gem_request *req,
 			struct drm_file *file,
 			struct drm_i915_gem_object *batch_obj,
 			bool flush_caches);
-#define i915_add_request(ring) \
-	__i915_add_request(ring, NULL, NULL, true)
-#define i915_add_request_no_flush(ring) \
-	__i915_add_request(ring, NULL, NULL, false)
+#define i915_add_request(req) \
+	__i915_add_request(req, NULL, NULL, true)
+#define i915_add_request_no_flush(req) \
+	__i915_add_request(req, NULL, NULL, false)
 int __i915_wait_request(struct drm_i915_gem_request *req,
 			unsigned reset_counter,
 			bool interruptible,

--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c

@@ -1158,7 +1158,7 @@ i915_gem_check_olr(struct drm_i915_gem_request *req)
 	WARN_ON(!mutex_is_locked(&req->ring->dev->struct_mutex));
 
 	if (req == req->ring->outstanding_lazy_request)
-		i915_add_request(req->ring);
+		i915_add_request(req);
 
 	return 0;
 }
@@ -2468,25 +2468,25 @@ i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
  * request is not being tracked for completion but the work itself is
  * going to happen on the hardware. This would be a Bad Thing(tm).
  */
-void __i915_add_request(struct intel_engine_cs *ring,
+void __i915_add_request(struct drm_i915_gem_request *request,
 			struct drm_file *file,
 			struct drm_i915_gem_object *obj,
 			bool flush_caches)
 {
-	struct drm_i915_private *dev_priv = ring->dev->dev_private;
-	struct drm_i915_gem_request *request;
+	struct intel_engine_cs *ring;
+	struct drm_i915_private *dev_priv;
 	struct intel_ringbuffer *ringbuf;
 	u32 request_start;
 	int ret;
 
-	request = ring->outstanding_lazy_request;
 	if (WARN_ON(request == NULL))
 		return;
 
-	if (i915.enable_execlists) {
-		ringbuf = request->ctx->engine[ring->id].ringbuf;
-	} else
-		ringbuf = ring->buffer;
+	ring = request->ring;
+	dev_priv = ring->dev->dev_private;
+	ringbuf = request->ringbuf;
+
+	WARN_ON(request != ring->outstanding_lazy_request);
 
 	/*
 	 * To ensure that this call will not fail, space for its emissions
@@ -3338,7 +3338,7 @@ int i915_gpu_idle(struct drm_device *dev)
 				return ret;
 			}
 
-			i915_add_request_no_flush(req->ring);
+			i915_add_request_no_flush(req);
 		}
 
 		WARN_ON(ring->outstanding_lazy_request);
@@ -5122,7 +5122,7 @@ i915_gem_init_hw(struct drm_device *dev)
 			goto out;
 		}
 
-		i915_add_request_no_flush(ring);
+		i915_add_request_no_flush(req);
 	}
 
 out:

--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c

@@ -1066,7 +1066,7 @@ i915_gem_execbuffer_retire_commands(struct i915_execbuffer_params *params)
 	params->ring->gpu_caches_dirty = true;
 
 	/* Add a breadcrumb for the completion of the batch buffer */
-	__i915_add_request(params->ring, params->file, params->batch_obj, true);
+	__i915_add_request(params->request, params->file, params->batch_obj, true);
 }
 
 static int

--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c

@@ -11497,7 +11497,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 	}
 
 	if (request)
-		i915_add_request_no_flush(request->ring);
+		i915_add_request_no_flush(request);
 
 	work->flip_queued_vblank = drm_crtc_vblank_count(crtc);
 	work->enable_stall_check = true;

--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c

@@ -2242,7 +2242,7 @@ int intel_lr_context_deferred_create(struct intel_context *ctx,
 			goto error;
 		}
 
-		i915_add_request_no_flush(req->ring);
+		i915_add_request_no_flush(req);
 	}
 
 	ctx->rcs_initialized = true;

--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c

@@ -217,7 +217,7 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
 
 	WARN_ON(overlay->last_flip_req);
 	i915_gem_request_assign(&overlay->last_flip_req, req);
-	i915_add_request(req->ring);
+	i915_add_request(req);
 
 	overlay->flip_tail = tail;
 	ret = i915_wait_request(overlay->last_flip_req);
@@ -299,7 +299,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
 
 	WARN_ON(overlay->last_flip_req);
 	i915_gem_request_assign(&overlay->last_flip_req, req);
-	i915_add_request(req->ring);
+	i915_add_request(req);
 
 	return 0;
 }

--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c

@@ -2167,8 +2167,9 @@ int intel_ring_idle(struct intel_engine_cs *ring)
 	struct drm_i915_gem_request *req;
 
 	/* We need to add any requests required to flush the objects and ring */
+	WARN_ON(ring->outstanding_lazy_request);
 	if (ring->outstanding_lazy_request)
-		i915_add_request(ring);
+		i915_add_request(ring->outstanding_lazy_request);
 
 	/* Wait upon the last request to be completed */
 	if (list_empty(&ring->request_list))