drm/i915: Use engine->context_pin() to report the intel_ring
Since unifying ringbuffer/execlist submission to use engine->context_pin(),
we ensure that the intel_ring is available before we start constructing the
request. We can therefore move the assignment of request->ring to the central
i915_gem_request_alloc() and not require it in every engine->request_alloc()
callback. Another small step towards simplification (of the core, but at the
cost of handling error pointers in less important callers of
engine->context_pin()).

v2: Rearrange a few branches to reduce the impact of PTR_ERR() on gcc's code
generation.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Oscar Mateo <oscar.mateo@intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Reviewed-by: Oscar Mateo <oscar.mateo@intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20170504093308.4137-1-chris@chris-wilson.co.uk
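To make the new calling convention concrete, here is a minimal sketch rather than the driver code itself: the structs below are trimmed stand-ins for the real i915 types, example_pin_context() is a hypothetical caller, and only the IS_ERR()/PTR_ERR() helpers from <linux/err.h> are the kernel's real API. It shows context_pin() reporting the pinned context's intel_ring (or an error pointer), so the core can assign request->ring centrally:

```c
/*
 * Sketch only: simplified stand-ins for the driver types involved.
 * The real definitions live in drivers/gpu/drm/i915.
 */
#include <linux/err.h>

struct intel_ring;
struct i915_gem_context;

struct intel_engine_cs {
	/* Was: int (*context_pin)(...); it now reports the intel_ring. */
	struct intel_ring *(*context_pin)(struct intel_engine_cs *engine,
					  struct i915_gem_context *ctx);
	void (*context_unpin)(struct intel_engine_cs *engine,
			      struct i915_gem_context *ctx);
};

/* Hypothetical caller: handle an error pointer instead of an int return. */
static int example_pin_context(struct intel_engine_cs *engine,
			       struct i915_gem_context *ctx,
			       struct intel_ring **out_ring)
{
	struct intel_ring *ring;

	ring = engine->context_pin(engine, ctx);
	if (IS_ERR(ring))
		return PTR_ERR(ring);

	/* e.g. i915_gem_request_alloc() can set req->ring = ring here */
	*out_ring = ring;
	return 0;
}
```

The pin operation already had to know which ring the context would run on, so returning it directly removes the duplicated request->ring assignments from each engine->request_alloc() callback, at the price of ERR_PTR handling in the few remaining callers.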
parent c944a308a9
commit 266a240bf0
drivers/gpu/drm/i915/gvt/scheduler.c

@@ -180,6 +180,7 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
 	struct intel_engine_cs *engine = dev_priv->engine[ring_id];
 	struct drm_i915_gem_request *rq;
 	struct intel_vgpu *vgpu = workload->vgpu;
+	struct intel_ring *ring;
 	int ret;

 	gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n",
@@ -198,8 +199,9 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
 	 * shadow_ctx pages invalid. So gvt need to pin itself. After update
 	 * the guest context, gvt can unpin the shadow_ctx safely.
 	 */
-	ret = engine->context_pin(engine, shadow_ctx);
-	if (ret) {
+	ring = engine->context_pin(engine, shadow_ctx);
+	if (IS_ERR(ring)) {
+		ret = PTR_ERR(ring);
 		gvt_vgpu_err("fail to pin shadow context\n");
 		workload->status = ret;
 		mutex_unlock(&dev_priv->drm.struct_mutex);
drivers/gpu/drm/i915/i915_gem_request.c

@@ -551,6 +551,7 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
 {
 	struct drm_i915_private *dev_priv = engine->i915;
 	struct drm_i915_gem_request *req;
+	struct intel_ring *ring;
 	int ret;

 	lockdep_assert_held(&dev_priv->drm.struct_mutex);
@@ -565,9 +566,10 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
 	 * GGTT space, so do this first before we reserve a seqno for
 	 * ourselves.
 	 */
-	ret = engine->context_pin(engine, ctx);
-	if (ret)
-		return ERR_PTR(ret);
+	ring = engine->context_pin(engine, ctx);
+	if (IS_ERR(ring))
+		return ERR_CAST(ring);
+	GEM_BUG_ON(!ring);

 	ret = reserve_seqno(engine);
 	if (ret)
@@ -633,6 +635,7 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
 	req->i915 = dev_priv;
 	req->engine = engine;
 	req->ctx = ctx;
+	req->ring = ring;

 	/* No zalloc, must clear what we need by hand */
 	req->global_seqno = 0;
drivers/gpu/drm/i915/i915_perf.c

@@ -744,6 +744,7 @@ static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
 {
 	struct drm_i915_private *dev_priv = stream->dev_priv;
 	struct intel_engine_cs *engine = dev_priv->engine[RCS];
+	struct intel_ring *ring;
 	int ret;

 	ret = i915_mutex_lock_interruptible(&dev_priv->drm);
@@ -755,9 +756,10 @@ static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
 	 *
 	 * NB: implied RCS engine...
 	 */
-	ret = engine->context_pin(engine, stream->ctx);
-	if (ret)
-		goto unlock;
+	ring = engine->context_pin(engine, stream->ctx);
+	mutex_unlock(&dev_priv->drm.struct_mutex);
+	if (IS_ERR(ring))
+		return PTR_ERR(ring);

 	/* Explicitly track the ID (instead of calling i915_ggtt_offset()
 	 * on the fly) considering the difference with gen8+ and
@@ -766,10 +768,7 @@ static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
 	dev_priv->perf.oa.specific_ctx_id =
 		i915_ggtt_offset(stream->ctx->engine[engine->id].state);

-unlock:
-	mutex_unlock(&dev_priv->drm.struct_mutex);
-
-	return ret;
+	return 0;
 }

 /**
drivers/gpu/drm/i915/intel_engine_cs.c

@@ -469,6 +469,7 @@ static void intel_engine_cleanup_scratch(struct intel_engine_cs *engine)
  */
 int intel_engine_init_common(struct intel_engine_cs *engine)
 {
+	struct intel_ring *ring;
 	int ret;

 	engine->set_default_submission(engine);
@@ -480,9 +481,9 @@ int intel_engine_init_common(struct intel_engine_cs *engine)
 	 * be available. To avoid this we always pin the default
 	 * context.
 	 */
-	ret = engine->context_pin(engine, engine->i915->kernel_context);
-	if (ret)
-		return ret;
+	ring = engine->context_pin(engine, engine->i915->kernel_context);
+	if (IS_ERR(ring))
+		return PTR_ERR(ring);

 	ret = intel_engine_init_breadcrumbs(engine);
 	if (ret)
drivers/gpu/drm/i915/intel_lrc.c

@@ -740,8 +740,9 @@ static void execlists_schedule(struct drm_i915_gem_request *request, int prio)
 	/* XXX Do we need to preempt to make room for us and our deps? */
 }

-static int execlists_context_pin(struct intel_engine_cs *engine,
-				 struct i915_gem_context *ctx)
+static struct intel_ring *
+execlists_context_pin(struct intel_engine_cs *engine,
+		      struct i915_gem_context *ctx)
 {
 	struct intel_context *ce = &ctx->engine[engine->id];
 	unsigned int flags;
@@ -750,8 +751,8 @@ static int execlists_context_pin(struct intel_engine_cs *engine,

 	lockdep_assert_held(&ctx->i915->drm.struct_mutex);

-	if (ce->pin_count++)
-		return 0;
+	if (likely(ce->pin_count++))
+		goto out;
 	GEM_BUG_ON(!ce->pin_count); /* no overflow please! */

 	if (!ce->state) {
@@ -788,7 +789,8 @@ static int execlists_context_pin(struct intel_engine_cs *engine,
 	ce->state->obj->mm.dirty = true;

 	i915_gem_context_get(ctx);
-	return 0;
+out:
+	return ce->ring;

 unpin_map:
 	i915_gem_object_unpin_map(ce->state->obj);
@@ -796,7 +798,7 @@ static int execlists_context_pin(struct intel_engine_cs *engine,
 	__i915_vma_unpin(ce->state);
 err:
 	ce->pin_count = 0;
-	return ret;
+	return ERR_PTR(ret);
 }

 static void execlists_context_unpin(struct intel_engine_cs *engine,
@@ -833,9 +835,6 @@ static int execlists_request_alloc(struct drm_i915_gem_request *request)
 	 */
 	request->reserved_space += EXECLISTS_REQUEST_SIZE;

-	GEM_BUG_ON(!ce->ring);
-	request->ring = ce->ring;
-
 	if (i915.enable_guc_submission) {
 		/*
 		 * Check that the GuC has space for the request before
drivers/gpu/drm/i915/intel_ringbuffer.c

@@ -1475,16 +1475,17 @@ alloc_context_vma(struct intel_engine_cs *engine)
 	return vma;
 }

-static int intel_ring_context_pin(struct intel_engine_cs *engine,
-				  struct i915_gem_context *ctx)
+static struct intel_ring *
+intel_ring_context_pin(struct intel_engine_cs *engine,
+		       struct i915_gem_context *ctx)
 {
 	struct intel_context *ce = &ctx->engine[engine->id];
 	int ret;

 	lockdep_assert_held(&ctx->i915->drm.struct_mutex);

-	if (ce->pin_count++)
-		return 0;
+	if (likely(ce->pin_count++))
+		goto out;
 	GEM_BUG_ON(!ce->pin_count); /* no overflow please! */

 	if (!ce->state && engine->context_size) {
@@ -1493,7 +1494,7 @@ static int intel_ring_context_pin(struct intel_engine_cs *engine,
 		vma = alloc_context_vma(engine);
 		if (IS_ERR(vma)) {
 			ret = PTR_ERR(vma);
-			goto error;
+			goto err;
 		}

 		ce->state = vma;
@@ -1502,7 +1503,7 @@ static int intel_ring_context_pin(struct intel_engine_cs *engine,
 	if (ce->state) {
 		ret = context_pin(ctx);
 		if (ret)
-			goto error;
+			goto err;

 		ce->state->obj->mm.dirty = true;
 	}
@@ -1518,11 +1519,14 @@ static int intel_ring_context_pin(struct intel_engine_cs *engine,
 	ce->initialised = true;

 	i915_gem_context_get(ctx);
-	return 0;

-error:
+out:
+	/* One ringbuffer to rule them all */
+	return engine->buffer;
+
+err:
 	ce->pin_count = 0;
-	return ret;
+	return ERR_PTR(ret);
 }

 static void intel_ring_context_unpin(struct intel_engine_cs *engine,
@@ -1634,9 +1638,6 @@ static int ring_request_alloc(struct drm_i915_gem_request *request)
 	 */
 	request->reserved_space += LEGACY_REQUEST_SIZE;

-	GEM_BUG_ON(!request->engine->buffer);
-	request->ring = request->engine->buffer;
-
 	cs = intel_ring_begin(request, 0);
 	if (IS_ERR(cs))
 		return PTR_ERR(cs);
drivers/gpu/drm/i915/intel_ringbuffer.h

@@ -271,8 +271,8 @@ struct intel_engine_cs {

 	void (*set_default_submission)(struct intel_engine_cs *engine);

-	int (*context_pin)(struct intel_engine_cs *engine,
-			   struct i915_gem_context *ctx);
+	struct intel_ring *(*context_pin)(struct intel_engine_cs *engine,
+					  struct i915_gem_context *ctx);
 	void (*context_unpin)(struct intel_engine_cs *engine,
 			      struct i915_gem_context *ctx);
 	int (*request_alloc)(struct drm_i915_gem_request *req);
drivers/gpu/drm/i915/selftests/mock_engine.c

@@ -52,11 +52,12 @@ static void hw_delay_complete(unsigned long data)
 	spin_unlock(&engine->hw_lock);
 }

-static int mock_context_pin(struct intel_engine_cs *engine,
-			    struct i915_gem_context *ctx)
+static struct intel_ring *
+mock_context_pin(struct intel_engine_cs *engine,
+		 struct i915_gem_context *ctx)
 {
 	i915_gem_context_get(ctx);
-	return 0;
+	return engine->buffer;
 }

 static void mock_context_unpin(struct intel_engine_cs *engine,
@@ -72,7 +73,6 @@ static int mock_request_alloc(struct drm_i915_gem_request *request)
 	INIT_LIST_HEAD(&mock->link);
 	mock->delay = 0;

-	request->ring = request->engine->buffer;
 	return 0;
 }