drm/i915: Flatten engine init control flow

Now that sanity prevails and we have the clean split between software
init and starting the engines we can drop all the "have we allocated
this struct already?" nonsense.

Execlist code could benefit quite a bit more still, but that's for
another patch.

Reviewed-by: Dave Gordon <david.s.gordon@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
This commit is contained in:
Daniel Vetter 2014-11-20 00:33:08 +01:00
parent 36d0a82ef4
commit bfc882b4e3
3 changed files with 30 additions and 32 deletions

View File

@ -716,13 +716,13 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *ring)
BUG_ON(!validate_cmds_sorted(ring, cmd_tables, cmd_table_count)); BUG_ON(!validate_cmds_sorted(ring, cmd_tables, cmd_table_count));
BUG_ON(!validate_regs_sorted(ring)); BUG_ON(!validate_regs_sorted(ring));
if (hash_empty(ring->cmd_hash)) { WARN_ON(!hash_empty(ring->cmd_hash));
ret = init_hash_table(ring, cmd_tables, cmd_table_count);
if (ret) { ret = init_hash_table(ring, cmd_tables, cmd_table_count);
DRM_ERROR("CMD: cmd_parser_init failed!\n"); if (ret) {
fini_hash_table(ring); DRM_ERROR("CMD: cmd_parser_init failed!\n");
return ret; fini_hash_table(ring);
} return ret;
} }
ring->needs_cmd_parser = true; ring->needs_cmd_parser = true;

View File

@ -1833,8 +1833,7 @@ int intel_lr_context_deferred_create(struct intel_context *ctx,
int ret; int ret;
WARN_ON(ctx->legacy_hw_ctx.rcs_state != NULL); WARN_ON(ctx->legacy_hw_ctx.rcs_state != NULL);
if (ctx->engine[ring->id].state) WARN_ON(ctx->engine[ring->id].state);
return 0;
context_size = round_up(get_lr_context_size(ring), 4096); context_size = round_up(get_lr_context_size(ring), 4096);

View File

@ -635,8 +635,7 @@ intel_init_pipe_control(struct intel_engine_cs *ring)
{ {
int ret; int ret;
if (ring->scratch.obj) WARN_ON(ring->scratch.obj);
return 0;
ring->scratch.obj = i915_gem_alloc_object(ring->dev, 4096); ring->scratch.obj = i915_gem_alloc_object(ring->dev, 4096);
if (ring->scratch.obj == NULL) { if (ring->scratch.obj == NULL) {
@ -1799,15 +1798,15 @@ int intel_alloc_ringbuffer_obj(struct drm_device *dev,
static int intel_init_ring_buffer(struct drm_device *dev, static int intel_init_ring_buffer(struct drm_device *dev,
struct intel_engine_cs *ring) struct intel_engine_cs *ring)
{ {
struct intel_ringbuffer *ringbuf = ring->buffer; struct intel_ringbuffer *ringbuf;
int ret; int ret;
if (ringbuf == NULL) { WARN_ON(ring->buffer);
ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL);
if (!ringbuf) ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL);
return -ENOMEM; if (!ringbuf)
ring->buffer = ringbuf; return -ENOMEM;
} ring->buffer = ringbuf;
ring->dev = dev; ring->dev = dev;
INIT_LIST_HEAD(&ring->active_list); INIT_LIST_HEAD(&ring->active_list);
@ -1830,21 +1829,21 @@ static int intel_init_ring_buffer(struct drm_device *dev,
goto error; goto error;
} }
if (ringbuf->obj == NULL) { WARN_ON(ringbuf->obj);
ret = intel_alloc_ringbuffer_obj(dev, ringbuf);
if (ret) {
DRM_ERROR("Failed to allocate ringbuffer %s: %d\n",
ring->name, ret);
goto error;
}
ret = intel_pin_and_map_ringbuffer_obj(dev, ringbuf); ret = intel_alloc_ringbuffer_obj(dev, ringbuf);
if (ret) { if (ret) {
DRM_ERROR("Failed to pin and map ringbuffer %s: %d\n", DRM_ERROR("Failed to allocate ringbuffer %s: %d\n",
ring->name, ret); ring->name, ret);
intel_destroy_ringbuffer_obj(ringbuf); goto error;
goto error; }
}
ret = intel_pin_and_map_ringbuffer_obj(dev, ringbuf);
if (ret) {
DRM_ERROR("Failed to pin and map ringbuffer %s: %d\n",
ring->name, ret);
intel_destroy_ringbuffer_obj(ringbuf);
goto error;
} }
/* Workaround an erratum on the i830 which causes a hang if /* Workaround an erratum on the i830 which causes a hang if