mirror of https://gitee.com/openkylin/linux.git
drm/i915: Flatten engine init control flow
Now that sanity prevails and we have the clean split between software init and starting the engines we can drop all the "have we allocated this struct already?" nonsense. Execlist code could benefit quite a bit more still, but that's for another patch. Reviewed-by: Dave Gordon <david.s.gordon@intel.com> Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
This commit is contained in:
parent
36d0a82ef4
commit
bfc882b4e3
|
@ -716,13 +716,13 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *ring)
|
|||
BUG_ON(!validate_cmds_sorted(ring, cmd_tables, cmd_table_count));
|
||||
BUG_ON(!validate_regs_sorted(ring));
|
||||
|
||||
if (hash_empty(ring->cmd_hash)) {
|
||||
ret = init_hash_table(ring, cmd_tables, cmd_table_count);
|
||||
if (ret) {
|
||||
DRM_ERROR("CMD: cmd_parser_init failed!\n");
|
||||
fini_hash_table(ring);
|
||||
return ret;
|
||||
}
|
||||
WARN_ON(!hash_empty(ring->cmd_hash));
|
||||
|
||||
ret = init_hash_table(ring, cmd_tables, cmd_table_count);
|
||||
if (ret) {
|
||||
DRM_ERROR("CMD: cmd_parser_init failed!\n");
|
||||
fini_hash_table(ring);
|
||||
return ret;
|
||||
}
|
||||
|
||||
ring->needs_cmd_parser = true;
|
||||
|
|
|
@ -1833,8 +1833,7 @@ int intel_lr_context_deferred_create(struct intel_context *ctx,
|
|||
int ret;
|
||||
|
||||
WARN_ON(ctx->legacy_hw_ctx.rcs_state != NULL);
|
||||
if (ctx->engine[ring->id].state)
|
||||
return 0;
|
||||
WARN_ON(ctx->engine[ring->id].state);
|
||||
|
||||
context_size = round_up(get_lr_context_size(ring), 4096);
|
||||
|
||||
|
|
|
@ -635,8 +635,7 @@ intel_init_pipe_control(struct intel_engine_cs *ring)
|
|||
{
|
||||
int ret;
|
||||
|
||||
if (ring->scratch.obj)
|
||||
return 0;
|
||||
WARN_ON(ring->scratch.obj);
|
||||
|
||||
ring->scratch.obj = i915_gem_alloc_object(ring->dev, 4096);
|
||||
if (ring->scratch.obj == NULL) {
|
||||
|
@ -1799,15 +1798,15 @@ int intel_alloc_ringbuffer_obj(struct drm_device *dev,
|
|||
static int intel_init_ring_buffer(struct drm_device *dev,
|
||||
struct intel_engine_cs *ring)
|
||||
{
|
||||
struct intel_ringbuffer *ringbuf = ring->buffer;
|
||||
struct intel_ringbuffer *ringbuf;
|
||||
int ret;
|
||||
|
||||
if (ringbuf == NULL) {
|
||||
ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL);
|
||||
if (!ringbuf)
|
||||
return -ENOMEM;
|
||||
ring->buffer = ringbuf;
|
||||
}
|
||||
WARN_ON(ring->buffer);
|
||||
|
||||
ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL);
|
||||
if (!ringbuf)
|
||||
return -ENOMEM;
|
||||
ring->buffer = ringbuf;
|
||||
|
||||
ring->dev = dev;
|
||||
INIT_LIST_HEAD(&ring->active_list);
|
||||
|
@ -1830,21 +1829,21 @@ static int intel_init_ring_buffer(struct drm_device *dev,
|
|||
goto error;
|
||||
}
|
||||
|
||||
if (ringbuf->obj == NULL) {
|
||||
ret = intel_alloc_ringbuffer_obj(dev, ringbuf);
|
||||
if (ret) {
|
||||
DRM_ERROR("Failed to allocate ringbuffer %s: %d\n",
|
||||
ring->name, ret);
|
||||
goto error;
|
||||
}
|
||||
WARN_ON(ringbuf->obj);
|
||||
|
||||
ret = intel_pin_and_map_ringbuffer_obj(dev, ringbuf);
|
||||
if (ret) {
|
||||
DRM_ERROR("Failed to pin and map ringbuffer %s: %d\n",
|
||||
ring->name, ret);
|
||||
intel_destroy_ringbuffer_obj(ringbuf);
|
||||
goto error;
|
||||
}
|
||||
ret = intel_alloc_ringbuffer_obj(dev, ringbuf);
|
||||
if (ret) {
|
||||
DRM_ERROR("Failed to allocate ringbuffer %s: %d\n",
|
||||
ring->name, ret);
|
||||
goto error;
|
||||
}
|
||||
|
||||
ret = intel_pin_and_map_ringbuffer_obj(dev, ringbuf);
|
||||
if (ret) {
|
||||
DRM_ERROR("Failed to pin and map ringbuffer %s: %d\n",
|
||||
ring->name, ret);
|
||||
intel_destroy_ringbuffer_obj(ringbuf);
|
||||
goto error;
|
||||
}
|
||||
|
||||
/* Workaround an erratum on the i830 which causes a hang if
|
||||
|
|
Loading…
Reference in New Issue