drm/i915/guc: Extract GuC stage desc pool creation into a helper

Since it's a two-step process (allocate the VMA, then pin-map it), error
handling in the caller becomes cleaner if we do the allocations in a helper.
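
With a create/destroy helper pair, the caller sees a single success/failure
point per subsystem instead of one goto label per intermediate step; the
helper unwinds its own first step when the second one fails. As a rough
user-space analogue of that pattern (a minimal sketch with hypothetical
names, not part of this patch):

	/* Sketch: two-step create/destroy helpers, user-space stand-ins only. */
	#include <errno.h>
	#include <stdlib.h>

	struct pool {
		void *backing;	/* step 1: the underlying allocation */
		void *vaddr;	/* step 2: a second resource derived from it */
	};

	/* Either both steps succeed, or the helper unwinds step 1 itself. */
	static int pool_create(struct pool *p, size_t size)
	{
		p->backing = malloc(size);
		if (!p->backing)
			return -ENOMEM;

		p->vaddr = calloc(1, size);	/* stand-in for the pin/map step */
		if (!p->vaddr) {
			free(p->backing);	/* undo step 1 before failing */
			p->backing = NULL;
			return -ENOMEM;
		}

		return 0;
	}

	static void pool_destroy(struct pool *p)
	{
		free(p->vaddr);
		free(p->backing);
	}

	/* Caller: one error path per helper, no per-step goto labels. */
	static int subsystem_init(struct pool *p)
	{
		int ret = pool_create(p, 4096);

		if (ret)
			return ret;

		/* later init steps would call pool_destroy(p) on failure */
		return 0;
	}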

Signed-off-by: Michał Winiarski <michal.winiarski@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Cc: Jeff McGee <jeff.mcgee@intel.com>
Cc: Michal Wajdeczko <michal.wajdeczko@intel.com>
Cc: Oscar Mateo <oscar.mateo@intel.com>
Reviewed-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20171025200020.16636-3-michal.winiarski@intel.com

@@ -311,6 +311,37 @@ static void guc_proc_desc_init(struct intel_guc *guc,
 	desc->priority = client->priority;
 }
 
+static int guc_stage_desc_pool_create(struct intel_guc *guc)
+{
+	struct i915_vma *vma;
+	void *vaddr;
+
+	vma = intel_guc_allocate_vma(guc,
+				     PAGE_ALIGN(sizeof(struct guc_stage_desc) *
+				     GUC_MAX_STAGE_DESCRIPTORS));
+	if (IS_ERR(vma))
+		return PTR_ERR(vma);
+
+	vaddr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
+	if (IS_ERR(vaddr)) {
+		i915_vma_unpin_and_release(&vma);
+		return PTR_ERR(vaddr);
+	}
+
+	guc->stage_desc_pool = vma;
+	guc->stage_desc_pool_vaddr = vaddr;
+	ida_init(&guc->stage_ids);
+
+	return 0;
+}
+
+static void guc_stage_desc_pool_destroy(struct intel_guc *guc)
+{
+	ida_destroy(&guc->stage_ids);
+	i915_gem_object_unpin_map(guc->stage_desc_pool->obj);
+	i915_vma_unpin_and_release(&guc->stage_desc_pool);
+}
+
 /*
  * Initialise/clear the stage descriptor shared with the GuC firmware.
  *
@@ -953,47 +984,29 @@ static void guc_ads_destroy(struct intel_guc *guc)
 int i915_guc_submission_init(struct drm_i915_private *dev_priv)
 {
 	struct intel_guc *guc = &dev_priv->guc;
-	struct i915_vma *vma;
-	void *vaddr;
 	int ret;
 
 	if (guc->stage_desc_pool)
 		return 0;
 
-	vma = intel_guc_allocate_vma(guc,
-				PAGE_ALIGN(sizeof(struct guc_stage_desc) *
-				GUC_MAX_STAGE_DESCRIPTORS));
-	if (IS_ERR(vma))
-		return PTR_ERR(vma);
-
-	guc->stage_desc_pool = vma;
-
-	vaddr = i915_gem_object_pin_map(guc->stage_desc_pool->obj, I915_MAP_WB);
-	if (IS_ERR(vaddr)) {
-		ret = PTR_ERR(vaddr);
-		goto err_vma;
-	}
-
-	guc->stage_desc_pool_vaddr = vaddr;
+	ret = guc_stage_desc_pool_create(guc);
+	if (ret)
+		return ret;
 
 	ret = intel_guc_log_create(guc);
 	if (ret < 0)
-		goto err_vaddr;
+		goto err_stage_desc_pool;
 
 	ret = guc_ads_create(guc);
 	if (ret < 0)
 		goto err_log;
 
-	ida_init(&guc->stage_ids);
-
 	return 0;
 
 err_log:
 	intel_guc_log_destroy(guc);
-err_vaddr:
-	i915_gem_object_unpin_map(guc->stage_desc_pool->obj);
-err_vma:
-	i915_vma_unpin_and_release(&guc->stage_desc_pool);
+err_stage_desc_pool:
+	guc_stage_desc_pool_destroy(guc);
 	return ret;
 }
@@ -1001,11 +1014,9 @@ void i915_guc_submission_fini(struct drm_i915_private *dev_priv)
 {
 	struct intel_guc *guc = &dev_priv->guc;
 
-	ida_destroy(&guc->stage_ids);
 	guc_ads_destroy(guc);
 	intel_guc_log_destroy(guc);
-	i915_gem_object_unpin_map(guc->stage_desc_pool->obj);
-	i915_vma_unpin_and_release(&guc->stage_desc_pool);
+	guc_stage_desc_pool_destroy(guc);
 }
 
 static void guc_interrupts_capture(struct drm_i915_private *dev_priv)