drm/i915: Some cleanups for the ppgtt lifetime handling

So when reviewing Michel's patch I've noticed a few things and cleaned
them up:
- The early checks in ppgtt_release are now redundant: The inactive
  list should always be empty now, so we can ditch these checks. Even
  for the aliasing ppgtt (though that's a different confusion) since
  we tear that down after all the objects are gone.
- The ppgtt handling functions are splattered all over. Consolidate
  them in i915_gem_gtt.c, give them OCD prefixes and add wrappers for
  get/put (quoted below).
- There was a bit of confusion in ppgtt_release about whether it cares
  about the active or the inactive list. It should care about both, so
  augment the WARNINGs to check for both.

There's still create_vm_for_ctx left to do, but that is blocked on the
removal of ppgtt->ctx. Once that's done we can rename it to
i915_ppgtt_create and move it next to its sibling ppgtt handling
functions.

v2: Move the ppgtt checks into the inline get/put functions as
suggested by Chris.

v3: Inline the now redundant ppgtt local variable.

Cc: Michel Thierry <michel.thierry@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Michel Thierry <michel.thierry@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Daniel Vetter, 2014-08-06 15:04:45 +02:00
commit ee960be7bb (parent b9d06dd9d1)
5 changed files with 33 additions and 43 deletions

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h

@@ -2549,7 +2549,6 @@ void i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj);
 #define ctx_to_ppgtt(ctx) container_of((ctx)->vm, struct i915_hw_ppgtt, base)
 #define vm_to_ppgtt(vm) container_of(vm, struct i915_hw_ppgtt, base)
 int __must_check i915_gem_context_init(struct drm_device *dev);
-void ppgtt_release(struct kref *kref);
 void i915_gem_context_fini(struct drm_device *dev);
 void i915_gem_context_reset(struct drm_device *dev);
 int i915_gem_context_open(struct drm_device *dev, struct drm_file *file);

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c

@@ -4477,7 +4477,6 @@ struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
 void i915_gem_vma_destroy(struct i915_vma *vma)
 {
 	struct i915_address_space *vm = NULL;
-	struct i915_hw_ppgtt *ppgtt = NULL;
 	WARN_ON(vma->node.allocated);
 
 	/* Keep the vma as a placeholder in the execbuffer reservation lists */
@@ -4485,10 +4484,8 @@ void i915_gem_vma_destroy(struct i915_vma *vma)
 		return;
 
 	vm = vma->vm;
-	ppgtt = vm_to_ppgtt(vm);
 
-	if (ppgtt)
-		kref_put(&ppgtt->ref, ppgtt_release);
+	i915_ppgtt_put(vm_to_ppgtt(vm));
 
 	list_del(&vma->vma_link);

diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c

@@ -96,33 +96,6 @@
 #define GEN6_CONTEXT_ALIGN (64<<10)
 #define GEN7_CONTEXT_ALIGN 4096
 
-static void do_ppgtt_cleanup(struct i915_hw_ppgtt *ppgtt)
-{
-	struct drm_device *dev = ppgtt->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct i915_address_space *vm = &ppgtt->base;
-
-	if (ppgtt == dev_priv->mm.aliasing_ppgtt ||
-	    (list_empty(&vm->active_list) && list_empty(&vm->inactive_list))) {
-		ppgtt->base.cleanup(&ppgtt->base);
-		return;
-	}
-
-	/* vmas should already be unbound */
-	WARN_ON(!list_empty(&vm->active_list));
-
-	ppgtt->base.cleanup(&ppgtt->base);
-}
-
-void ppgtt_release(struct kref *kref)
-{
-	struct i915_hw_ppgtt *ppgtt =
-		container_of(kref, struct i915_hw_ppgtt, ref);
-
-	do_ppgtt_cleanup(ppgtt);
-	kfree(ppgtt);
-}
-
 static size_t get_context_alignment(struct drm_device *dev)
 {
 	if (IS_GEN6(dev))
@@ -174,8 +147,7 @@ void i915_gem_context_free(struct kref *ctx_ref)
 		ppgtt = ctx_to_ppgtt(ctx);
 	}
 
-	if (ppgtt)
-		kref_put(&ppgtt->ref, ppgtt_release);
+	i915_ppgtt_put(ppgtt);
 	if (ctx->legacy_hw_ctx.rcs_state)
 		drm_gem_object_unreference(&ctx->legacy_hw_ctx.rcs_state->base);
 	list_del(&ctx->link);
@@ -222,7 +194,7 @@ create_vm_for_ctx(struct drm_device *dev, struct intel_context *ctx)
 	if (!ppgtt)
 		return ERR_PTR(-ENOMEM);
 
-	ret = i915_gem_init_ppgtt(dev, ppgtt);
+	ret = i915_ppgtt_init(dev, ppgtt);
 	if (ret) {
 		kfree(ppgtt);
 		return ERR_PTR(ret);
@@ -342,7 +314,7 @@ i915_gem_create_context(struct drm_device *dev,
 		/* For platforms which only have aliasing PPGTT, we fake the
 		 * address space and refcounting. */
 		ctx->vm = &dev_priv->mm.aliasing_ppgtt->base;
-		kref_get(&dev_priv->mm.aliasing_ppgtt->ref);
+		i915_ppgtt_get(dev_priv->mm.aliasing_ppgtt);
 	} else
 		ctx->vm = &dev_priv->gtt.base;

diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c

@@ -1180,7 +1180,7 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
 	return 0;
 }
 
-int i915_gem_init_ppgtt(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
+int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret = 0;
@@ -1211,6 +1211,19 @@ int i915_gem_init_ppgtt(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
 	return ret;
 }
 
+void i915_ppgtt_release(struct kref *kref)
+{
+	struct i915_hw_ppgtt *ppgtt =
+		container_of(kref, struct i915_hw_ppgtt, ref);
+
+	/* vmas should already be unbound */
+	WARN_ON(!list_empty(&ppgtt->base.active_list));
+	WARN_ON(!list_empty(&ppgtt->base.inactive_list));
+
+	ppgtt->base.cleanup(&ppgtt->base);
+	kfree(ppgtt);
+}
+
 static void
 ppgtt_bind_vma(struct i915_vma *vma,
 	       enum i915_cache_level cache_level,
@@ -2148,15 +2161,12 @@ i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
 				  struct i915_address_space *vm)
 {
 	struct i915_vma *vma;
-	struct i915_hw_ppgtt *ppgtt = NULL;
 
 	vma = i915_gem_obj_to_vma(obj, vm);
 	if (!vma)
 		vma = __i915_gem_vma_create(obj, vm);
 
-	ppgtt = vm_to_ppgtt(vm);
-	if (ppgtt)
-		kref_get(&ppgtt->ref);
+	i915_ppgtt_get(vm_to_ppgtt(vm));
 
 	return vma;
 }

diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h

@@ -272,7 +272,19 @@ void i915_gem_init_global_gtt(struct drm_device *dev);
 void i915_gem_setup_global_gtt(struct drm_device *dev, unsigned long start,
 			       unsigned long mappable_end, unsigned long end);
 
-int i915_gem_init_ppgtt(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt);
+int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt);
+void i915_ppgtt_release(struct kref *kref);
+
+static inline void i915_ppgtt_get(struct i915_hw_ppgtt *ppgtt)
+{
+	if (ppgtt)
+		kref_get(&ppgtt->ref);
+}
+static inline void i915_ppgtt_put(struct i915_hw_ppgtt *ppgtt)
+{
+	if (ppgtt)
+		kref_put(&ppgtt->ref, i915_ppgtt_release);
+}
 
 void i915_check_and_clear_faults(struct drm_device *dev);
 void i915_gem_suspend_gtt_mappings(struct drm_device *dev);