drm/i915: Clean up VMAs before freeing

It's quite common for an object to simply be on the inactive list (and
not unbound) when we want to free the context. This of course happens
with lazy unbinding. Put simply, this is needed whenever an object isn't
fully unbound but we want to free one of its VMAs, for whatever reason.
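
For illustration only (none of this is in the patch): with lazy
unbinding, VM teardown is what finally unbinds leftover VMAs. A minimal
sketch, assuming the i915_vma/mm_list layout of this period and the
real i915_vma_unbind() helper; example_drain_vm() itself is
hypothetical:

	/* hypothetical helper, not part of this patch */
	static void example_drain_vm(struct i915_address_space *vm)
	{
		struct i915_vma *vma, *next;

		/* lazily unbound VMAs sit on the inactive list; unbind them now */
		list_for_each_entry_safe(vma, next, &vm->inactive_list, mm_list)
			WARN_ON(i915_vma_unbind(vma));
	}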

NOTE: The aliasing PPGTT is not a proper VM, so it needs special casing.
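
Concretely (see the second hunk below), the special casing is just an
early exit: the aliasing PPGTT has no VMAs of its own to evict, so
ppgtt_release() can go straight to cleanup:

	if (ppgtt == dev_priv->mm.aliasing_ppgtt ||
	    (list_empty(&vm->active_list) && list_empty(&vm->inactive_list))) {
		ppgtt->base.cleanup(&ppgtt->base);
		return;
	}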

This addresses the fixup requirement mentioned in:
drm/i915: Better reset handling for contexts

In the flink and dmabuf cases, we can't assert that the object isn't
still active. To keep it more generic, just check the vma's link in the
object vma list. If we wanted to do a better job, we could track last
seqno (and active) per VMA. It was decided not to do this in the last
iteration. Unfortunately this means the assertion can miss real bugs
when using flink/dmabuf.
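
An annotated copy of that check (the comments are editorial, not in the
patch); reading list_is_singular() as "this is the object's only VMA"
is an interpretation, assuming vma_link is the VMA's entry in its
object's vma list:

	list_for_each_entry(vma, &vm->active_list, mm_list)
		if (WARN_ON(list_empty(&vma->vma_link) ||	/* VMA already unlinked from its object */
			    list_is_singular(&vma->vma_link)))	/* object has no other VMA keeping it alive */
			break;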

v2: Use the newly introduced i915_gem_evict_vm(). Note that handling
the aliasing PPGTT is special.
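
For reference, the declaration this patch relies on, plus a plausible
sketch of the helper's behaviour inferred from the two call sites below
(the body is not part of this diff): do_idle == true stalls for the GPU
and retires requests so active VMAs become evictable, do_idle == false
assumes the caller has already retired.

	int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);

	/* plausible shape of the helper, inferred from its use here */
	int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
	{
		struct i915_vma *vma, *next;
		int ret;

		if (do_idle) {
			ret = i915_gpu_idle(vm->dev);	/* wait for the GPU */
			if (ret)
				return ret;
			i915_gem_retire_requests(vm->dev);
		}

		/* everything is inactive now; unbind whatever isn't pinned */
		list_for_each_entry_safe(vma, next, &vm->inactive_list, mm_list)
			if (vma->obj->pin_count == 0)
				WARN_ON(i915_vma_unbind(vma));

		return 0;
	}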

Signed-off-by: Ben Widawsky <ben@bwidawsk.net>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
commit 679845ede0 (parent e20780439b)
Ben Widawsky, 2013-12-06 14:11:23 -08:00; committed by Daniel Vetter

1 changed file with 40 additions and 12 deletions

drivers/gpu/drm/i915/i915_drv.h

@@ -2260,6 +2260,17 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
 int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file);
 
+/* i915_gem_evict.c */
+int __must_check i915_gem_evict_something(struct drm_device *dev,
+					  struct i915_address_space *vm,
+					  int min_size,
+					  unsigned alignment,
+					  unsigned cache_level,
+					  bool mappable,
+					  bool nonblock);
+int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
+int i915_gem_evict_everything(struct drm_device *dev);
+
 /* i915_gem_gtt.c */
 void i915_check_and_clear_faults(struct drm_device *dev);
 void i915_gem_suspend_gtt_mappings(struct drm_device *dev);
@@ -2297,22 +2308,39 @@ static inline bool intel_enable_ppgtt(struct drm_device *dev, bool full)
 static inline void ppgtt_release(struct kref *kref)
 {
 	struct i915_hw_ppgtt *ppgtt = container_of(kref, struct i915_hw_ppgtt, ref);
+	struct drm_device *dev = ppgtt->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct i915_address_space *vm = &ppgtt->base;
+
+	if (ppgtt == dev_priv->mm.aliasing_ppgtt ||
+	    (list_empty(&vm->active_list) && list_empty(&vm->inactive_list))) {
+		ppgtt->base.cleanup(&ppgtt->base);
+		return;
+	}
 
-	ppgtt->base.cleanup(&ppgtt->base);
+	/*
+	 * Make sure vmas are unbound before we take down the drm_mm
+	 *
+	 * FIXME: Proper refcounting should take care of this, this shouldn't be
+	 * needed at all.
+	 */
+	if (!list_empty(&vm->active_list)) {
+		struct i915_vma *vma;
+
+		list_for_each_entry(vma, &vm->active_list, mm_list)
+			if (WARN_ON(list_empty(&vma->vma_link) ||
+				    list_is_singular(&vma->vma_link)))
+				break;
+
+		i915_gem_evict_vm(&ppgtt->base, true);
+	} else {
+		i915_gem_retire_requests(dev);
+		i915_gem_evict_vm(&ppgtt->base, false);
+	}
+
+	ppgtt->base.cleanup(&ppgtt->base);
 }
 
-/* i915_gem_evict.c */
-int __must_check i915_gem_evict_something(struct drm_device *dev,
-					  struct i915_address_space *vm,
-					  int min_size,
-					  unsigned alignment,
-					  unsigned cache_level,
-					  bool mappable,
-					  bool nonblock);
-int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
-int i915_gem_evict_everything(struct drm_device *dev);
-
 /* i915_gem_stolen.c */
 int i915_gem_init_stolen(struct drm_device *dev);
 int i915_gem_stolen_setup_compression(struct drm_device *dev, int size);