drm/i915: Check domains for userptr on release
When we return pages to the system, we release control over them and should defensively return them to the CPU write domain so that we catch any external writes on reacquiring them (e.g. to transparently swapout/swapin). While we did this defensive clflushing for ordinary shmem pages, it was forgotten for userptr. Fortunately, userptr objects are normally cache coherent and so oblivious to the forgotten domain tracking.

References: a679f58d05 ("drm/i915: Flush pages on acquisition")
References: 754a254427 ("drm/i915: Skip object locking around a no-op set-domain ioctl")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Matthew Auld <matthew.william.auld@gmail.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
Reviewed-by: Matthew Auld <matthew.william.auld@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190331094620.15185-1-chris@chris-wilson.co.uk
parent cde5f7edd5
commit ee8efa8079
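For illustration only, here is a minimal userspace sketch of the defensive pattern described above: on releasing backing pages, fall back to the CPU write domain and mark non-coherent objects as needing a flush on reacquisition. The struct, the DOMAIN_* flags and release_to_cpu_write_domain() are hypothetical stand-ins, not the i915 implementation; the actual fix simply reuses the existing shmem release helper, __i915_gem_object_release_shmem(), from the userptr put_pages path, as shown in the diff below.

#include <stdbool.h>
#include <stdio.h>

/* Domains a buffer's contents can be owned by (illustrative only). */
#define DOMAIN_CPU (1u << 0)
#define DOMAIN_GTT (1u << 1)

struct mock_object {
        unsigned int read_domains;  /* caches that may hold a copy */
        unsigned int write_domain;  /* domain currently allowed to write */
        bool cache_coherent;        /* does the device snoop the CPU cache? */
        bool cache_dirty;           /* must we flush before the GPU reuses it? */
};

/*
 * Called when the backing pages are handed back to the system (e.g. on
 * swapout or userptr release): assume anything outside the GPU may now
 * write them, so fall back to the CPU write domain and, for non-coherent
 * objects, remember that a cache flush is needed on reacquisition.
 */
static void release_to_cpu_write_domain(struct mock_object *obj)
{
        if (!obj->cache_coherent)
                obj->cache_dirty = true;

        obj->read_domains = DOMAIN_CPU;
        obj->write_domain = DOMAIN_CPU;
}

int main(void)
{
        struct mock_object obj = {
                .read_domains = DOMAIN_GTT,
                .write_domain = DOMAIN_GTT,
        };

        release_to_cpu_write_domain(&obj);
        printf("cache_dirty=%d, write_domain=%#x\n",
               obj.cache_dirty, obj.write_domain);
        return 0;
}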
@@ -308,7 +308,7 @@ static void __start_cpu_write(struct drm_i915_gem_object *obj)
 		obj->cache_dirty = true;
 }
 
-static void
+void
 __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
 				struct sg_table *pages,
 				bool needs_clflush)
@@ -2202,7 +2202,6 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj,
 	struct page *page;
 
 	__i915_gem_object_release_shmem(obj, pages, true);
-
 	i915_gem_gtt_finish_pages(obj, pages);
 
 	if (i915_gem_object_needs_bit17_swizzle(obj))
@@ -502,4 +502,8 @@ void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj,
 					 unsigned int cache_level);
 void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj);
 
+void __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
+				     struct sg_table *pages,
+				     bool needs_clflush);
+
 #endif
@@ -673,9 +673,7 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj,
 	if (!pages)
 		return;
 
-	if (obj->mm.madv != I915_MADV_WILLNEED)
-		obj->mm.dirty = false;
-
+	__i915_gem_object_release_shmem(obj, pages, true);
 	i915_gem_gtt_finish_pages(obj, pages);
 
 	for_each_sgt_page(page, sgt_iter, pages) {