drm/i915/selftests: Replace opencoded clflush with drm_clflush_virt_range
We occasionally see that the clflush prior to a read of GPU data is
returning stale data, reminiscent of much earlier bugs fixed by adding a
second clflush for serialisation. As drm_clflush_virt_range() already
supplies the workaround, use it rather than open code the clflush
instruction.
References: 396f5d62d1 ("drm: Restore double clflush on the last partial cacheline")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180730075351.15569-3-chris@chris-wilson.co.uk
parent 39f3be162c
commit f6844a85e0
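For context, the reason drm_clflush_virt_range() is safe where the open-coded clflush is not: on x86 it walks the range a cacheline at a time between memory barriers and then flushes the last cacheline a second time, the serialising workaround restored in 396f5d62d1. Below is a simplified sketch of that x86 path, paraphrased from drivers/gpu/drm/drm_cache.c; the CPU feature check and the non-clflush fallbacks are omitted, so treat it as illustrative rather than the verbatim upstream code.

#include <asm/processor.h>	/* boot_cpu_data */
#include <asm/special_insns.h>	/* clflushopt() */
#include <asm/barrier.h>	/* mb() */

/* Simplified sketch of drm_clflush_virt_range(), x86 path only. */
void drm_clflush_virt_range(void *addr, unsigned long length)
{
	const int size = boot_cpu_data.x86_clflush_size;
	void *end = addr + length;

	/* Round down to the first cacheline covered by the range. */
	addr = (void *)(((unsigned long)addr) & -size);

	mb();
	for (; addr < end; addr += size)
		clflushopt(addr);
	/*
	 * Flush the last (possibly partial) cacheline once more:
	 * clflushopt is weakly ordered with respect to itself, and this
	 * extra flush provides the serialisation that the open-coded
	 * mb(); clflush(); mb() sequence in the selftest was missing.
	 */
	clflushopt(end - 1);
	mb();
}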
@@ -33,7 +33,8 @@ static int cpu_set(struct drm_i915_gem_object *obj,
 {
 	unsigned int needs_clflush;
 	struct page *page;
-	u32 *map;
+	void *map;
+	u32 *cpu;
 	int err;
 
 	err = i915_gem_obj_prepare_shmem_write(obj, &needs_clflush);
@@ -42,24 +43,19 @@ static int cpu_set(struct drm_i915_gem_object *obj,
 
 	page = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
 	map = kmap_atomic(page);
+	cpu = map + offset_in_page(offset);
 
-	if (needs_clflush & CLFLUSH_BEFORE) {
-		mb();
-		clflush(map+offset_in_page(offset) / sizeof(*map));
-		mb();
-	}
+	if (needs_clflush & CLFLUSH_BEFORE)
+		drm_clflush_virt_range(cpu, sizeof(*cpu));
 
-	map[offset_in_page(offset) / sizeof(*map)] = v;
+	*cpu = v;
 
-	if (needs_clflush & CLFLUSH_AFTER) {
-		mb();
-		clflush(map+offset_in_page(offset) / sizeof(*map));
-		mb();
-	}
+	if (needs_clflush & CLFLUSH_AFTER)
+		drm_clflush_virt_range(cpu, sizeof(*cpu));
 
 	kunmap_atomic(map);
 
 	i915_gem_obj_finish_shmem_access(obj);
 
 	return 0;
 }
@@ -69,7 +65,8 @@ static int cpu_get(struct drm_i915_gem_object *obj,
 {
 	unsigned int needs_clflush;
 	struct page *page;
-	u32 *map;
+	void *map;
+	u32 *cpu;
 	int err;
 
 	err = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
@@ -78,17 +75,16 @@ static int cpu_get(struct drm_i915_gem_object *obj,
 
 	page = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
 	map = kmap_atomic(page);
+	cpu = map + offset_in_page(offset);
 
-	if (needs_clflush & CLFLUSH_BEFORE) {
-		mb();
-		clflush(map+offset_in_page(offset) / sizeof(*map));
-		mb();
-	}
+	if (needs_clflush & CLFLUSH_BEFORE)
+		drm_clflush_virt_range(cpu, sizeof(*cpu));
+
+	*v = *cpu;
 
-	*v = map[offset_in_page(offset) / sizeof(*map)];
 	kunmap_atomic(map);
 
 	i915_gem_obj_finish_shmem_access(obj);
 
 	return 0;
 }