drm/i915: Compress GPU objects in error state

Our error states are quickly growing, pinning kernel memory with them.
The majority of the space is taken up by the error objects. These
compress well using zlib, and since they are mostly meaningless without
decoding anyway, encoding them does not hinder quickly scanning the
error state for familiar markers.

v2: Make the zlib dependency optional

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20161012090522.367-6-chris@chris-wilson.co.uk
This commit is contained in:
Chris Wilson 2016-10-12 10:05:22 +01:00
parent fc4c79c37e
commit 0a97015d45
3 changed files with 169 additions and 22 deletions

View File

@ -60,6 +60,18 @@ config DRM_I915_CAPTURE_ERROR
If in doubt, say "Y".
config DRM_I915_COMPRESS_ERROR
bool "Compress GPU error state"
depends on DRM_I915_CAPTURE_ERROR
select ZLIB_DEFLATE
default y
help
This option selects ZLIB_DEFLATE if it isn't already
selected and causes any error state captured upon a GPU hang
to be compressed using zlib.
If in doubt, say "Y".
config DRM_I915_USERPTR config DRM_I915_USERPTR
bool "Always enable userptr support" bool "Always enable userptr support"
depends on DRM_I915 depends on DRM_I915

View File

@ -820,9 +820,10 @@ struct drm_i915_error_state {
struct intel_instdone instdone; struct intel_instdone instdone;
struct drm_i915_error_object { struct drm_i915_error_object {
int page_count;
u64 gtt_offset; u64 gtt_offset;
u64 gtt_size; u64 gtt_size;
int page_count;
int unused;
u32 *pages[0]; u32 *pages[0];
} *ringbuffer, *batchbuffer, *wa_batchbuffer, *ctx, *hws_page; } *ringbuffer, *batchbuffer, *wa_batchbuffer, *ctx, *hws_page;

View File

@ -29,6 +29,7 @@
#include <generated/utsrelease.h> #include <generated/utsrelease.h>
#include <linux/stop_machine.h> #include <linux/stop_machine.h>
#include <linux/zlib.h>
#include "i915_drv.h" #include "i915_drv.h"
static const char *engine_str(int engine) static const char *engine_str(int engine)
@ -173,6 +174,110 @@ static void i915_error_puts(struct drm_i915_error_state_buf *e,
#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__) #define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
#define err_puts(e, s) i915_error_puts(e, s) #define err_puts(e, s) i915_error_puts(e, s)
#ifdef CONFIG_DRM_I915_COMPRESS_ERROR
/*
 * Set up a zlib deflate stream for compressing error-object pages.
 *
 * The workspace is allocated with GFP_ATOMIC | __GFP_NOWARN because we
 * are called from the GPU error-capture path, where sleeping is not
 * permitted and allocation failure is tolerable.
 *
 * Returns true on success; false if the workspace allocation or
 * zlib_deflateInit() fails (nothing is left allocated in that case).
 */
static bool compress_init(struct z_stream_s *zstream)
{
	void *workspace;

	memset(zstream, 0, sizeof(*zstream));

	workspace = kmalloc(zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL),
			    GFP_ATOMIC | __GFP_NOWARN);
	if (!workspace)
		return false;

	zstream->workspace = workspace;
	if (zlib_deflateInit(zstream, Z_DEFAULT_COMPRESSION) != Z_OK) {
		kfree(workspace);
		return false;
	}

	return true;
}
/*
 * Deflate one PAGE_SIZE page at @src into @dst, appending output pages
 * to dst->pages[] as they fill up.
 *
 * Output pages come from __get_free_page(GFP_ATOMIC | __GFP_NOWARN)
 * since this runs in the error-capture path where we must not sleep.
 *
 * Returns 0 on success, -ENOMEM if an output page could not be
 * allocated, or -EIO if zlib reports an error.
 */
static int compress_page(struct z_stream_s *zstream,
void *src,
struct drm_i915_error_object *dst)
{
zstream->next_in = src;
zstream->avail_in = PAGE_SIZE;
do {
if (zstream->avail_out == 0) {
unsigned long page;
/* Current output page is full; chain on a fresh one. */
page = __get_free_page(GFP_ATOMIC | __GFP_NOWARN);
if (!page)
return -ENOMEM;
dst->pages[dst->page_count++] = (void *)page;
zstream->next_out = (void *)page;
zstream->avail_out = PAGE_SIZE;
}
/* Z_SYNC_FLUSH: push pending output so avail_in drains each pass. */
if (zlib_deflate(zstream, Z_SYNC_FLUSH) != Z_OK)
return -EIO;
} while (zstream->avail_in);
/* Fallback to uncompressed if we increase size? */
/* NOTE(review): deliberately disabled via "0 &&" — presumably the
 * uncompressed fallback path was never wired up; confirm before
 * enabling. */
if (0 && zstream->total_out > zstream->total_in)
return -E2BIG;
return 0;
}
/*
 * Tear down the deflate stream created by compress_init().
 *
 * When @obj is non-NULL the capture succeeded: flush the remaining
 * compressed bits with Z_FINISH and record how much of the final
 * output page went unused so the decoder knows where the stream ends.
 * The zlib workspace is released in all cases.
 */
static void compress_fini(struct z_stream_s *z,
			  struct drm_i915_error_object *obj)
{
	if (obj) {
		zlib_deflate(z, Z_FINISH);
		obj->unused = z->avail_out;
	}

	zlib_deflateEnd(z);
	kfree(z->workspace);
}
static void err_compression_marker(struct drm_i915_error_state_buf *m)
{
err_puts(m, ":");
}
#else
/*
 * Stub used when CONFIG_DRM_I915_COMPRESS_ERROR is disabled: there is
 * no zlib state to set up, so report success unconditionally.
 */
static bool compress_init(struct z_stream_s *zstream)
{
return true;
}
/*
 * Uncompressed fallback (CONFIG_DRM_I915_COMPRESS_ERROR=n): copy the
 * page verbatim into a freshly allocated page and append it to the
 * error object. GFP_ATOMIC as this runs from the error-capture path.
 *
 * Returns 0 on success or -ENOMEM on allocation failure. @zstream is
 * unused; it exists only to keep the signature shared with the
 * compressing variant.
 */
static int compress_page(struct z_stream_s *zstream,
			 void *src,
			 struct drm_i915_error_object *dst)
{
	void *ptr;

	ptr = (void *)__get_free_page(GFP_ATOMIC | __GFP_NOWARN);
	if (!ptr)
		return -ENOMEM;

	memcpy(ptr, src, PAGE_SIZE);
	dst->pages[dst->page_count++] = ptr;

	return 0;
}
/* No zlib state to flush or free when compression is disabled. */
static void compress_fini(struct z_stream_s *zstream,
struct drm_i915_error_object *dst)
{
}
static void err_compression_marker(struct drm_i915_error_state_buf *m)
{
err_puts(m, "~");
}
#endif
static void print_error_buffers(struct drm_i915_error_state_buf *m, static void print_error_buffers(struct drm_i915_error_state_buf *m,
const char *name, const char *name,
struct drm_i915_error_buffer *err, struct drm_i915_error_buffer *err,
@ -340,12 +445,36 @@ void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
va_end(args); va_end(args);
} }
/*
 * Number of 32-bit words (one ascii85 group each) needed to cover
 * @len bytes, i.e. len / 4 rounded up.
 */
static int
ascii85_encode_len(int len)
{
	return (len + 3) / 4;
}
/*
 * Encode one 32-bit word as five ascii85 digits ('!'..'u'), writing a
 * NUL-terminated string into @out (which must hold at least 6 bytes).
 *
 * Returns false without touching @out when @in is zero; the caller
 * then emits the single-character "z" abbreviation for an all-zero
 * group instead.
 */
static bool
ascii85_encode(u32 in, char *out)
{
	int pos;

	if (in == 0)
		return false;

	out[5] = '\0';
	for (pos = 4; pos >= 0; pos--) {
		out[pos] = '!' + in % 85;
		in /= 85;
	}

	return true;
}
static void print_error_obj(struct drm_i915_error_state_buf *m, static void print_error_obj(struct drm_i915_error_state_buf *m,
struct intel_engine_cs *engine, struct intel_engine_cs *engine,
const char *name, const char *name,
struct drm_i915_error_object *obj) struct drm_i915_error_object *obj)
{ {
int page, offset, elt; char out[6];
int page;
if (!obj) if (!obj)
return; return;
@ -357,13 +486,23 @@ static void print_error_obj(struct drm_i915_error_state_buf *m,
lower_32_bits(obj->gtt_offset)); lower_32_bits(obj->gtt_offset));
} }
for (page = offset = 0; page < obj->page_count; page++) { err_compression_marker(m);
for (elt = 0; elt < PAGE_SIZE/4; elt++) { for (page = 0; page < obj->page_count; page++) {
err_printf(m, "%08x : %08x\n", offset, int i, len;
obj->pages[page][elt]);
offset += 4; len = PAGE_SIZE;
if (page == obj->page_count - 1)
len -= obj->unused;
len = ascii85_encode_len(len);
for (i = 0; i < len; i++) {
if (ascii85_encode(obj->pages[page][i], out))
err_puts(m, out);
else
err_puts(m, "z");
} }
} }
err_puts(m, "\n");
} }
static void err_print_capabilities(struct drm_i915_error_state_buf *m, static void err_print_capabilities(struct drm_i915_error_state_buf *m,
@ -643,20 +782,6 @@ static void i915_error_state_free(struct kref *error_ref)
kfree(error); kfree(error);
} }
/*
 * (Removed by this patch.) Copy one page of object contents into a
 * freshly allocated page appended to @dst->pages[]. GFP_ATOMIC since
 * the error-capture path may not sleep.
 *
 * Returns 0 on success or -ENOMEM on allocation failure.
 */
static int compress_page(void *src, struct drm_i915_error_object *dst)
{
	void *ptr;

	ptr = (void *)__get_free_page(GFP_ATOMIC | __GFP_NOWARN);
	if (!ptr)
		return -ENOMEM;

	dst->pages[dst->page_count++] = memcpy(ptr, src, PAGE_SIZE);
	return 0;
}
static struct drm_i915_error_object * static struct drm_i915_error_object *
i915_error_object_create(struct drm_i915_private *i915, i915_error_object_create(struct drm_i915_private *i915,
struct i915_vma *vma) struct i915_vma *vma)
@ -664,6 +789,7 @@ i915_error_object_create(struct drm_i915_private *i915,
struct i915_ggtt *ggtt = &i915->ggtt; struct i915_ggtt *ggtt = &i915->ggtt;
const u64 slot = ggtt->error_capture.start; const u64 slot = ggtt->error_capture.start;
struct drm_i915_error_object *dst; struct drm_i915_error_object *dst;
struct z_stream_s zstream;
unsigned long num_pages; unsigned long num_pages;
struct sgt_iter iter; struct sgt_iter iter;
dma_addr_t dma; dma_addr_t dma;
@ -672,6 +798,7 @@ i915_error_object_create(struct drm_i915_private *i915,
return NULL; return NULL;
num_pages = min_t(u64, vma->size, vma->obj->base.size) >> PAGE_SHIFT; num_pages = min_t(u64, vma->size, vma->obj->base.size) >> PAGE_SHIFT;
num_pages = DIV_ROUND_UP(10 * num_pages, 8); /* worstcase zlib growth */
dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *),
GFP_ATOMIC | __GFP_NOWARN); GFP_ATOMIC | __GFP_NOWARN);
if (!dst) if (!dst)
@ -680,6 +807,12 @@ i915_error_object_create(struct drm_i915_private *i915,
dst->gtt_offset = vma->node.start; dst->gtt_offset = vma->node.start;
dst->gtt_size = vma->node.size; dst->gtt_size = vma->node.size;
dst->page_count = 0; dst->page_count = 0;
dst->unused = 0;
if (!compress_init(&zstream)) {
kfree(dst);
return NULL;
}
for_each_sgt_dma(dma, iter, vma->pages) { for_each_sgt_dma(dma, iter, vma->pages) {
void __iomem *s; void __iomem *s;
@ -689,7 +822,7 @@ i915_error_object_create(struct drm_i915_private *i915,
I915_CACHE_NONE, 0); I915_CACHE_NONE, 0);
s = io_mapping_map_atomic_wc(&ggtt->mappable, slot); s = io_mapping_map_atomic_wc(&ggtt->mappable, slot);
ret = compress_page((void * __force)s, dst); ret = compress_page(&zstream, (void __force *)s, dst);
io_mapping_unmap_atomic(s); io_mapping_unmap_atomic(s);
if (ret) if (ret)
@ -704,6 +837,7 @@ i915_error_object_create(struct drm_i915_private *i915,
dst = NULL; dst = NULL;
out: out:
compress_fini(&zstream, dst);
ggtt->base.clear_range(&ggtt->base, slot, PAGE_SIZE, true); ggtt->base.clear_range(&ggtt->base, slot, PAGE_SIZE, true);
return dst; return dst;
} }