drm/i915: Make debugfs/per_file_stats scale better

Currently we walk the entire list of obj->vma for each obj within a file
to find the matching vma of this context. Since we know we are searching
for a particular vma bound to a user context, we can use the rbtree to
search for it rather than repeatedly walk everything.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190808162407.28121-1-chris@chris-wilson.co.uk
Author: Chris Wilson
Date:   2019-08-08 17:24:07 +01:00
Commit: 5b5efdf79a
Parent: c990b4c359

1 changed file with 37 additions and 22 deletions
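
As a rough illustration of why the rbtree lookup scales better than the
old list walk, here is a minimal userspace sketch. Everything in it
(vma_node, vm_id, lookup_by_walk, lookup_by_tree) is an illustrative
stand-in rather than kernel code, and it keys the tree on a small integer
where the kernel keys obj->vma.tree on the address-space pointer via
i915_vma_compare().

#include <stdio.h>

/* Stand-in for one binding (an "i915_vma") of an object into a vm. */
struct vma_node {
	int vm_id;			/* the kernel keys on the vm pointer */
	unsigned long long size;
	struct vma_node *left, *right;	/* per-object tree, like obj->vma.tree */
};

/* O(n): scan every binding the object has, as the old list walk did. */
static const struct vma_node *lookup_by_walk(const struct vma_node **list,
					     int count, int vm_id)
{
	for (int i = 0; i < count; i++)
		if (list[i]->vm_id == vm_id)
			return list[i];
	return NULL;
}

/* O(log n): descend the per-object tree, mirroring the descent driven by
 * i915_vma_compare() in the patch below (cmp < 0 goes right).
 */
static const struct vma_node *lookup_by_tree(const struct vma_node *root,
					     int vm_id)
{
	while (root) {
		int cmp = root->vm_id - vm_id;

		if (cmp == 0)
			return root;
		root = cmp < 0 ? root->right : root->left;
	}
	return NULL;
}

int main(void)
{
	struct vma_node v1 = { 1, 4096, NULL, NULL };
	struct vma_node v3 = { 3, 8192, NULL, NULL };
	struct vma_node v2 = { 2, 65536, &v1, &v3 };	/* tree rooted at vm_id 2 */
	const struct vma_node *list[] = { &v1, &v2, &v3 };

	printf("walk: vm 3 -> %llu bytes\n", lookup_by_walk(list, 3, 3)->size);
	printf("tree: vm 3 -> %llu bytes\n", lookup_by_tree(&v2, 3)->size);
	return 0;
}

The diff below makes the same trade inside per_file_stats(): when a
specific vm is requested, a binary descent of obj->vma.tree replaces the
walk over obj->vma.list.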

@@ -240,7 +240,6 @@ struct file_stats {
 	struct i915_address_space *vm;
 	unsigned long count;
 	u64 total, unbound;
-	u64 global, shared;
 	u64 active, inactive;
 	u64 closed;
 };
@@ -251,48 +250,64 @@ static int per_file_stats(int id, void *ptr, void *data)
 	struct file_stats *stats = data;
 	struct i915_vma *vma;
 
-	lockdep_assert_held(&obj->base.dev->struct_mutex);
-
 	stats->count++;
 	stats->total += obj->base.size;
 	if (!atomic_read(&obj->bind_count))
 		stats->unbound += obj->base.size;
-	if (obj->base.name || obj->base.dma_buf)
-		stats->shared += obj->base.size;
 
-	list_for_each_entry(vma, &obj->vma.list, obj_link) {
-		if (!drm_mm_node_allocated(&vma->node))
-			continue;
+	spin_lock(&obj->vma.lock);
+	if (!stats->vm) {
+		for_each_ggtt_vma(vma, obj) {
+			if (!drm_mm_node_allocated(&vma->node))
+				continue;
 
-		if (i915_vma_is_ggtt(vma)) {
-			stats->global += vma->node.size;
-		} else {
-			if (vma->vm != stats->vm)
-				continue;
-		}
+			if (i915_vma_is_active(vma))
+				stats->active += vma->node.size;
+			else
+				stats->inactive += vma->node.size;
 
-		if (i915_vma_is_active(vma))
-			stats->active += vma->node.size;
-		else
-			stats->inactive += vma->node.size;
+			if (i915_vma_is_closed(vma))
+				stats->closed += vma->node.size;
+		}
+	} else {
+		struct rb_node *p = obj->vma.tree.rb_node;
 
-		if (i915_vma_is_closed(vma))
-			stats->closed += vma->node.size;
+		while (p) {
+			long cmp;
+
+			vma = rb_entry(p, typeof(*vma), obj_node);
+			cmp = i915_vma_compare(vma, stats->vm, NULL);
+			if (cmp == 0) {
+				if (drm_mm_node_allocated(&vma->node)) {
+					if (i915_vma_is_active(vma))
+						stats->active += vma->node.size;
+					else
+						stats->inactive += vma->node.size;
+
+					if (i915_vma_is_closed(vma))
+						stats->closed += vma->node.size;
+				}
+				break;
+			}
+			if (cmp < 0)
+				p = p->rb_right;
+			else
+				p = p->rb_left;
+		}
 	}
+	spin_unlock(&obj->vma.lock);
 
 	return 0;
 }
 
 #define print_file_stats(m, name, stats) do { \
 	if (stats.count) \
-		seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu global, %llu shared, %llu unbound, %llu closed)\n", \
+		seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu unbound, %llu closed)\n", \
 			   name, \
 			   stats.count, \
 			   stats.total, \
 			   stats.active, \
 			   stats.inactive, \
-			   stats.global, \
-			   stats.shared, \
 			   stats.unbound, \
 			   stats.closed); \
 } while (0)
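
For completeness, a sketch of how such a per-file walk is expected to be
driven (a simplified assumption for illustration, not a copy of the
debugfs code; show_client_stats and its arguments are hypothetical, while
idr_for_each() and drm_file's object_idr/table_lock are existing kernel
interfaces):

static void show_client_stats(struct seq_file *m, const char *name,
			      struct i915_gem_context *ctx,
			      struct drm_file *file)
{
	struct file_stats stats = {
		.vm = ctx->vm,	/* account only vmas bound into this context's vm */
	};

	spin_lock(&file->table_lock);
	idr_for_each(&file->object_idr, per_file_stats, &stats);
	spin_unlock(&file->table_lock);

	print_file_stats(m, name, stats);
}

With stats.vm set, each object contributes at most the one vma found by
the rbtree search; with stats.vm left NULL, only the object's GGTT vmas
are accounted via the for_each_ggtt_vma() path.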