2009-02-18 09:08:50 +08:00
|
|
|
/*
|
|
|
|
* Copyright © 2008 Intel Corporation
|
|
|
|
*
|
|
|
|
* Permission is hereby granted, free of charge, to any person obtaining a
|
|
|
|
* copy of this software and associated documentation files (the "Software"),
|
|
|
|
* to deal in the Software without restriction, including without limitation
|
|
|
|
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
|
|
|
* and/or sell copies of the Software, and to permit persons to whom the
|
|
|
|
* Software is furnished to do so, subject to the following conditions:
|
|
|
|
*
|
|
|
|
* The above copyright notice and this permission notice (including the next
|
|
|
|
* paragraph) shall be included in all copies or substantial portions of the
|
|
|
|
* Software.
|
|
|
|
*
|
|
|
|
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
|
|
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
|
|
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
|
|
|
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
|
|
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
|
|
|
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
|
|
|
|
* IN THE SOFTWARE.
|
|
|
|
*
|
|
|
|
* Authors:
|
|
|
|
* Eric Anholt <eric@anholt.net>
|
|
|
|
* Keith Packard <keithp@keithp.com>
|
|
|
|
*
|
|
|
|
*/
|
|
|
|
|
2017-03-03 17:13:38 +08:00
|
|
|
#include <linux/sched/mm.h>
|
2019-04-05 19:00:08 +08:00
|
|
|
#include <linux/sort.h>
|
|
|
|
|
2019-01-18 05:03:34 +08:00
|
|
|
#include <drm/drm_debugfs.h>
|
2019-06-13 16:44:15 +08:00
|
|
|
|
2019-05-28 17:29:49 +08:00
|
|
|
#include "gem/i915_gem_context.h"
|
2020-04-30 19:18:12 +08:00
|
|
|
#include "gt/intel_gt_buffer_pool.h"
|
2020-04-25 00:28:05 +08:00
|
|
|
#include "gt/intel_gt_clock_utils.h"
|
2020-07-08 08:39:47 +08:00
|
|
|
#include "gt/intel_gt.h"
|
2019-08-09 04:27:58 +08:00
|
|
|
#include "gt/intel_gt_pm.h"
|
2019-10-04 21:40:06 +08:00
|
|
|
#include "gt/intel_gt_requests.h"
|
2019-04-25 01:48:39 +08:00
|
|
|
#include "gt/intel_reset.h"
|
2019-09-27 19:08:49 +08:00
|
|
|
#include "gt/intel_rc6.h"
|
2019-10-25 05:16:41 +08:00
|
|
|
#include "gt/intel_rps.h"
|
2020-07-08 08:39:52 +08:00
|
|
|
#include "gt/intel_sseu_debugfs.h"
|
2019-04-25 01:48:39 +08:00
|
|
|
|
2019-05-02 23:02:43 +08:00
|
|
|
#include "i915_debugfs.h"
|
2019-12-05 23:43:40 +08:00
|
|
|
#include "i915_debugfs_params.h"
|
2019-04-29 20:29:27 +08:00
|
|
|
#include "i915_irq.h"
|
drm/i915: Show timeline dependencies for debug
Include the signalers each request in the timeline is waiting on, as a
means to try and identify the cause of a stall. This can be quite
verbose, even as for now we only show each request in the timeline and
its immediate antecedents.
This generates output like:
Timeline 886: { count 1, ready: 0, inflight: 0, seqno: { current: 664, last: 666 }, engine: rcs0 }
U 886:29a- prio=0 @ 134ms: gem_exec_parall<4621>
U bc1:27a- prio=0 @ 134ms: gem_exec_parall[4917]
Timeline 825: { count 1, ready: 0, inflight: 0, seqno: { current: 802, last: 804 }, engine: vcs0 }
U 825:324 prio=0 @ 107ms: gem_exec_parall<4518>
U b75:140- prio=0 @ 110ms: gem_exec_parall<5486>
Timeline b46: { count 1, ready: 0, inflight: 0, seqno: { current: 782, last: 784 }, engine: vcs0 }
U b46:310- prio=0 @ 70ms: gem_exec_parall<5428>
U c11:170- prio=0 @ 70ms: gem_exec_parall[5501]
Timeline 96b: { count 1, ready: 0, inflight: 0, seqno: { current: 632, last: 634 }, engine: vcs0 }
U 96b:27a- prio=0 @ 67ms: gem_exec_parall<4878>
U b75:19e- prio=0 @ 67ms: gem_exec_parall<5486>
Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20201119165616.10834-6-chris@chris-wilson.co.uk
2020-11-20 00:56:16 +08:00
|
|
|
#include "i915_scheduler.h"
|
2019-08-06 18:07:28 +08:00
|
|
|
#include "i915_trace.h"
|
2019-04-05 19:00:15 +08:00
|
|
|
#include "intel_pm.h"
|
2019-04-26 16:17:22 +08:00
|
|
|
#include "intel_sideband.h"
|
2019-01-16 23:33:04 +08:00
|
|
|
|
drm/i915: debugfs spring cleaning
Just like with sysfs, we do some major overhaul.
Pass dev_priv instead of dev to all feature macros (IS_, HAS_,
INTEL_, etc.). This has the side effect that a bunch of functions
now get dev_priv passed instead of dev.
All calls to INTEL_INFO()->gen have been replaced with
INTEL_GEN().
We want access to to_i915(node->minor->dev) in a lot of places,
so add the node_to_i915() helper to accommodate for this.
Finally, we have quite a few cases where we get a void * pointer,
and need to cast it to drm_device *, only to run to_i915() on it.
Add cast_to_i915() to do this.
v2: Don't introduce extra dev (Chris)
v3: Make pipe_crc_info have a pointer to drm_i915_private instead of
drm_device. This saves a bit of space, since we never use
drm_device anywhere in these functions.
Also some minor fixup that I missed in the previous version.
v4: Changed the code a bit so that dev_priv is passed directly
to various functions, thus removing the need for the
cast_to_i915() helper. Also did some additional cleanup.
v5: Additional cleanup of newly introduced changes.
v6: Rebase again because of conflict.
Signed-off-by: David Weinehall <david.weinehall@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20160822105931.pcbe2lpsgzckzboa@boom
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
2016-08-22 18:59:31 +08:00
|
|
|
/* Resolve a debugfs info node back to our drm_i915_private device. */
static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
{
	return to_i915(node->minor->dev);
}
|
|
|
|
|
2010-08-25 23:03:34 +08:00
|
|
|
static int i915_capabilities(struct seq_file *m, void *data)
|
|
|
|
{
|
2019-12-08 02:29:37 +08:00
|
|
|
struct drm_i915_private *i915 = node_to_i915(m->private);
|
2017-12-19 19:43:44 +08:00
|
|
|
struct drm_printer p = drm_seq_file_printer(m);
|
2010-08-25 23:03:34 +08:00
|
|
|
|
2019-12-08 02:29:37 +08:00
|
|
|
seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(i915));
|
2019-09-11 19:46:55 +08:00
|
|
|
|
2019-12-08 02:29:37 +08:00
|
|
|
intel_device_info_print_static(INTEL_INFO(i915), &p);
|
|
|
|
intel_device_info_print_runtime(RUNTIME_INFO(i915), &p);
|
2020-07-08 08:39:47 +08:00
|
|
|
intel_gt_info_print(&i915->gt.info, &p);
|
2019-12-08 02:29:37 +08:00
|
|
|
intel_driver_caps_print(&i915->caps, &p);
|
2010-08-25 23:03:34 +08:00
|
|
|
|
2017-02-07 05:36:08 +08:00
|
|
|
kernel_param_lock(THIS_MODULE);
|
2020-06-18 23:04:02 +08:00
|
|
|
i915_params_dump(&i915->params, &p);
|
2017-02-07 05:36:08 +08:00
|
|
|
kernel_param_unlock(THIS_MODULE);
|
|
|
|
|
2010-08-25 23:03:34 +08:00
|
|
|
return 0;
|
|
|
|
}
|
2009-02-18 09:08:50 +08:00
|
|
|
|
2016-05-12 21:18:52 +08:00
|
|
|
static char get_tiling_flag(struct drm_i915_gem_object *obj)
|
2009-02-11 22:26:38 +08:00
|
|
|
{
|
2016-08-05 17:14:23 +08:00
|
|
|
switch (i915_gem_object_get_tiling(obj)) {
|
2011-08-17 03:34:10 +08:00
|
|
|
default:
|
2016-04-15 18:34:52 +08:00
|
|
|
case I915_TILING_NONE: return ' ';
|
|
|
|
case I915_TILING_X: return 'X';
|
|
|
|
case I915_TILING_Y: return 'Y';
|
2011-08-17 03:34:10 +08:00
|
|
|
}
|
2009-02-11 22:26:38 +08:00
|
|
|
}
|
|
|
|
|
2016-05-12 21:18:52 +08:00
|
|
|
static char get_global_flag(struct drm_i915_gem_object *obj)
|
2016-04-15 18:34:52 +08:00
|
|
|
{
|
2019-08-22 14:09:13 +08:00
|
|
|
return READ_ONCE(obj->userfault_count) ? 'g' : ' ';
|
2016-04-15 18:34:52 +08:00
|
|
|
}
|
|
|
|
|
2016-05-12 21:18:52 +08:00
|
|
|
static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
|
2013-08-01 08:00:00 +08:00
|
|
|
{
|
2016-10-28 20:58:35 +08:00
|
|
|
return obj->mm.mapping ? 'M' : ' ';
|
2013-08-01 08:00:00 +08:00
|
|
|
}
|
|
|
|
|
2017-10-07 06:18:28 +08:00
|
|
|
static const char *
|
|
|
|
stringify_page_sizes(unsigned int page_sizes, char *buf, size_t len)
|
|
|
|
{
|
|
|
|
size_t x = 0;
|
|
|
|
|
|
|
|
switch (page_sizes) {
|
|
|
|
case 0:
|
|
|
|
return "";
|
|
|
|
case I915_GTT_PAGE_SIZE_4K:
|
|
|
|
return "4K";
|
|
|
|
case I915_GTT_PAGE_SIZE_64K:
|
|
|
|
return "64K";
|
|
|
|
case I915_GTT_PAGE_SIZE_2M:
|
|
|
|
return "2M";
|
|
|
|
default:
|
|
|
|
if (!buf)
|
|
|
|
return "M";
|
|
|
|
|
|
|
|
if (page_sizes & I915_GTT_PAGE_SIZE_2M)
|
|
|
|
x += snprintf(buf + x, len - x, "2M, ");
|
|
|
|
if (page_sizes & I915_GTT_PAGE_SIZE_64K)
|
|
|
|
x += snprintf(buf + x, len - x, "64K, ");
|
|
|
|
if (page_sizes & I915_GTT_PAGE_SIZE_4K)
|
|
|
|
x += snprintf(buf + x, len - x, "4K, ");
|
|
|
|
buf[x-2] = '\0';
|
|
|
|
|
|
|
|
return buf;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-02-12 00:14:51 +08:00
|
|
|
/*
 * Print a one-line summary of a GEM object: status flags, size, GPU
 * read/write domains, cache mode, every allocated VMA binding (with
 * GGTT view details and fence), pin count and the engine that last
 * wrote to it.
 */
void
i915_debugfs_describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct intel_engine_cs *engine;
	struct i915_vma *vma;
	int pin_count = 0;

	/* Flags: tiling (X/Y), 'g' userfault, 'M' kernel-mapped. */
	seq_printf(m, "%pK: %c%c%c %8zdKiB %02x %02x %s%s%s",
		   &obj->base,
		   get_tiling_flag(obj),
		   get_global_flag(obj),
		   get_pin_mapped_flag(obj),
		   obj->base.size / 1024,
		   obj->read_domains,
		   obj->write_domain,
		   i915_cache_level_str(dev_priv, obj->cache_level),
		   obj->mm.dirty ? " dirty" : "",
		   obj->mm.madv == I915_MADV_DONTNEED ? " purgeable" : "");
	if (obj->base.name)
		seq_printf(m, " (name: %d)", obj->base.name);

	spin_lock(&obj->vma.lock);
	list_for_each_entry(vma, &obj->vma.list, obj_link) {
		if (!drm_mm_node_allocated(&vma->node))
			continue;

		/*
		 * Drop the vma list lock while printing this entry
		 * (seq_printf may sleep/allocate), then retake it before
		 * advancing. NOTE(review): the list can mutate while
		 * unlocked — presumably tolerated for debugfs output;
		 * confirm the iteration remains safe.
		 */
		spin_unlock(&obj->vma.lock);

		if (i915_vma_is_pinned(vma))
			pin_count++;

		seq_printf(m, " (%sgtt offset: %08llx, size: %08llx, pages: %s",
			   i915_vma_is_ggtt(vma) ? "g" : "pp",
			   vma->node.start, vma->node.size,
			   stringify_page_sizes(vma->page_sizes.gtt, NULL, 0));
		if (i915_vma_is_ggtt(vma)) {
			/* Describe how the GGTT view maps the object. */
			switch (vma->ggtt_view.type) {
			case I915_GGTT_VIEW_NORMAL:
				seq_puts(m, ", normal");
				break;

			case I915_GGTT_VIEW_PARTIAL:
				seq_printf(m, ", partial [%08llx+%x]",
					   vma->ggtt_view.partial.offset << PAGE_SHIFT,
					   vma->ggtt_view.partial.size << PAGE_SHIFT);
				break;

			case I915_GGTT_VIEW_ROTATED:
				seq_printf(m, ", rotated [(%ux%u, stride=%u, offset=%u), (%ux%u, stride=%u, offset=%u)]",
					   vma->ggtt_view.rotated.plane[0].width,
					   vma->ggtt_view.rotated.plane[0].height,
					   vma->ggtt_view.rotated.plane[0].stride,
					   vma->ggtt_view.rotated.plane[0].offset,
					   vma->ggtt_view.rotated.plane[1].width,
					   vma->ggtt_view.rotated.plane[1].height,
					   vma->ggtt_view.rotated.plane[1].stride,
					   vma->ggtt_view.rotated.plane[1].offset);
				break;

			case I915_GGTT_VIEW_REMAPPED:
				seq_printf(m, ", remapped [(%ux%u, stride=%u, offset=%u), (%ux%u, stride=%u, offset=%u)]",
					   vma->ggtt_view.remapped.plane[0].width,
					   vma->ggtt_view.remapped.plane[0].height,
					   vma->ggtt_view.remapped.plane[0].stride,
					   vma->ggtt_view.remapped.plane[0].offset,
					   vma->ggtt_view.remapped.plane[1].width,
					   vma->ggtt_view.remapped.plane[1].height,
					   vma->ggtt_view.remapped.plane[1].stride,
					   vma->ggtt_view.remapped.plane[1].offset);
				break;

			default:
				MISSING_CASE(vma->ggtt_view.type);
				break;
			}
		}
		if (vma->fence)
			seq_printf(m, " , fence: %d", vma->fence->id);
		seq_puts(m, ")");

		spin_lock(&obj->vma.lock);
	}
	spin_unlock(&obj->vma.lock);

	seq_printf(m, " (pinned x %d)", pin_count);
	if (i915_gem_object_is_stolen(obj))
		seq_printf(m, " (stolen: %08llx)", obj->stolen->start);
	if (i915_gem_object_is_framebuffer(obj))
		seq_printf(m, " (fb)");

	engine = i915_gem_object_last_write_engine(obj);
	if (engine)
		seq_printf(m, " (%s)", engine->name);
}
|
|
|
|
|
drm/i915: debugfs spring cleaning
Just like with sysfs, we do some major overhaul.
Pass dev_priv instead of dev to all feature macros (IS_, HAS_,
INTEL_, etc.). This has the side effect that a bunch of functions
now get dev_priv passed instead of dev.
All calls to INTEL_INFO()->gen have been replaced with
INTEL_GEN().
We want access to to_i915(node->minor->dev) in a lot of places,
so add the node_to_i915() helper to accommodate for this.
Finally, we have quite a few cases where we get a void * pointer,
and need to cast it to drm_device *, only to run to_i915() on it.
Add cast_to_i915() to do this.
v2: Don't introduce extra dev (Chris)
v3: Make pipe_crc_info have a pointer to drm_i915_private instead of
drm_device. This saves a bit of space, since we never use
drm_device anywhere in these functions.
Also some minor fixup that I missed in the previous version.
v4: Changed the code a bit so that dev_priv is passed directly
to various functions, thus removing the need for the
cast_to_i915() helper. Also did some additional cleanup.
v5: Additional cleanup of newly introduced changes.
v6: Rebase again because of conflict.
Signed-off-by: David Weinehall <david.weinehall@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20160822105931.pcbe2lpsgzckzboa@boom
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
2016-08-22 18:59:31 +08:00
|
|
|
static int i915_gem_object_info(struct seq_file *m, void *data)
|
2010-09-30 18:46:12 +08:00
|
|
|
{
|
2019-06-12 18:57:20 +08:00
|
|
|
struct drm_i915_private *i915 = node_to_i915(m->private);
|
2019-12-27 21:37:48 +08:00
|
|
|
struct intel_memory_region *mr;
|
|
|
|
enum intel_region_id id;
|
2010-09-30 18:46:12 +08:00
|
|
|
|
2019-08-03 05:21:36 +08:00
|
|
|
seq_printf(m, "%u shrinkable [%u free] objects, %llu bytes\n",
|
2019-06-12 18:57:20 +08:00
|
|
|
i915->mm.shrink_count,
|
2019-08-03 05:21:36 +08:00
|
|
|
atomic_read(&i915->mm.free_count),
|
2019-06-12 18:57:20 +08:00
|
|
|
i915->mm.shrink_memory);
|
2019-12-27 21:37:48 +08:00
|
|
|
for_each_memory_region(mr, i915, id)
|
|
|
|
seq_printf(m, "%s: total:%pa, available:%pa bytes\n",
|
|
|
|
mr->name, &mr->total, &mr->avail);
|
2010-09-30 18:46:12 +08:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2016-10-12 17:05:18 +08:00
|
|
|
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
|
2017-02-15 00:46:11 +08:00
|
|
|
/*
 * read() for the GPU error/coredump debugfs files. Streams the
 * serialized coredump to userspace via a kernel bounce buffer.
 */
static ssize_t gpu_state_read(struct file *file, char __user *ubuf,
			      size_t count, loff_t *pos)
{
	struct i915_gpu_coredump *error = file->private_data;
	void *bounce;
	ssize_t ret;

	if (!error)
		return 0;

	/* Bounce buffer required because of kernfs __user API convenience. */
	bounce = kmalloc(count, GFP_KERNEL);
	if (!bounce)
		return -ENOMEM;

	ret = i915_gpu_coredump_copy_to_buffer(error, bounce, *pos, count);
	if (ret > 0) {
		if (copy_to_user(ubuf, bounce, ret))
			ret = -EFAULT;
		else
			*pos += ret;
	}

	kfree(bounce);
	return ret;
}
|
2013-05-23 18:55:35 +08:00
|
|
|
|
2017-02-15 00:46:11 +08:00
|
|
|
/* release() for the coredump files: drop the reference taken at open(). */
static int gpu_state_release(struct inode *inode, struct file *file)
{
	i915_gpu_coredump_put(file->private_data);
	return 0;
}
|
|
|
|
|
2017-02-15 00:46:11 +08:00
|
|
|
static int i915_gpu_info_open(struct inode *inode, struct file *file)
|
2012-04-27 21:17:40 +08:00
|
|
|
{
|
2017-03-28 21:14:07 +08:00
|
|
|
struct drm_i915_private *i915 = inode->i_private;
|
2020-01-10 20:30:56 +08:00
|
|
|
struct i915_gpu_coredump *gpu;
|
2019-01-14 22:21:14 +08:00
|
|
|
intel_wakeref_t wakeref;
|
2012-04-27 21:17:40 +08:00
|
|
|
|
2019-01-14 22:21:23 +08:00
|
|
|
gpu = NULL;
|
2019-06-14 07:21:55 +08:00
|
|
|
with_intel_runtime_pm(&i915->runtime_pm, wakeref)
|
2020-11-04 21:47:42 +08:00
|
|
|
gpu = i915_gpu_coredump(&i915->gt, ALL_ENGINES);
|
2018-12-07 19:05:54 +08:00
|
|
|
if (IS_ERR(gpu))
|
|
|
|
return PTR_ERR(gpu);
|
2012-04-27 21:17:40 +08:00
|
|
|
|
2017-02-15 00:46:11 +08:00
|
|
|
file->private_data = gpu;
|
2013-05-23 18:55:35 +08:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2017-02-15 00:46:11 +08:00
|
|
|
/* "i915_gpu_info": snapshot a live coredump on open, stream it on read. */
static const struct file_operations i915_gpu_info_fops = {
	.owner = THIS_MODULE,
	.open = i915_gpu_info_open,
	.read = gpu_state_read,
	.llseek = default_llseek,
	.release = gpu_state_release,
};
|
|
|
|
|
|
|
|
/*
 * write() on "i915_error_state": any write clears the stored error
 * state for the device. The written bytes themselves are ignored;
 * the full count is consumed so userspace sees a successful write.
 */
static ssize_t
i915_error_state_write(struct file *filp,
		       const char __user *ubuf,
		       size_t cnt,
		       loff_t *ppos)
{
	struct i915_gpu_coredump *error = filp->private_data;

	if (!error)
		return 0;

	drm_dbg(&error->i915->drm, "Resetting error state\n");
	i915_reset_error_state(error->i915);

	return cnt;
}
|
2013-05-23 18:55:35 +08:00
|
|
|
|
2017-02-15 00:46:11 +08:00
|
|
|
static int i915_error_state_open(struct inode *inode, struct file *file)
|
|
|
|
{
|
2020-01-10 20:30:56 +08:00
|
|
|
struct i915_gpu_coredump *error;
|
2018-12-07 19:05:54 +08:00
|
|
|
|
|
|
|
error = i915_first_error_state(inode->i_private);
|
|
|
|
if (IS_ERR(error))
|
|
|
|
return PTR_ERR(error);
|
|
|
|
|
|
|
|
file->private_data = error;
|
2017-02-15 00:46:11 +08:00
|
|
|
return 0;
|
2012-04-27 21:17:40 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/* "i915_error_state": read the stored coredump; any write clears it. */
static const struct file_operations i915_error_state_fops = {
	.owner = THIS_MODULE,
	.open = i915_error_state_open,
	.read = gpu_state_read,
	.write = i915_error_state_write,
	.llseek = default_llseek,
	.release = gpu_state_release,
};
|
2016-10-12 17:05:18 +08:00
|
|
|
#endif
|
|
|
|
|
2014-03-31 14:00:02 +08:00
|
|
|
static int i915_frequency_info(struct seq_file *m, void *unused)
|
2010-01-30 03:27:07 +08:00
|
|
|
{
|
drm/i915: debugfs spring cleaning
Just like with sysfs, we do some major overhaul.
Pass dev_priv instead of dev to all feature macros (IS_, HAS_,
INTEL_, etc.). This has the side effect that a bunch of functions
now get dev_priv passed instead of dev.
All calls to INTEL_INFO()->gen have been replaced with
INTEL_GEN().
We want access to to_i915(node->minor->dev) in a lot of places,
so add the node_to_i915() helper to accommodate for this.
Finally, we have quite a few cases where we get a void * pointer,
and need to cast it to drm_device *, only to run to_i915() on it.
Add cast_to_i915() to do this.
v2: Don't introduce extra dev (Chris)
v3: Make pipe_crc_info have a pointer to drm_i915_private instead of
drm_device. This saves a bit of space, since we never use
drm_device anywhere in these functions.
Also some minor fixup that I missed in the previous version.
v4: Changed the code a bit so that dev_priv is passed directly
to various functions, thus removing the need for the
cast_to_i915() helper. Also did some additional cleanup.
v5: Additional cleanup of newly introduced changes.
v6: Rebase again because of conflict.
Signed-off-by: David Weinehall <david.weinehall@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20160822105931.pcbe2lpsgzckzboa@boom
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
2016-08-22 18:59:31 +08:00
|
|
|
struct drm_i915_private *dev_priv = node_to_i915(m->private);
|
2019-06-11 18:45:48 +08:00
|
|
|
struct intel_uncore *uncore = &dev_priv->uncore;
|
2019-10-25 05:16:41 +08:00
|
|
|
struct intel_rps *rps = &dev_priv->gt.rps;
|
2019-01-14 22:21:14 +08:00
|
|
|
intel_wakeref_t wakeref;
|
2013-11-28 04:21:54 +08:00
|
|
|
|
2019-06-14 07:21:54 +08:00
|
|
|
wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
|
2010-12-18 06:19:02 +08:00
|
|
|
|
drm/i915: replace IS_GEN<N> with IS_GEN(..., N)
Define IS_GEN() similarly to our IS_GEN_RANGE(). but use gen instead of
gen_mask to do the comparison. Now callers can pass then gen as a parameter,
so we don't require one macro for each gen.
The following spatch was used to convert the users of these macros:
@@
expression e;
@@
(
- IS_GEN2(e)
+ IS_GEN(e, 2)
|
- IS_GEN3(e)
+ IS_GEN(e, 3)
|
- IS_GEN4(e)
+ IS_GEN(e, 4)
|
- IS_GEN5(e)
+ IS_GEN(e, 5)
|
- IS_GEN6(e)
+ IS_GEN(e, 6)
|
- IS_GEN7(e)
+ IS_GEN(e, 7)
|
- IS_GEN8(e)
+ IS_GEN(e, 8)
|
- IS_GEN9(e)
+ IS_GEN(e, 9)
|
- IS_GEN10(e)
+ IS_GEN(e, 10)
|
- IS_GEN11(e)
+ IS_GEN(e, 11)
)
v2: use IS_GEN rather than GT_GEN and compare to info.gen rather than
using the bitmask
Signed-off-by: Lucas De Marchi <lucas.demarchi@intel.com>
Reviewed-by: Jani Nikula <jani.nikula@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20181212181044.15886-2-lucas.demarchi@intel.com
2018-12-13 02:10:43 +08:00
|
|
|
if (IS_GEN(dev_priv, 5)) {
|
2019-06-11 18:45:48 +08:00
|
|
|
u16 rgvswctl = intel_uncore_read16(uncore, MEMSWCTL);
|
|
|
|
u16 rgvstat = intel_uncore_read16(uncore, MEMSTAT_ILK);
|
2010-12-18 06:19:02 +08:00
|
|
|
|
|
|
|
seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
|
|
|
|
seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
|
|
|
|
seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
|
|
|
|
MEMSTAT_VID_SHIFT);
|
|
|
|
seq_printf(m, "Current P-state: %d\n",
|
|
|
|
(rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
|
drm/i915: debugfs spring cleaning
Just like with sysfs, we do some major overhaul.
Pass dev_priv instead of dev to all feature macros (IS_, HAS_,
INTEL_, etc.). This has the side effect that a bunch of functions
now get dev_priv passed instead of dev.
All calls to INTEL_INFO()->gen have been replaced with
INTEL_GEN().
We want access to to_i915(node->minor->dev) in a lot of places,
so add the node_to_i915() helper to accommodate for this.
Finally, we have quite a few cases where we get a void * pointer,
and need to cast it to drm_device *, only to run to_i915() on it.
Add cast_to_i915() to do this.
v2: Don't introduce extra dev (Chris)
v3: Make pipe_crc_info have a pointer to drm_i915_private instead of
drm_device. This saves a bit of space, since we never use
drm_device anywhere in these functions.
Also some minor fixup that I missed in the previous version.
v4: Changed the code a bit so that dev_priv is passed directly
to various functions, thus removing the need for the
cast_to_i915() helper. Also did some additional cleanup.
v5: Additional cleanup of newly introduced changes.
v6: Rebase again because of conflict.
Signed-off-by: David Weinehall <david.weinehall@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20160822105931.pcbe2lpsgzckzboa@boom
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
2016-08-22 18:59:31 +08:00
|
|
|
} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
|
2017-10-11 05:30:02 +08:00
|
|
|
u32 rpmodectl, freq_sts;
|
2015-12-10 04:29:35 +08:00
|
|
|
|
2020-11-30 19:15:56 +08:00
|
|
|
rpmodectl = intel_uncore_read(&dev_priv->uncore, GEN6_RP_CONTROL);
|
2017-10-11 05:30:02 +08:00
|
|
|
seq_printf(m, "Video Turbo Mode: %s\n",
|
|
|
|
yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
|
|
|
|
seq_printf(m, "HW control enabled: %s\n",
|
|
|
|
yesno(rpmodectl & GEN6_RP_ENABLE));
|
|
|
|
seq_printf(m, "SW control enabled: %s\n",
|
|
|
|
yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
|
|
|
|
GEN6_RP_MEDIA_SW_MODE));
|
|
|
|
|
2019-04-26 16:17:20 +08:00
|
|
|
vlv_punit_get(dev_priv);
|
2015-12-10 04:29:35 +08:00
|
|
|
freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
|
2019-04-26 16:17:20 +08:00
|
|
|
vlv_punit_put(dev_priv);
|
|
|
|
|
2015-12-10 04:29:35 +08:00
|
|
|
seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
|
|
|
|
seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);
|
|
|
|
|
|
|
|
seq_printf(m, "actual GPU freq: %d MHz\n",
|
2019-10-25 05:16:41 +08:00
|
|
|
intel_gpu_freq(rps, (freq_sts >> 8) & 0xff));
|
2015-12-10 04:29:35 +08:00
|
|
|
|
|
|
|
seq_printf(m, "current GPU freq: %d MHz\n",
|
2019-10-25 05:16:41 +08:00
|
|
|
intel_gpu_freq(rps, rps->cur_freq));
|
2015-12-10 04:29:35 +08:00
|
|
|
|
|
|
|
seq_printf(m, "max GPU freq: %d MHz\n",
|
2019-10-25 05:16:41 +08:00
|
|
|
intel_gpu_freq(rps, rps->max_freq));
|
2015-12-10 04:29:35 +08:00
|
|
|
|
|
|
|
seq_printf(m, "min GPU freq: %d MHz\n",
|
2019-10-25 05:16:41 +08:00
|
|
|
intel_gpu_freq(rps, rps->min_freq));
|
2015-12-10 04:29:35 +08:00
|
|
|
|
|
|
|
seq_printf(m, "idle GPU freq: %d MHz\n",
|
2019-10-25 05:16:41 +08:00
|
|
|
intel_gpu_freq(rps, rps->idle_freq));
|
2015-12-10 04:29:35 +08:00
|
|
|
|
|
|
|
seq_printf(m,
|
|
|
|
"efficient (RPe) frequency: %d MHz\n",
|
2019-10-25 05:16:41 +08:00
|
|
|
intel_gpu_freq(rps, rps->efficient_freq));
|
drm/i915: debugfs spring cleaning
Just like with sysfs, we do some major overhaul.
Pass dev_priv instead of dev to all feature macros (IS_, HAS_,
INTEL_, etc.). This has the side effect that a bunch of functions
now get dev_priv passed instead of dev.
All calls to INTEL_INFO()->gen have been replaced with
INTEL_GEN().
We want access to to_i915(node->minor->dev) in a lot of places,
so add the node_to_i915() helper to accommodate for this.
Finally, we have quite a few cases where we get a void * pointer,
and need to cast it to drm_device *, only to run to_i915() on it.
Add cast_to_i915() to do this.
v2: Don't introduce extra dev (Chris)
v3: Make pipe_crc_info have a pointer to drm_i915_private instead of
drm_device. This saves a bit of space, since we never use
drm_device anywhere in these functions.
Also some minor fixup that I missed in the previous version.
v4: Changed the code a bit so that dev_priv is passed directly
to various functions, thus removing the need for the
cast_to_i915() helper. Also did some additional cleanup.
v5: Additional cleanup of newly introduced changes.
v6: Rebase again because of conflict.
Signed-off-by: David Weinehall <david.weinehall@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20160822105931.pcbe2lpsgzckzboa@boom
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
2016-08-22 18:59:31 +08:00
|
|
|
} else if (INTEL_GEN(dev_priv) >= 6) {
|
2015-06-26 05:54:07 +08:00
|
|
|
u32 rp_state_limits;
|
|
|
|
u32 gt_perf_status;
|
|
|
|
u32 rp_state_cap;
|
2014-03-27 17:06:14 +08:00
|
|
|
u32 rpmodectl, rpinclimit, rpdeclimit;
|
2013-08-27 06:51:01 +08:00
|
|
|
u32 rpstat, cagf, reqf;
|
2011-01-19 07:49:25 +08:00
|
|
|
u32 rpupei, rpcurup, rpprevup;
|
|
|
|
u32 rpdownei, rpcurdown, rpprevdown;
|
2014-08-02 05:14:48 +08:00
|
|
|
u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
|
2010-12-18 06:19:02 +08:00
|
|
|
int max_freq;
|
|
|
|
|
2020-11-30 19:15:56 +08:00
|
|
|
rp_state_limits = intel_uncore_read(&dev_priv->uncore, GEN6_RP_STATE_LIMITS);
|
2016-12-02 16:23:49 +08:00
|
|
|
if (IS_GEN9_LP(dev_priv)) {
|
2020-11-30 19:15:56 +08:00
|
|
|
rp_state_cap = intel_uncore_read(&dev_priv->uncore, BXT_RP_STATE_CAP);
|
|
|
|
gt_perf_status = intel_uncore_read(&dev_priv->uncore, BXT_GT_PERF_STATUS);
|
2015-06-26 05:54:07 +08:00
|
|
|
} else {
|
2020-11-30 19:15:56 +08:00
|
|
|
rp_state_cap = intel_uncore_read(&dev_priv->uncore, GEN6_RP_STATE_CAP);
|
|
|
|
gt_perf_status = intel_uncore_read(&dev_priv->uncore, GEN6_GT_PERF_STATUS);
|
2015-06-26 05:54:07 +08:00
|
|
|
}
|
|
|
|
|
2010-12-18 06:19:02 +08:00
|
|
|
/* RPSTAT1 is in the GT power well */
|
2019-03-20 02:35:36 +08:00
|
|
|
intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
|
2010-12-18 06:19:02 +08:00
|
|
|
|
2020-11-30 19:15:56 +08:00
|
|
|
reqf = intel_uncore_read(&dev_priv->uncore, GEN6_RPNSWREQ);
|
2017-07-07 04:41:13 +08:00
|
|
|
if (INTEL_GEN(dev_priv) >= 9)
|
2015-03-06 13:37:21 +08:00
|
|
|
reqf >>= 23;
|
|
|
|
else {
|
|
|
|
reqf &= ~GEN6_TURBO_DISABLE;
|
drm/i915: debugfs spring cleaning
Just like with sysfs, we do some major overhaul.
Pass dev_priv instead of dev to all feature macros (IS_, HAS_,
INTEL_, etc.). This has the side effect that a bunch of functions
now get dev_priv passed instead of dev.
All calls to INTEL_INFO()->gen have been replaced with
INTEL_GEN().
We want access to to_i915(node->minor->dev) in a lot of places,
so add the node_to_i915() helper to accommodate for this.
Finally, we have quite a few cases where we get a void * pointer,
and need to cast it to drm_device *, only to run to_i915() on it.
Add cast_to_i915() to do this.
v2: Don't introduce extra dev (Chris)
v3: Make pipe_crc_info have a pointer to drm_i915_private instead of
drm_device. This saves a bit of space, since we never use
drm_device anywhere in these functions.
Also some minor fixup that I missed in the previous version.
v4: Changed the code a bit so that dev_priv is passed directly
to various functions, thus removing the need for the
cast_to_i915() helper. Also did some additional cleanup.
v5: Additional cleanup of newly introduced changes.
v6: Rebase again because of conflict.
Signed-off-by: David Weinehall <david.weinehall@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20160822105931.pcbe2lpsgzckzboa@boom
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
2016-08-22 18:59:31 +08:00
|
|
|
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
|
2015-03-06 13:37:21 +08:00
|
|
|
reqf >>= 24;
|
|
|
|
else
|
|
|
|
reqf >>= 25;
|
|
|
|
}
|
2019-10-25 05:16:41 +08:00
|
|
|
reqf = intel_gpu_freq(rps, reqf);
|
2013-08-27 06:51:01 +08:00
|
|
|
|
2020-11-30 19:15:56 +08:00
|
|
|
rpmodectl = intel_uncore_read(&dev_priv->uncore, GEN6_RP_CONTROL);
|
|
|
|
rpinclimit = intel_uncore_read(&dev_priv->uncore, GEN6_RP_UP_THRESHOLD);
|
|
|
|
rpdeclimit = intel_uncore_read(&dev_priv->uncore, GEN6_RP_DOWN_THRESHOLD);
|
|
|
|
|
|
|
|
rpstat = intel_uncore_read(&dev_priv->uncore, GEN6_RPSTAT1);
|
|
|
|
rpupei = intel_uncore_read(&dev_priv->uncore, GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK;
|
|
|
|
rpcurup = intel_uncore_read(&dev_priv->uncore, GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK;
|
|
|
|
rpprevup = intel_uncore_read(&dev_priv->uncore, GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK;
|
|
|
|
rpdownei = intel_uncore_read(&dev_priv->uncore, GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
|
|
|
|
rpcurdown = intel_uncore_read(&dev_priv->uncore, GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
|
|
|
|
rpprevdown = intel_uncore_read(&dev_priv->uncore, GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
|
2019-12-14 02:37:35 +08:00
|
|
|
cagf = intel_rps_read_actual_frequency(rps);
|
2011-01-19 07:49:25 +08:00
|
|
|
|
2019-03-20 02:35:36 +08:00
|
|
|
intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
|
2011-04-26 03:11:50 +08:00
|
|
|
|
2018-05-11 05:59:55 +08:00
|
|
|
if (INTEL_GEN(dev_priv) >= 11) {
|
2020-11-30 19:15:56 +08:00
|
|
|
pm_ier = intel_uncore_read(&dev_priv->uncore, GEN11_GPM_WGBOXPERF_INTR_ENABLE);
|
|
|
|
pm_imr = intel_uncore_read(&dev_priv->uncore, GEN11_GPM_WGBOXPERF_INTR_MASK);
|
2018-05-11 05:59:55 +08:00
|
|
|
/*
|
|
|
|
* The equivalent to the PM ISR & IIR cannot be read
|
|
|
|
* without affecting the current state of the system
|
|
|
|
*/
|
|
|
|
pm_isr = 0;
|
|
|
|
pm_iir = 0;
|
|
|
|
} else if (INTEL_GEN(dev_priv) >= 8) {
|
2020-11-30 19:15:56 +08:00
|
|
|
pm_ier = intel_uncore_read(&dev_priv->uncore, GEN8_GT_IER(2));
|
|
|
|
pm_imr = intel_uncore_read(&dev_priv->uncore, GEN8_GT_IMR(2));
|
|
|
|
pm_isr = intel_uncore_read(&dev_priv->uncore, GEN8_GT_ISR(2));
|
|
|
|
pm_iir = intel_uncore_read(&dev_priv->uncore, GEN8_GT_IIR(2));
|
2018-05-11 05:59:55 +08:00
|
|
|
} else {
|
2020-11-30 19:15:56 +08:00
|
|
|
pm_ier = intel_uncore_read(&dev_priv->uncore, GEN6_PMIER);
|
|
|
|
pm_imr = intel_uncore_read(&dev_priv->uncore, GEN6_PMIMR);
|
|
|
|
pm_isr = intel_uncore_read(&dev_priv->uncore, GEN6_PMISR);
|
|
|
|
pm_iir = intel_uncore_read(&dev_priv->uncore, GEN6_PMIIR);
|
2014-08-02 05:14:48 +08:00
|
|
|
}
|
2020-11-30 19:15:56 +08:00
|
|
|
pm_mask = intel_uncore_read(&dev_priv->uncore, GEN6_PMINTRMSK);
|
2018-05-11 05:59:55 +08:00
|
|
|
|
2017-10-11 05:29:59 +08:00
|
|
|
seq_printf(m, "Video Turbo Mode: %s\n",
|
|
|
|
yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
|
|
|
|
seq_printf(m, "HW control enabled: %s\n",
|
|
|
|
yesno(rpmodectl & GEN6_RP_ENABLE));
|
|
|
|
seq_printf(m, "SW control enabled: %s\n",
|
|
|
|
yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
|
|
|
|
GEN6_RP_MEDIA_SW_MODE));
|
2018-05-11 05:59:55 +08:00
|
|
|
|
|
|
|
seq_printf(m, "PM IER=0x%08x IMR=0x%08x, MASK=0x%08x\n",
|
|
|
|
pm_ier, pm_imr, pm_mask);
|
|
|
|
if (INTEL_GEN(dev_priv) <= 10)
|
|
|
|
seq_printf(m, "PM ISR=0x%08x IIR=0x%08x\n",
|
|
|
|
pm_isr, pm_iir);
|
2017-03-11 10:37:00 +08:00
|
|
|
seq_printf(m, "pm_intrmsk_mbz: 0x%08x\n",
|
2017-10-11 05:30:06 +08:00
|
|
|
rps->pm_intrmsk_mbz);
|
2010-12-18 06:19:02 +08:00
|
|
|
seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
|
|
|
|
seq_printf(m, "Render p-state ratio: %d\n",
|
2017-07-07 04:41:13 +08:00
|
|
|
(gt_perf_status & (INTEL_GEN(dev_priv) >= 9 ? 0x1ff00 : 0xff00)) >> 8);
|
2010-12-18 06:19:02 +08:00
|
|
|
seq_printf(m, "Render p-state VID: %d\n",
|
|
|
|
gt_perf_status & 0xff);
|
|
|
|
seq_printf(m, "Render p-state limit: %d\n",
|
|
|
|
rp_state_limits & 0xff);
|
2014-03-27 17:06:14 +08:00
|
|
|
seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
|
|
|
|
seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl);
|
|
|
|
seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit);
|
|
|
|
seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
|
2013-08-27 06:51:01 +08:00
|
|
|
seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
|
2013-01-30 04:00:15 +08:00
|
|
|
seq_printf(m, "CAGF: %dMHz\n", cagf);
|
2020-12-23 20:23:59 +08:00
|
|
|
seq_printf(m, "RP CUR UP EI: %d (%lldns)\n",
|
2020-04-25 00:28:05 +08:00
|
|
|
rpupei,
|
|
|
|
intel_gt_pm_interval_to_ns(&dev_priv->gt, rpupei));
|
2020-12-23 20:23:59 +08:00
|
|
|
seq_printf(m, "RP CUR UP: %d (%lldun)\n",
|
2020-04-25 00:28:05 +08:00
|
|
|
rpcurup,
|
|
|
|
intel_gt_pm_interval_to_ns(&dev_priv->gt, rpcurup));
|
2020-12-23 20:23:59 +08:00
|
|
|
seq_printf(m, "RP PREV UP: %d (%lldns)\n",
|
2020-04-25 00:28:05 +08:00
|
|
|
rpprevup,
|
|
|
|
intel_gt_pm_interval_to_ns(&dev_priv->gt, rpprevup));
|
drm/i915: Interactive RPS mode
RPS provides a feedback loop where we use the load during the previous
evaluation interval to decide whether to up or down clock the GPU
frequency. Our responsiveness is split into 3 regimes, a high and low
plateau with the intent to keep the gpu clocked high to cover occasional
stalls under high load, and low despite occasional glitches under steady
low load, and inbetween. However, we run into situations like kodi where
we want to stay at low power (video decoding is done efficiently
inside the fixed function HW and doesn't need high clocks even for high
bitrate streams), but just occasionally the pipeline is more complex
than a video decode and we need a smidgen of extra GPU power to present
on time. In the high power regime, we sample at sub frame intervals with
a bias to upclocking, and conversely at low power we sample over a few
frames worth to provide what we consider to be the right levels of
responsiveness respectively. At low power, we more or less expect to be
kicked out to high power at the start of a busy sequence by waitboosting.
Prior to commit e9af4ea2b9e7 ("drm/i915: Avoid waitboosting on the active
request") whenever we missed the frame or stalled, we would immediate go
full throttle and upclock the GPU to max. But in commit e9af4ea2b9e7, we
relaxed the waitboosting to only apply if the pipeline was deep to avoid
over-committing resources for a near miss. Sadly though, a near miss is
still a miss, and perceptible as jitter in the frame delivery.
To try and prevent the near miss before having to resort to boosting
after the fact, we use the pageflip queue as an indication that we are
in an "interactive" regime and so should sample the load more frequently
to provide power before the frame misses it vblank. This will make us
more favorable to providing a small power increase (one or two bins) as
required rather than going all the way to maximum and then having to
work back down again. (We still keep the waitboosting mechanism around
just in case a dramatic change in system load requires urgent uplocking,
faster than we can provide in a few evaluation intervals.)
v2: Reduce rps_set_interactive to a boolean parameter to avoid the
confusion of what if they wanted a new power mode after pinning to a
different mode (which to choose?)
v3: Only reprogram RPS while the GT is awake, it will be set when we
wake the GT, and while off warns about being used outside of rpm.
v4: Fix deferred application of interactive mode
v5: s/state/interactive/
v6: Group the mutex with its principle in a substruct
Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=107111
Fixes: e9af4ea2b9e7 ("drm/i915: Avoid waitboosting on the active request")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Radoslaw Szwichtenberg <radoslaw.szwichtenberg@intel.com>
Cc: Ville Syrjälä <ville.syrjala@linux.intel.com>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180731132629.3381-1-chris@chris-wilson.co.uk
2018-07-31 21:26:29 +08:00
|
|
|
seq_printf(m, "Up threshold: %d%%\n",
|
|
|
|
rps->power.up_threshold);
|
2015-04-27 20:41:19 +08:00
|
|
|
|
2020-12-23 20:23:59 +08:00
|
|
|
seq_printf(m, "RP CUR DOWN EI: %d (%lldns)\n",
|
2020-04-25 00:28:05 +08:00
|
|
|
rpdownei,
|
|
|
|
intel_gt_pm_interval_to_ns(&dev_priv->gt,
|
|
|
|
rpdownei));
|
2020-12-23 20:23:59 +08:00
|
|
|
seq_printf(m, "RP CUR DOWN: %d (%lldns)\n",
|
2020-04-25 00:28:05 +08:00
|
|
|
rpcurdown,
|
|
|
|
intel_gt_pm_interval_to_ns(&dev_priv->gt,
|
|
|
|
rpcurdown));
|
2020-12-23 20:23:59 +08:00
|
|
|
seq_printf(m, "RP PREV DOWN: %d (%lldns)\n",
|
2020-04-25 00:28:05 +08:00
|
|
|
rpprevdown,
|
|
|
|
intel_gt_pm_interval_to_ns(&dev_priv->gt,
|
|
|
|
rpprevdown));
|
drm/i915: Interactive RPS mode
RPS provides a feedback loop where we use the load during the previous
evaluation interval to decide whether to up or down clock the GPU
frequency. Our responsiveness is split into 3 regimes, a high and low
plateau with the intent to keep the gpu clocked high to cover occasional
stalls under high load, and low despite occasional glitches under steady
low load, and inbetween. However, we run into situations like kodi where
we want to stay at low power (video decoding is done efficiently
inside the fixed function HW and doesn't need high clocks even for high
bitrate streams), but just occasionally the pipeline is more complex
than a video decode and we need a smidgen of extra GPU power to present
on time. In the high power regime, we sample at sub frame intervals with
a bias to upclocking, and conversely at low power we sample over a few
frames worth to provide what we consider to be the right levels of
responsiveness respectively. At low power, we more or less expect to be
kicked out to high power at the start of a busy sequence by waitboosting.
Prior to commit e9af4ea2b9e7 ("drm/i915: Avoid waitboosting on the active
request") whenever we missed the frame or stalled, we would immediate go
full throttle and upclock the GPU to max. But in commit e9af4ea2b9e7, we
relaxed the waitboosting to only apply if the pipeline was deep to avoid
over-committing resources for a near miss. Sadly though, a near miss is
still a miss, and perceptible as jitter in the frame delivery.
To try and prevent the near miss before having to resort to boosting
after the fact, we use the pageflip queue as an indication that we are
in an "interactive" regime and so should sample the load more frequently
to provide power before the frame misses it vblank. This will make us
more favorable to providing a small power increase (one or two bins) as
required rather than going all the way to maximum and then having to
work back down again. (We still keep the waitboosting mechanism around
just in case a dramatic change in system load requires urgent uplocking,
faster than we can provide in a few evaluation intervals.)
v2: Reduce rps_set_interactive to a boolean parameter to avoid the
confusion of what if they wanted a new power mode after pinning to a
different mode (which to choose?)
v3: Only reprogram RPS while the GT is awake, it will be set when we
wake the GT, and while off warns about being used outside of rpm.
v4: Fix deferred application of interactive mode
v5: s/state/interactive/
v6: Group the mutex with its principle in a substruct
Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=107111
Fixes: e9af4ea2b9e7 ("drm/i915: Avoid waitboosting on the active request")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Radoslaw Szwichtenberg <radoslaw.szwichtenberg@intel.com>
Cc: Ville Syrjälä <ville.syrjala@linux.intel.com>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180731132629.3381-1-chris@chris-wilson.co.uk
2018-07-31 21:26:29 +08:00
|
|
|
seq_printf(m, "Down threshold: %d%%\n",
|
|
|
|
rps->power.down_threshold);
|
2010-12-18 06:19:02 +08:00
|
|
|
|
2016-12-02 16:23:49 +08:00
|
|
|
max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 0 :
|
2015-06-26 05:54:07 +08:00
|
|
|
rp_state_cap >> 16) & 0xff;
|
2017-07-07 04:41:13 +08:00
|
|
|
max_freq *= (IS_GEN9_BC(dev_priv) ||
|
2018-04-05 22:00:52 +08:00
|
|
|
INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
|
2010-12-18 06:19:02 +08:00
|
|
|
seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
|
2019-10-25 05:16:41 +08:00
|
|
|
intel_gpu_freq(rps, max_freq));
|
2010-12-18 06:19:02 +08:00
|
|
|
|
|
|
|
max_freq = (rp_state_cap & 0xff00) >> 8;
|
2017-07-07 04:41:13 +08:00
|
|
|
max_freq *= (IS_GEN9_BC(dev_priv) ||
|
2018-04-05 22:00:52 +08:00
|
|
|
INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
|
2010-12-18 06:19:02 +08:00
|
|
|
seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
|
2019-10-25 05:16:41 +08:00
|
|
|
intel_gpu_freq(rps, max_freq));
|
2010-12-18 06:19:02 +08:00
|
|
|
|
2016-12-02 16:23:49 +08:00
|
|
|
max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 16 :
|
2015-06-26 05:54:07 +08:00
|
|
|
rp_state_cap >> 0) & 0xff;
|
2017-07-07 04:41:13 +08:00
|
|
|
max_freq *= (IS_GEN9_BC(dev_priv) ||
|
2018-04-05 22:00:52 +08:00
|
|
|
INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
|
2010-12-18 06:19:02 +08:00
|
|
|
seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
|
2019-10-25 05:16:41 +08:00
|
|
|
intel_gpu_freq(rps, max_freq));
|
2013-04-06 05:29:22 +08:00
|
|
|
seq_printf(m, "Max overclocked frequency: %dMHz\n",
|
2019-10-25 05:16:41 +08:00
|
|
|
intel_gpu_freq(rps, rps->max_freq));
|
2015-03-18 17:48:21 +08:00
|
|
|
|
2015-04-27 20:41:19 +08:00
|
|
|
seq_printf(m, "Current freq: %d MHz\n",
|
2019-10-25 05:16:41 +08:00
|
|
|
intel_gpu_freq(rps, rps->cur_freq));
|
2015-04-27 20:41:19 +08:00
|
|
|
seq_printf(m, "Actual freq: %d MHz\n", cagf);
|
2015-03-18 17:48:21 +08:00
|
|
|
seq_printf(m, "Idle freq: %d MHz\n",
|
2019-10-25 05:16:41 +08:00
|
|
|
intel_gpu_freq(rps, rps->idle_freq));
|
2015-04-27 20:41:19 +08:00
|
|
|
seq_printf(m, "Min freq: %d MHz\n",
|
2019-10-25 05:16:41 +08:00
|
|
|
intel_gpu_freq(rps, rps->min_freq));
|
2016-07-13 16:10:35 +08:00
|
|
|
seq_printf(m, "Boost freq: %d MHz\n",
|
2019-10-25 05:16:41 +08:00
|
|
|
intel_gpu_freq(rps, rps->boost_freq));
|
2015-04-27 20:41:19 +08:00
|
|
|
seq_printf(m, "Max freq: %d MHz\n",
|
2019-10-25 05:16:41 +08:00
|
|
|
intel_gpu_freq(rps, rps->max_freq));
|
2015-04-27 20:41:19 +08:00
|
|
|
seq_printf(m,
|
|
|
|
"efficient (RPe) frequency: %d MHz\n",
|
2019-10-25 05:16:41 +08:00
|
|
|
intel_gpu_freq(rps, rps->efficient_freq));
|
2010-12-18 06:19:02 +08:00
|
|
|
} else {
|
2013-06-25 05:59:48 +08:00
|
|
|
seq_puts(m, "no P-state info available\n");
|
2010-12-18 06:19:02 +08:00
|
|
|
}
|
2010-01-30 03:27:07 +08:00
|
|
|
|
2017-02-08 02:33:45 +08:00
|
|
|
seq_printf(m, "Current CD clock frequency: %d kHz\n", dev_priv->cdclk.hw.cdclk);
|
2015-09-25 19:00:32 +08:00
|
|
|
seq_printf(m, "Max CD clock frequency: %d kHz\n", dev_priv->max_cdclk_freq);
|
|
|
|
seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq);
|
|
|
|
|
2019-06-14 07:21:54 +08:00
|
|
|
intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
|
2020-10-29 10:18:45 +08:00
|
|
|
return 0;
|
2010-01-30 03:27:07 +08:00
|
|
|
}
|
|
|
|
|
2011-12-14 20:57:16 +08:00
|
|
|
static const char *swizzle_string(unsigned swizzle)
|
|
|
|
{
|
2013-06-25 05:59:49 +08:00
|
|
|
switch (swizzle) {
|
2011-12-14 20:57:16 +08:00
|
|
|
case I915_BIT_6_SWIZZLE_NONE:
|
|
|
|
return "none";
|
|
|
|
case I915_BIT_6_SWIZZLE_9:
|
|
|
|
return "bit9";
|
|
|
|
case I915_BIT_6_SWIZZLE_9_10:
|
|
|
|
return "bit9/bit10";
|
|
|
|
case I915_BIT_6_SWIZZLE_9_11:
|
|
|
|
return "bit9/bit11";
|
|
|
|
case I915_BIT_6_SWIZZLE_9_10_11:
|
|
|
|
return "bit9/bit10/bit11";
|
|
|
|
case I915_BIT_6_SWIZZLE_9_17:
|
|
|
|
return "bit9/bit17";
|
|
|
|
case I915_BIT_6_SWIZZLE_9_10_17:
|
|
|
|
return "bit9/bit10/bit17";
|
|
|
|
case I915_BIT_6_SWIZZLE_UNKNOWN:
|
2012-12-29 01:00:09 +08:00
|
|
|
return "unknown";
|
2011-12-14 20:57:16 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
return "bug";
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * i915_swizzle_info - debugfs show callback dumping bit6 swizzle state.
 *
 * Prints the X/Y tiling swizzle modes detected at init and, on pre-gen8
 * hardware other than Valleyview, the raw memory-controller/arbiter
 * registers the swizzle detection is derived from.
 *
 * Returns 0; output goes through the seq_file stream @m. @data is unused.
 */
static int i915_swizzle_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_uncore *uncore = &dev_priv->uncore;
	intel_wakeref_t wakeref;

	/* Swizzle modes cached in the GGTT at init time. */
	seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
		   swizzle_string(dev_priv->ggtt.bit_6_swizzle_x));
	seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
		   swizzle_string(dev_priv->ggtt.bit_6_swizzle_y));

	if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
		seq_puts(m, "L-shaped memory detected\n");

	/* On BDW+, swizzling is not used. See detect_bit_6_swizzle() */
	if (INTEL_GEN(dev_priv) >= 8 || IS_VALLEYVIEW(dev_priv))
		return 0;

	/* The register reads below require the device to be awake. */
	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	if (IS_GEN_RANGE(dev_priv, 3, 4)) {
		/*
		 * NOTE(review): the label says "DDC" but the register read
		 * is DCC — presumably a historical typo preserved because
		 * the debugfs output is user-visible; confirm before
		 * changing the string.
		 */
		seq_printf(m, "DDC = 0x%08x\n",
			   intel_uncore_read(uncore, DCC));
		seq_printf(m, "DDC2 = 0x%08x\n",
			   intel_uncore_read(uncore, DCC2));
		/* DRB registers are 16 bits wide, hence read16 + %04x. */
		seq_printf(m, "C0DRB3 = 0x%04x\n",
			   intel_uncore_read16(uncore, C0DRB3));
		seq_printf(m, "C1DRB3 = 0x%04x\n",
			   intel_uncore_read16(uncore, C1DRB3));
	} else if (INTEL_GEN(dev_priv) >= 6) {
		/* Per-channel DIMM configuration used by swizzle detection. */
		seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
			   intel_uncore_read(uncore, MAD_DIMM_C0));
		seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
			   intel_uncore_read(uncore, MAD_DIMM_C1));
		seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
			   intel_uncore_read(uncore, MAD_DIMM_C2));
		seq_printf(m, "TILECTL = 0x%08x\n",
			   intel_uncore_read(uncore, TILECTL));
		/* The arbiter mode register moved on gen8. */
		if (INTEL_GEN(dev_priv) >= 8)
			seq_printf(m, "GAMTARBMODE = 0x%08x\n",
				   intel_uncore_read(uncore, GAMTARBMODE));
		else
			seq_printf(m, "ARB_MODE = 0x%08x\n",
				   intel_uncore_read(uncore, ARB_MODE));
		seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
			   intel_uncore_read(uncore, DISP_ARB_CTL));
	}

	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);

	return 0;
}
|
|
|
|
|
2015-04-07 23:20:32 +08:00
|
|
|
static int i915_rps_boost_info(struct seq_file *m, void *data)
|
|
|
|
{
|
drm/i915: debugfs spring cleaning
Just like with sysfs, we do some major overhaul.
Pass dev_priv instead of dev to all feature macros (IS_, HAS_,
INTEL_, etc.). This has the side effect that a bunch of functions
now get dev_priv passed instead of dev.
All calls to INTEL_INFO()->gen have been replaced with
INTEL_GEN().
We want access to to_i915(node->minor->dev) in a lot of places,
so add the node_to_i915() helper to accommodate for this.
Finally, we have quite a few cases where we get a void * pointer,
and need to cast it to drm_device *, only to run to_i915() on it.
Add cast_to_i915() to do this.
v2: Don't introduce extra dev (Chris)
v3: Make pipe_crc_info have a pointer to drm_i915_private instead of
drm_device. This saves a bit of space, since we never use
drm_device anywhere in these functions.
Also some minor fixup that I missed in the previous version.
v4: Changed the code a bit so that dev_priv is passed directly
to various functions, thus removing the need for the
cast_to_i915() helper. Also did some additional cleanup.
v5: Additional cleanup of newly introduced changes.
v6: Rebase again because of conflict.
Signed-off-by: David Weinehall <david.weinehall@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20160822105931.pcbe2lpsgzckzboa@boom
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
2016-08-22 18:59:31 +08:00
|
|
|
struct drm_i915_private *dev_priv = node_to_i915(m->private);
|
2019-10-25 05:16:41 +08:00
|
|
|
struct intel_rps *rps = &dev_priv->gt.rps;
|
2018-10-02 19:32:21 +08:00
|
|
|
|
2020-04-30 04:54:42 +08:00
|
|
|
seq_printf(m, "RPS enabled? %s\n", yesno(intel_rps_is_enabled(rps)));
|
|
|
|
seq_printf(m, "RPS active? %s\n", yesno(intel_rps_is_active(rps)));
|
drm/i915: Invert the GEM wakeref hierarchy
In the current scheme, on submitting a request we take a single global
GEM wakeref, which trickles down to wake up all GT power domains. This
is undesirable as we would like to be able to localise our power
management to the available power domains and to remove the global GEM
operations from the heart of the driver. (The intent there is to push
global GEM decisions to the boundary as used by the GEM user interface.)
Now during request construction, each request is responsible via its
logical context to acquire a wakeref on each power domain it intends to
utilize. Currently, each request takes a wakeref on the engine(s) and
the engines themselves take a chipset wakeref. This gives us a
transition on each engine which we can extend if we want to insert more
powermangement control (such as soft rc6). The global GEM operations
that currently require a struct_mutex are reduced to listening to pm
events from the chipset GT wakeref. As we reduce the struct_mutex
requirement, these listeners should evaporate.
Perhaps the biggest immediate change is that this removes the
struct_mutex requirement around GT power management, allowing us greater
flexibility in request construction. Another important knock-on effect,
is that by tracking engine usage, we can insert a switch back to the
kernel context on that engine immediately, avoiding any extra delay or
inserting global synchronisation barriers. This makes tracking when an
engine and its associated contexts are idle much easier -- important for
when we forgo our assumed execution ordering and need idle barriers to
unpin used contexts. In the process, it means we remove a large chunk of
code whose only purpose was to switch back to the kernel context.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Imre Deak <imre.deak@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190424200717.1686-5-chris@chris-wilson.co.uk
2019-04-25 04:07:17 +08:00
|
|
|
seq_printf(m, "GPU busy? %s\n", yesno(dev_priv->gt.awake));
|
2017-06-28 20:35:48 +08:00
|
|
|
seq_printf(m, "Boosts outstanding? %d\n",
|
2017-10-11 05:30:06 +08:00
|
|
|
atomic_read(&rps->num_waiters));
|
drm/i915: Interactive RPS mode
RPS provides a feedback loop where we use the load during the previous
evaluation interval to decide whether to up or down clock the GPU
frequency. Our responsiveness is split into 3 regimes, a high and low
plateau with the intent to keep the gpu clocked high to cover occasional
stalls under high load, and low despite occasional glitches under steady
low load, and inbetween. However, we run into situations like kodi where
we want to stay at low power (video decoding is done efficiently
inside the fixed function HW and doesn't need high clocks even for high
bitrate streams), but just occasionally the pipeline is more complex
than a video decode and we need a smidgen of extra GPU power to present
on time. In the high power regime, we sample at sub frame intervals with
a bias to upclocking, and conversely at low power we sample over a few
frames worth to provide what we consider to be the right levels of
responsiveness respectively. At low power, we more or less expect to be
kicked out to high power at the start of a busy sequence by waitboosting.
Prior to commit e9af4ea2b9e7 ("drm/i915: Avoid waitboosting on the active
request") whenever we missed the frame or stalled, we would immediate go
full throttle and upclock the GPU to max. But in commit e9af4ea2b9e7, we
relaxed the waitboosting to only apply if the pipeline was deep to avoid
over-committing resources for a near miss. Sadly though, a near miss is
still a miss, and perceptible as jitter in the frame delivery.
To try and prevent the near miss before having to resort to boosting
after the fact, we use the pageflip queue as an indication that we are
in an "interactive" regime and so should sample the load more frequently
to provide power before the frame misses it vblank. This will make us
more favorable to providing a small power increase (one or two bins) as
required rather than going all the way to maximum and then having to
work back down again. (We still keep the waitboosting mechanism around
just in case a dramatic change in system load requires urgent uplocking,
faster than we can provide in a few evaluation intervals.)
v2: Reduce rps_set_interactive to a boolean parameter to avoid the
confusion of what if they wanted a new power mode after pinning to a
different mode (which to choose?)
v3: Only reprogram RPS while the GT is awake, it will be set when we
wake the GT, and while off warns about being used outside of rpm.
v4: Fix deferred application of interactive mode
v5: s/state/interactive/
v6: Group the mutex with its principle in a substruct
Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=107111
Fixes: e9af4ea2b9e7 ("drm/i915: Avoid waitboosting on the active request")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Radoslaw Szwichtenberg <radoslaw.szwichtenberg@intel.com>
Cc: Ville Syrjälä <ville.syrjala@linux.intel.com>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180731132629.3381-1-chris@chris-wilson.co.uk
2018-07-31 21:26:29 +08:00
|
|
|
seq_printf(m, "Interactive? %d\n", READ_ONCE(rps->power.interactive));
|
2018-10-02 19:32:21 +08:00
|
|
|
seq_printf(m, "Frequency requested %d, actual %d\n",
|
2019-10-25 05:16:41 +08:00
|
|
|
intel_gpu_freq(rps, rps->cur_freq),
|
2019-12-14 02:37:35 +08:00
|
|
|
intel_rps_read_actual_frequency(rps));
|
2016-08-15 16:49:33 +08:00
|
|
|
seq_printf(m, " min hard:%d, soft:%d; max soft:%d, hard:%d\n",
|
2019-10-25 05:16:41 +08:00
|
|
|
intel_gpu_freq(rps, rps->min_freq),
|
|
|
|
intel_gpu_freq(rps, rps->min_freq_softlimit),
|
|
|
|
intel_gpu_freq(rps, rps->max_freq_softlimit),
|
|
|
|
intel_gpu_freq(rps, rps->max_freq));
|
2016-08-15 16:49:33 +08:00
|
|
|
seq_printf(m, " idle:%d, efficient:%d, boost:%d\n",
|
2019-10-25 05:16:41 +08:00
|
|
|
intel_gpu_freq(rps, rps->idle_freq),
|
|
|
|
intel_gpu_freq(rps, rps->efficient_freq),
|
|
|
|
intel_gpu_freq(rps, rps->boost_freq));
|
2016-04-27 01:29:41 +08:00
|
|
|
|
2020-12-31 17:31:49 +08:00
|
|
|
seq_printf(m, "Wait boosts: %d\n", READ_ONCE(rps->boosts));
|
2015-04-07 23:20:32 +08:00
|
|
|
|
2013-07-05 02:02:07 +08:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2015-06-05 01:23:57 +08:00
|
|
|
static int i915_runtime_pm_status(struct seq_file *m, void *unused)
|
2013-08-20 00:18:10 +08:00
|
|
|
{
|
drm/i915: debugfs spring cleaning
Just like with sysfs, we do some major overhaul.
Pass dev_priv instead of dev to all feature macros (IS_, HAS_,
INTEL_, etc.). This has the side effect that a bunch of functions
now get dev_priv passed instead of dev.
All calls to INTEL_INFO()->gen have been replaced with
INTEL_GEN().
We want access to to_i915(node->minor->dev) in a lot of places,
so add the node_to_i915() helper to accommodate for this.
Finally, we have quite a few cases where we get a void * pointer,
and need to cast it to drm_device *, only to run to_i915() on it.
Add cast_to_i915() to do this.
v2: Don't introduce extra dev (Chris)
v3: Make pipe_crc_info have a pointer to drm_i915_private instead of
drm_device. This saves a bit of space, since we never use
drm_device anywhere in these functions.
Also some minor fixup that I missed in the previous version.
v4: Changed the code a bit so that dev_priv is passed directly
to various functions, thus removing the need for the
cast_to_i915() helper. Also did some additional cleanup.
v5: Additional cleanup of newly introduced changes.
v6: Rebase again because of conflict.
Signed-off-by: David Weinehall <david.weinehall@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20160822105931.pcbe2lpsgzckzboa@boom
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
2016-08-22 18:59:31 +08:00
|
|
|
struct drm_i915_private *dev_priv = node_to_i915(m->private);
|
2016-08-22 18:32:44 +08:00
|
|
|
struct pci_dev *pdev = dev_priv->drm.pdev;
|
2013-08-20 00:18:10 +08:00
|
|
|
|
2016-04-03 21:14:21 +08:00
|
|
|
if (!HAS_RUNTIME_PM(dev_priv))
|
|
|
|
seq_puts(m, "Runtime power management not supported\n");
|
2013-08-20 00:18:10 +08:00
|
|
|
|
2019-01-14 22:21:25 +08:00
|
|
|
seq_printf(m, "Runtime power status: %s\n",
|
2020-12-01 05:21:58 +08:00
|
|
|
enableddisabled(!dev_priv->power_domains.init_wakeref));
|
2019-01-14 22:21:25 +08:00
|
|
|
|
2019-02-28 18:20:35 +08:00
|
|
|
seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->gt.awake));
|
2013-08-20 00:18:10 +08:00
|
|
|
seq_printf(m, "IRQs disabled: %s\n",
|
2014-06-21 00:29:20 +08:00
|
|
|
yesno(!intel_irqs_enabled(dev_priv)));
|
2015-06-15 19:52:28 +08:00
|
|
|
#ifdef CONFIG_PM
|
2015-06-05 01:23:58 +08:00
|
|
|
seq_printf(m, "Usage count: %d\n",
|
drm/i915: debugfs spring cleaning
Just like with sysfs, we do some major overhaul.
Pass dev_priv instead of dev to all feature macros (IS_, HAS_,
INTEL_, etc.). This has the side effect that a bunch of functions
now get dev_priv passed instead of dev.
All calls to INTEL_INFO()->gen have been replaced with
INTEL_GEN().
We want access to to_i915(node->minor->dev) in a lot of places,
so add the node_to_i915() helper to accommodate for this.
Finally, we have quite a few cases where we get a void * pointer,
and need to cast it to drm_device *, only to run to_i915() on it.
Add cast_to_i915() to do this.
v2: Don't introduce extra dev (Chris)
v3: Make pipe_crc_info have a pointer to drm_i915_private instead of
drm_device. This saves a bit of space, since we never use
drm_device anywhere in these functions.
Also some minor fixup that I missed in the previous version.
v4: Changed the code a bit so that dev_priv is passed directly
to various functions, thus removing the need for the
cast_to_i915() helper. Also did some additional cleanup.
v5: Additional cleanup of newly introduced changes.
v6: Rebase again because of conflict.
Signed-off-by: David Weinehall <david.weinehall@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20160822105931.pcbe2lpsgzckzboa@boom
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
2016-08-22 18:59:31 +08:00
|
|
|
atomic_read(&dev_priv->drm.dev->power.usage_count));
|
2015-06-15 19:52:28 +08:00
|
|
|
#else
|
|
|
|
seq_printf(m, "Device Power Management (CONFIG_PM) disabled\n");
|
|
|
|
#endif
|
2016-04-03 21:14:21 +08:00
|
|
|
seq_printf(m, "PCI device power state: %s [%d]\n",
|
2016-08-22 18:32:44 +08:00
|
|
|
pci_power_name(pdev->current_state),
|
|
|
|
pdev->current_state);
|
2013-08-20 00:18:10 +08:00
|
|
|
|
2019-01-14 22:21:09 +08:00
|
|
|
if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)) {
|
|
|
|
struct drm_printer p = drm_seq_file_printer(m);
|
|
|
|
|
2019-06-14 07:21:53 +08:00
|
|
|
print_intel_runtime_pm_wakeref(&dev_priv->runtime_pm, &p);
|
2019-01-14 22:21:09 +08:00
|
|
|
}
|
|
|
|
|
2013-08-20 17:29:23 +08:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2020-02-12 00:14:51 +08:00
|
|
|
static int i915_engine_info(struct seq_file *m, void *unused)
|
2013-11-25 23:15:35 +08:00
|
|
|
{
|
2020-11-20 00:56:14 +08:00
|
|
|
struct drm_i915_private *i915 = node_to_i915(m->private);
|
2020-02-12 00:14:51 +08:00
|
|
|
struct intel_engine_cs *engine;
|
|
|
|
intel_wakeref_t wakeref;
|
|
|
|
struct drm_printer p;
|
2013-11-25 23:15:35 +08:00
|
|
|
|
2020-11-20 00:56:14 +08:00
|
|
|
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
|
2013-11-25 23:15:35 +08:00
|
|
|
|
2020-12-15 23:44:56 +08:00
|
|
|
seq_printf(m, "GT awake? %s [%d], %llums\n",
|
2020-11-20 00:56:14 +08:00
|
|
|
yesno(i915->gt.awake),
|
2020-12-15 23:44:56 +08:00
|
|
|
atomic_read(&i915->gt.wakeref.count),
|
|
|
|
ktime_to_ms(intel_gt_get_awake_time(&i915->gt)));
|
2020-12-23 20:23:59 +08:00
|
|
|
seq_printf(m, "CS timestamp frequency: %u Hz, %d ns\n",
|
|
|
|
i915->gt.clock_frequency,
|
|
|
|
i915->gt.clock_period_ns);
|
2013-11-25 23:15:35 +08:00
|
|
|
|
2020-02-12 00:14:51 +08:00
|
|
|
p = drm_seq_file_printer(m);
|
2020-11-20 00:56:14 +08:00
|
|
|
for_each_uabi_engine(engine, i915)
|
2020-02-12 00:14:51 +08:00
|
|
|
intel_engine_dump(engine, &p, "%s\n", engine->name);
|
2013-11-25 23:15:35 +08:00
|
|
|
|
drm/i915: Show timeline dependencies for debug
Include the signalers each request in the timeline is waiting on, as a
means to try and identify the cause of a stall. This can be quite
verbose, even as for now we only show each request in the timeline and
its immediate antecedents.
This generates output like:
Timeline 886: { count 1, ready: 0, inflight: 0, seqno: { current: 664, last: 666 }, engine: rcs0 }
U 886:29a- prio=0 @ 134ms: gem_exec_parall<4621>
U bc1:27a- prio=0 @ 134ms: gem_exec_parall[4917]
Timeline 825: { count 1, ready: 0, inflight: 0, seqno: { current: 802, last: 804 }, engine: vcs0 }
U 825:324 prio=0 @ 107ms: gem_exec_parall<4518>
U b75:140- prio=0 @ 110ms: gem_exec_parall<5486>
Timeline b46: { count 1, ready: 0, inflight: 0, seqno: { current: 782, last: 784 }, engine: vcs0 }
U b46:310- prio=0 @ 70ms: gem_exec_parall<5428>
U c11:170- prio=0 @ 70ms: gem_exec_parall[5501]
Timeline 96b: { count 1, ready: 0, inflight: 0, seqno: { current: 632, last: 634 }, engine: vcs0 }
U 96b:27a- prio=0 @ 67ms: gem_exec_parall<4878>
U b75:19e- prio=0 @ 67ms: gem_exec_parall<5486>
Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20201119165616.10834-6-chris@chris-wilson.co.uk
2020-11-20 00:56:16 +08:00
|
|
|
intel_gt_show_timelines(&i915->gt, &p, i915_request_show_with_schedule);
|
2020-11-20 00:56:14 +08:00
|
|
|
|
|
|
|
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
|
2013-11-25 23:15:35 +08:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2020-02-12 00:14:51 +08:00
|
|
|
static int i915_wa_registers(struct seq_file *m, void *unused)
|
2014-02-08 04:48:15 +08:00
|
|
|
{
|
2020-02-12 00:14:51 +08:00
|
|
|
struct drm_i915_private *i915 = node_to_i915(m->private);
|
|
|
|
struct intel_engine_cs *engine;
|
2019-11-30 02:54:27 +08:00
|
|
|
|
2020-02-12 00:14:51 +08:00
|
|
|
for_each_uabi_engine(engine, i915) {
|
|
|
|
const struct i915_wa_list *wal = &engine->ctx_wa_list;
|
|
|
|
const struct i915_wa *wa;
|
|
|
|
unsigned int count;
|
2019-11-30 02:54:34 +08:00
|
|
|
|
2020-02-12 00:14:51 +08:00
|
|
|
count = wal->count;
|
|
|
|
if (!count)
|
|
|
|
continue;
|
2014-11-05 01:06:50 +08:00
|
|
|
|
2020-02-12 00:14:51 +08:00
|
|
|
seq_printf(m, "%s: Workarounds applied: %u\n",
|
|
|
|
engine->name, count);
|
2014-01-22 20:36:08 +08:00
|
|
|
|
2020-02-12 00:14:51 +08:00
|
|
|
for (wa = wal->list; count--; wa++)
|
|
|
|
seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X\n",
|
|
|
|
i915_mmio_reg_offset(wa->reg),
|
|
|
|
wa->set, wa->clr);
|
2014-11-05 01:06:50 +08:00
|
|
|
|
2020-02-12 00:14:51 +08:00
|
|
|
seq_printf(m, "\n");
|
|
|
|
}
|
2014-01-22 20:36:08 +08:00
|
|
|
|
2020-02-12 00:14:51 +08:00
|
|
|
return 0;
|
2014-01-22 20:36:08 +08:00
|
|
|
}
|
|
|
|
|
2013-03-11 05:10:06 +08:00
|
|
|
static int
|
|
|
|
i915_wedged_get(void *data, u64 *val)
|
2009-10-14 05:20:20 +08:00
|
|
|
{
|
2019-07-13 03:29:53 +08:00
|
|
|
struct drm_i915_private *i915 = data;
|
|
|
|
int ret = intel_gt_terminally_wedged(&i915->gt);
|
2009-10-14 05:20:20 +08:00
|
|
|
|
2019-02-20 22:56:37 +08:00
|
|
|
switch (ret) {
|
|
|
|
case -EIO:
|
|
|
|
*val = 1;
|
|
|
|
return 0;
|
|
|
|
case 0:
|
|
|
|
*val = 0;
|
|
|
|
return 0;
|
|
|
|
default:
|
|
|
|
return ret;
|
|
|
|
}
|
2009-10-14 05:20:20 +08:00
|
|
|
}
|
|
|
|
|
2013-03-11 05:10:06 +08:00
|
|
|
static int
|
|
|
|
i915_wedged_set(void *data, u64 val)
|
2009-10-14 05:20:20 +08:00
|
|
|
{
|
2017-03-25 21:47:35 +08:00
|
|
|
struct drm_i915_private *i915 = data;
|
2014-04-15 01:24:27 +08:00
|
|
|
|
2019-02-08 23:37:06 +08:00
|
|
|
/* Flush any previous reset before applying for a new one */
|
2019-07-13 03:29:53 +08:00
|
|
|
wait_event(i915->gt.reset.queue,
|
|
|
|
!test_bit(I915_RESET_BACKOFF, &i915->gt.reset.flags));
|
2015-01-28 23:03:14 +08:00
|
|
|
|
2019-07-13 03:29:53 +08:00
|
|
|
intel_gt_handle_error(&i915->gt, val, I915_ERROR_CAPTURE,
|
|
|
|
"Manually set wedged engine mask = %llx", val);
|
2013-03-11 05:10:06 +08:00
|
|
|
return 0;
|
2009-10-14 05:20:20 +08:00
|
|
|
}
|
|
|
|
|
2013-03-11 05:10:06 +08:00
|
|
|
DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
|
|
|
|
i915_wedged_get, i915_wedged_set,
|
2013-04-12 17:10:05 +08:00
|
|
|
"%llu\n");
|
2009-10-14 05:20:20 +08:00
|
|
|
|
2019-10-12 15:23:07 +08:00
|
|
|
static int
|
|
|
|
i915_perf_noa_delay_set(void *data, u64 val)
|
|
|
|
{
|
|
|
|
struct drm_i915_private *i915 = data;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* This would lead to infinite waits as we're doing timestamp
|
|
|
|
* difference on the CS with only 32bits.
|
|
|
|
*/
|
2020-12-23 20:23:59 +08:00
|
|
|
if (intel_gt_ns_to_clock_interval(&i915->gt, val) > U32_MAX)
|
2019-10-12 15:23:07 +08:00
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
atomic64_set(&i915->perf.noa_programming_delay, val);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
i915_perf_noa_delay_get(void *data, u64 *val)
|
|
|
|
{
|
|
|
|
struct drm_i915_private *i915 = data;
|
|
|
|
|
|
|
|
*val = atomic64_read(&i915->perf.noa_programming_delay);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
DEFINE_SIMPLE_ATTRIBUTE(i915_perf_noa_delay_fops,
|
|
|
|
i915_perf_noa_delay_get,
|
|
|
|
i915_perf_noa_delay_set,
|
|
|
|
"%llu\n");
|
|
|
|
|
2017-10-18 20:16:21 +08:00
|
|
|
/*
 * Flag bits accepted by i915_gem_drop_caches (see i915_drop_caches_set);
 * reading the file returns DROP_ALL, the union of every flag.
 */
#define DROP_UNBOUND		BIT(0)	/* shrink unbound objects */
#define DROP_BOUND		BIT(1)	/* shrink bound objects */
#define DROP_RETIRE		BIT(2)	/* retire completed requests */
#define DROP_ACTIVE		BIT(3)	/* wait for the GT to idle */
#define DROP_FREED		BIT(4)	/* drain freed objects/buffer pool */
#define DROP_SHRINK_ALL		BIT(5)	/* shrink everything */
#define DROP_IDLE		BIT(6)	/* wait for GT power-down */
#define DROP_RESET_ACTIVE	BIT(7)	/* wedge/reset if engines stuck */
#define DROP_RESET_SEQNO	BIT(8)
#define DROP_RCU		BIT(9)	/* rcu_barrier() before returning */
#define DROP_ALL (DROP_UNBOUND		| \
		  DROP_BOUND		| \
		  DROP_RETIRE		| \
		  DROP_ACTIVE		| \
		  DROP_FREED		| \
		  DROP_SHRINK_ALL	| \
		  DROP_IDLE		| \
		  DROP_RESET_ACTIVE	| \
		  DROP_RESET_SEQNO	| \
		  DROP_RCU)
|
2013-03-11 05:10:06 +08:00
|
|
|
static int
|
|
|
|
i915_drop_caches_get(void *data, u64 *val)
|
2013-01-15 20:39:35 +08:00
|
|
|
{
|
2013-03-11 05:10:06 +08:00
|
|
|
*val = DROP_ALL;
|
2013-01-15 20:39:35 +08:00
|
|
|
|
2013-03-11 05:10:06 +08:00
|
|
|
return 0;
|
2013-01-15 20:39:35 +08:00
|
|
|
}
|
2013-03-11 05:10:06 +08:00
|
|
|
static int
|
2019-10-22 17:47:21 +08:00
|
|
|
gt_drop_caches(struct intel_gt *gt, u64 val)
|
2013-01-15 20:39:35 +08:00
|
|
|
{
|
2019-10-04 21:40:02 +08:00
|
|
|
int ret;
|
2013-01-15 20:39:35 +08:00
|
|
|
|
2019-01-28 09:02:18 +08:00
|
|
|
if (val & DROP_RESET_ACTIVE &&
|
2019-10-04 21:40:06 +08:00
|
|
|
wait_for(intel_engines_are_idle(gt), I915_IDLE_ENGINES_TIMEOUT))
|
|
|
|
intel_gt_set_wedged(gt);
|
2018-09-03 16:33:37 +08:00
|
|
|
|
2019-10-04 21:40:02 +08:00
|
|
|
if (val & DROP_RETIRE)
|
2019-10-04 21:40:06 +08:00
|
|
|
intel_gt_retire_requests(gt);
|
2019-03-18 17:51:49 +08:00
|
|
|
|
2019-10-04 21:40:02 +08:00
|
|
|
if (val & (DROP_IDLE | DROP_ACTIVE)) {
|
2019-10-04 21:40:06 +08:00
|
|
|
ret = intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT);
|
2013-01-15 20:39:35 +08:00
|
|
|
if (ret)
|
2019-03-18 17:51:49 +08:00
|
|
|
return ret;
|
2019-10-04 21:40:02 +08:00
|
|
|
}
|
2013-01-15 20:39:35 +08:00
|
|
|
|
2019-10-04 21:40:02 +08:00
|
|
|
if (val & DROP_IDLE) {
|
2019-10-04 21:40:06 +08:00
|
|
|
ret = intel_gt_pm_wait_for_idle(gt);
|
2019-10-04 21:40:02 +08:00
|
|
|
if (ret)
|
|
|
|
return ret;
|
2018-09-03 16:33:37 +08:00
|
|
|
}
|
|
|
|
|
2019-10-04 21:40:06 +08:00
|
|
|
if (val & DROP_RESET_ACTIVE && intel_gt_terminally_wedged(gt))
|
|
|
|
intel_gt_handle_error(gt, ALL_ENGINES, 0, NULL);
|
2013-01-15 20:39:35 +08:00
|
|
|
|
2020-04-30 19:18:12 +08:00
|
|
|
if (val & DROP_FREED)
|
|
|
|
intel_gt_flush_buffer_pool(gt);
|
|
|
|
|
2019-10-22 17:47:21 +08:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
i915_drop_caches_set(void *data, u64 val)
|
|
|
|
{
|
|
|
|
struct drm_i915_private *i915 = data;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n",
|
|
|
|
val, val & DROP_ALL);
|
|
|
|
|
|
|
|
ret = gt_drop_caches(&i915->gt, val);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
|
2017-03-03 17:13:38 +08:00
|
|
|
fs_reclaim_acquire(GFP_KERNEL);
|
2014-09-09 18:16:08 +08:00
|
|
|
if (val & DROP_BOUND)
|
2018-09-03 16:33:37 +08:00
|
|
|
i915_gem_shrink(i915, LONG_MAX, NULL, I915_SHRINK_BOUND);
|
2014-09-04 02:23:37 +08:00
|
|
|
|
2014-09-09 18:16:08 +08:00
|
|
|
if (val & DROP_UNBOUND)
|
2018-09-03 16:33:37 +08:00
|
|
|
i915_gem_shrink(i915, LONG_MAX, NULL, I915_SHRINK_UNBOUND);
|
2013-01-15 20:39:35 +08:00
|
|
|
|
2017-03-08 22:46:22 +08:00
|
|
|
if (val & DROP_SHRINK_ALL)
|
2018-09-03 16:33:37 +08:00
|
|
|
i915_gem_shrink_all(i915);
|
2017-03-03 17:13:38 +08:00
|
|
|
fs_reclaim_release(GFP_KERNEL);
|
2017-03-08 22:46:22 +08:00
|
|
|
|
2019-10-12 01:38:23 +08:00
|
|
|
if (val & DROP_RCU)
|
|
|
|
rcu_barrier();
|
|
|
|
|
2018-02-20 06:06:31 +08:00
|
|
|
if (val & DROP_FREED)
|
2018-09-03 16:33:37 +08:00
|
|
|
i915_gem_drain_freed_objects(i915);
|
2016-10-28 20:58:42 +08:00
|
|
|
|
2019-03-18 17:51:49 +08:00
|
|
|
return 0;
|
2013-01-15 20:39:35 +08:00
|
|
|
}
|
|
|
|
|
2013-03-11 05:10:06 +08:00
|
|
|
DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
|
|
|
|
i915_drop_caches_get, i915_drop_caches_set,
|
|
|
|
"0x%08llx\n");
|
2013-01-15 20:39:35 +08:00
|
|
|
|
2015-02-14 00:27:54 +08:00
|
|
|
static int i915_sseu_status(struct seq_file *m, void *unused)
|
|
|
|
{
|
2020-07-08 08:39:51 +08:00
|
|
|
struct drm_i915_private *i915 = node_to_i915(m->private);
|
|
|
|
struct intel_gt *gt = &i915->gt;
|
2015-02-14 00:27:54 +08:00
|
|
|
|
2020-07-08 08:39:52 +08:00
|
|
|
return intel_sseu_status(m, gt);
|
2015-02-14 00:27:54 +08:00
|
|
|
}
|
|
|
|
|
2011-04-26 02:25:56 +08:00
|
|
|
static int i915_forcewake_open(struct inode *inode, struct file *file)
|
|
|
|
{
|
2017-09-07 21:44:41 +08:00
|
|
|
struct drm_i915_private *i915 = inode->i_private;
|
2019-09-12 20:48:13 +08:00
|
|
|
struct intel_gt *gt = &i915->gt;
|
2011-04-26 02:25:56 +08:00
|
|
|
|
2019-09-12 20:48:13 +08:00
|
|
|
atomic_inc(>->user_wakeref);
|
|
|
|
intel_gt_pm_get(gt);
|
|
|
|
if (INTEL_GEN(i915) >= 6)
|
|
|
|
intel_uncore_forcewake_user_get(gt->uncore);
|
2011-04-26 02:25:56 +08:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2012-04-17 05:07:40 +08:00
|
|
|
static int i915_forcewake_release(struct inode *inode, struct file *file)
|
2011-04-26 02:25:56 +08:00
|
|
|
{
|
2017-09-07 21:44:41 +08:00
|
|
|
struct drm_i915_private *i915 = inode->i_private;
|
2019-09-12 20:48:13 +08:00
|
|
|
struct intel_gt *gt = &i915->gt;
|
2011-04-26 02:25:56 +08:00
|
|
|
|
2019-09-12 20:48:13 +08:00
|
|
|
if (INTEL_GEN(i915) >= 6)
|
|
|
|
intel_uncore_forcewake_user_put(&i915->uncore);
|
|
|
|
intel_gt_pm_put(gt);
|
|
|
|
atomic_dec(>->user_wakeref);
|
2011-04-26 02:25:56 +08:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static const struct file_operations i915_forcewake_fops = {
|
|
|
|
.owner = THIS_MODULE,
|
|
|
|
.open = i915_forcewake_open,
|
|
|
|
.release = i915_forcewake_release,
|
|
|
|
};
|
|
|
|
|
2013-10-18 02:09:56 +08:00
|
|
|
static const struct drm_info_list i915_debugfs_list[] = {
|
2011-01-14 03:06:50 +08:00
|
|
|
{"i915_capabilities", i915_capabilities, 0},
|
2010-09-30 18:46:12 +08:00
|
|
|
{"i915_gem_objects", i915_gem_object_info, 0},
|
2014-03-31 14:00:02 +08:00
|
|
|
{"i915_frequency_info", i915_frequency_info, 0},
|
2011-12-14 20:57:16 +08:00
|
|
|
{"i915_swizzle_info", i915_swizzle_info, 0},
|
2015-06-05 01:23:57 +08:00
|
|
|
{"i915_runtime_pm_status", i915_runtime_pm_status, 0},
|
2016-10-05 04:11:31 +08:00
|
|
|
{"i915_engine_info", i915_engine_info, 0},
|
2014-08-30 23:50:59 +08:00
|
|
|
{"i915_wa_registers", i915_wa_registers, 0},
|
2015-02-14 00:27:54 +08:00
|
|
|
{"i915_sseu_status", i915_sseu_status, 0},
|
2015-04-07 23:20:32 +08:00
|
|
|
{"i915_rps_boost_info", i915_rps_boost_info, 0},
|
2009-02-18 09:08:50 +08:00
|
|
|
};
|
2009-07-02 10:26:52 +08:00
|
|
|
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
|
2009-02-18 09:08:50 +08:00
|
|
|
|
2013-10-18 02:09:56 +08:00
|
|
|
static const struct i915_debugfs_files {
|
2013-07-05 02:49:44 +08:00
|
|
|
const char *name;
|
|
|
|
const struct file_operations *fops;
|
|
|
|
} i915_debugfs_files[] = {
|
2019-10-12 15:23:07 +08:00
|
|
|
{"i915_perf_noa_delay", &i915_perf_noa_delay_fops},
|
2013-07-05 02:49:44 +08:00
|
|
|
{"i915_wedged", &i915_wedged_fops},
|
|
|
|
{"i915_gem_drop_caches", &i915_drop_caches_fops},
|
2016-10-12 17:05:18 +08:00
|
|
|
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
|
2013-07-05 02:49:44 +08:00
|
|
|
{"i915_error_state", &i915_error_state_fops},
|
2017-02-15 00:46:11 +08:00
|
|
|
{"i915_gpu_info", &i915_gpu_info_fops},
|
2016-10-12 17:05:18 +08:00
|
|
|
#endif
|
2013-07-05 02:49:44 +08:00
|
|
|
};
|
|
|
|
|
2020-03-10 21:31:18 +08:00
|
|
|
void i915_debugfs_register(struct drm_i915_private *dev_priv)
|
2009-02-18 09:08:50 +08:00
|
|
|
{
|
2016-07-05 17:40:23 +08:00
|
|
|
struct drm_minor *minor = dev_priv->drm.primary;
|
2018-06-28 15:23:02 +08:00
|
|
|
int i;
|
2009-10-14 05:20:20 +08:00
|
|
|
|
2019-12-05 23:43:40 +08:00
|
|
|
i915_debugfs_params(dev_priv);
|
|
|
|
|
2019-06-13 22:52:29 +08:00
|
|
|
debugfs_create_file("i915_forcewake_user", S_IRUSR, minor->debugfs_root,
|
|
|
|
to_i915(minor->dev), &i915_forcewake_fops);
|
2013-07-05 02:49:44 +08:00
|
|
|
for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
|
2019-06-13 22:52:29 +08:00
|
|
|
debugfs_create_file(i915_debugfs_files[i].name,
|
|
|
|
S_IRUGO | S_IWUSR,
|
|
|
|
minor->debugfs_root,
|
|
|
|
to_i915(minor->dev),
|
|
|
|
i915_debugfs_files[i].fops);
|
2013-07-05 02:49:44 +08:00
|
|
|
}
|
2012-12-04 21:12:00 +08:00
|
|
|
|
2020-03-10 21:31:18 +08:00
|
|
|
drm_debugfs_create_files(i915_debugfs_list,
|
|
|
|
I915_DEBUGFS_ENTRIES,
|
|
|
|
minor->debugfs_root, minor);
|
2009-02-18 09:08:50 +08:00
|
|
|
}
|