drm/i915: Track all held rpm wakerefs
Every time we take a wakeref, record the stack trace of where it was
taken; clearing the set if we ever drop back to no owners. For
debugging an rpm leak, we can look at all the current wakerefs and
check if they have a matching rpm_put.

v2: Use skip=0 for unwinding the stack as it appears our noinline
function doesn't appear on the stack (nor does save_stack_trace itself!)
v3: Allow rpm->debug_count to disappear between inspections and so
avoid calling krealloc(0) as that may return a ZERO_PTR not NULL! (Mika)
v4: Show who last acquired/released the runtime pm

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Jani Nikula <jani.nikula@intel.com>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Tested-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190114142129.24398-1-chris@chris-wilson.co.uk
commit bd780f37a3
parent 74256b7ecf
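The mechanism the commit message describes can be sketched in miniature. The following is a hedged, userspace-only illustration in plain C, not the i915 code: every get records an identifier for its call site, every put drops the reference, the recorded set is cleared once the count falls back to zero, and whatever is left over when the driver expects zero is the leak to report. The names (wakeref_get, wakeref_put, wakeref_dump) and the use of return addresses are inventions for this example; the patch itself saves whole stack traces as depot_stack_handle_t entries under rpm->debug.lock.

#include <stdio.h>
#include <stdlib.h>

static void **owners;          /* recorded call sites, one per outstanding ref */
static unsigned long nowners;  /* how many call sites are recorded */
static unsigned long wakerefs; /* the reference count itself */

static void wakeref_get(void *caller)
{
	void **grown;

	wakerefs++;

	/* Record who took the reference; tolerate allocation failure. */
	grown = realloc(owners, (nowners + 1) * sizeof(*owners));
	if (grown) {
		owners = grown;
		owners[nowners++] = caller;
	}
}

static void wakeref_put(void)
{
	if (wakerefs && --wakerefs == 0) {
		/* Back to zero owners: every get had a put, clear the set. */
		free(owners);
		owners = NULL;
		nowners = 0;
	}
}

static void wakeref_dump(void)
{
	unsigned long i;

	printf("outstanding wakerefs: %lu\n", wakerefs);
	for (i = 0; i < nowners; i++)
		printf("  taken at %p\n", owners[i]);
}

int main(void)
{
	wakeref_get(__builtin_return_address(0));
	wakeref_get(__builtin_return_address(0));
	wakeref_put();   /* the second put is "forgotten"... */
	wakeref_dump();  /* ...so one leaked owner is still listed */
	return 0;
}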
drivers/gpu/drm/i915/Kconfig.debug

@@ -21,11 +21,11 @@ config DRM_I915_DEBUG
 	select DEBUG_FS
 	select PREEMPT_COUNT
 	select I2C_CHARDEV
+	select STACKDEPOT
 	select DRM_DP_AUX_CHARDEV
 	select X86_MSR # used by igt/pm_rpm
 	select DRM_VGEM # used by igt/prime_vgem (dmabuf interop checks)
 	select DRM_DEBUG_MM if DRM=y
-	select STACKDEPOT if DRM=y # for DRM_DEBUG_MM
 	select DRM_DEBUG_SELFTEST
 	select SW_SYNC # signaling validation framework (igt/syncobj*)
 	select DRM_I915_SW_FENCE_DEBUG_OBJECTS
@@ -173,6 +173,7 @@ config DRM_I915_DEBUG_RUNTIME_PM
 	bool "Enable extra state checking for runtime PM"
 	depends on DRM_I915
 	default n
+	select STACKDEPOT
 	help
 	  Choose this option to turn on extra state checking for the
 	  runtime PM functionality. This may introduce overhead during
drivers/gpu/drm/i915/i915_debugfs.c

@@ -2702,6 +2702,12 @@ static int i915_runtime_pm_status(struct seq_file *m, void *unused)
 		   pci_power_name(pdev->current_state),
 		   pdev->current_state);
 
+	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)) {
+		struct drm_printer p = drm_seq_file_printer(m);
+
+		print_intel_runtime_pm_wakeref(dev_priv, &p);
+	}
+
 	return 0;
 }
 
drivers/gpu/drm/i915/i915_drv.c

@@ -905,6 +905,7 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv)
 	mutex_init(&dev_priv->pps_mutex);
 
 	i915_memcpy_init_early(dev_priv);
+	intel_runtime_pm_init_early(dev_priv);
 
 	ret = i915_workqueues_init(dev_priv);
 	if (ret < 0)
@@ -1807,8 +1808,7 @@ void i915_driver_unload(struct drm_device *dev)
 	i915_driver_cleanup_mmio(dev_priv);
 
 	enable_rpm_wakeref_asserts(dev_priv);
-
-	WARN_ON(atomic_read(&dev_priv->runtime_pm.wakeref_count));
+	intel_runtime_pm_cleanup(dev_priv);
 }
 
 static void i915_driver_release(struct drm_device *dev)
@@ -2010,6 +2010,8 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
 
 out:
 	enable_rpm_wakeref_asserts(dev_priv);
+	if (!dev_priv->uncore.user_forcewake.count)
+		intel_runtime_pm_cleanup(dev_priv);
 
 	return ret;
 }
@@ -2965,7 +2967,7 @@ static int intel_runtime_suspend(struct device *kdev)
 	}
 
 	enable_rpm_wakeref_asserts(dev_priv);
-	WARN_ON_ONCE(atomic_read(&dev_priv->runtime_pm.wakeref_count));
+	intel_runtime_pm_cleanup(dev_priv);
 
 	if (intel_uncore_arm_unclaimed_mmio_detection(dev_priv))
 		DRM_ERROR("Unclaimed access detected prior to suspending\n");
drivers/gpu/drm/i915/i915_drv.h

@@ -45,6 +45,7 @@
 #include <linux/pm_qos.h>
 #include <linux/reservation.h>
 #include <linux/shmem_fs.h>
+#include <linux/stackdepot.h>
 
 #include <drm/intel-gtt.h>
 #include <drm/drm_legacy.h> /* for struct drm_dma_handle */
@@ -1156,6 +1157,25 @@ struct i915_runtime_pm {
 	atomic_t wakeref_count;
 	bool suspended;
 	bool irqs_enabled;
+
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
+	/*
+	 * To aide detection of wakeref leaks and general misuse, we
+	 * track all wakeref holders. With manual markup (i.e. returning
+	 * a cookie to each rpm_get caller which they then supply to their
+	 * paired rpm_put) we can remove corresponding pairs of and keep
+	 * the array trimmed to active wakerefs.
+	 */
+	struct intel_runtime_pm_debug {
+		spinlock_t lock;
+
+		depot_stack_handle_t last_acquire;
+		depot_stack_handle_t last_release;
+
+		depot_stack_handle_t *owners;
+		unsigned long count;
+	} debug;
+#endif
 };
 
 enum intel_pipe_crc_source {
drivers/gpu/drm/i915/intel_drv.h

@@ -41,6 +41,8 @@
 #include <drm/drm_atomic.h>
 #include <media/cec-notifier.h>
 
+struct drm_printer;
+
 /**
  * __wait_for - magic wait macro
  *
@@ -2084,6 +2086,7 @@ bool intel_psr_enabled(struct intel_dp *intel_dp);
 void intel_init_quirks(struct drm_i915_private *dev_priv);
 
 /* intel_runtime_pm.c */
+void intel_runtime_pm_init_early(struct drm_i915_private *dev_priv);
 int intel_power_domains_init(struct drm_i915_private *);
 void intel_power_domains_cleanup(struct drm_i915_private *dev_priv);
 void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume);
@@ -2106,6 +2109,7 @@ void bxt_display_core_init(struct drm_i915_private *dev_priv, bool resume);
 void bxt_display_core_uninit(struct drm_i915_private *dev_priv);
 void intel_runtime_pm_enable(struct drm_i915_private *dev_priv);
 void intel_runtime_pm_disable(struct drm_i915_private *dev_priv);
+void intel_runtime_pm_cleanup(struct drm_i915_private *dev_priv);
 const char *
 intel_display_power_domain_str(enum intel_display_power_domain domain);
 
@@ -2123,23 +2127,23 @@ void icl_dbuf_slices_update(struct drm_i915_private *dev_priv,
 			    u8 req_slices);
 
 static inline void
-assert_rpm_device_not_suspended(struct drm_i915_private *dev_priv)
+assert_rpm_device_not_suspended(struct drm_i915_private *i915)
 {
-	WARN_ONCE(dev_priv->runtime_pm.suspended,
+	WARN_ONCE(i915->runtime_pm.suspended,
 		  "Device suspended during HW access\n");
 }
 
 static inline void
-assert_rpm_wakelock_held(struct drm_i915_private *dev_priv)
+assert_rpm_wakelock_held(struct drm_i915_private *i915)
 {
-	assert_rpm_device_not_suspended(dev_priv);
-	WARN_ONCE(!atomic_read(&dev_priv->runtime_pm.wakeref_count),
+	assert_rpm_device_not_suspended(i915);
+	WARN_ONCE(!atomic_read(&i915->runtime_pm.wakeref_count),
 		  "RPM wakelock ref not held during HW access");
 }
 
 /**
  * disable_rpm_wakeref_asserts - disable the RPM assert checks
- * @dev_priv: i915 device instance
+ * @i915: i915 device instance
  *
  * This function disable asserts that check if we hold an RPM wakelock
  * reference, while keeping the device-not-suspended checks still enabled.
@@ -2156,14 +2160,14 @@ assert_rpm_wakelock_held(struct drm_i915_private *dev_priv)
 * enable_rpm_wakeref_asserts().
 */
 static inline void
-disable_rpm_wakeref_asserts(struct drm_i915_private *dev_priv)
+disable_rpm_wakeref_asserts(struct drm_i915_private *i915)
 {
-	atomic_inc(&dev_priv->runtime_pm.wakeref_count);
+	atomic_inc(&i915->runtime_pm.wakeref_count);
 }
 
 /**
  * enable_rpm_wakeref_asserts - re-enable the RPM assert checks
- * @dev_priv: i915 device instance
+ * @i915: i915 device instance
  *
  * This function re-enables the RPM assert checks after disabling them with
  * disable_rpm_wakeref_asserts. It's meant to be used only in special
@@ -2173,15 +2177,25 @@ disable_rpm_wakeref_asserts(struct drm_i915_private *dev_priv)
 * disable_rpm_wakeref_asserts().
 */
 static inline void
-enable_rpm_wakeref_asserts(struct drm_i915_private *dev_priv)
+enable_rpm_wakeref_asserts(struct drm_i915_private *i915)
 {
-	atomic_dec(&dev_priv->runtime_pm.wakeref_count);
+	atomic_dec(&i915->runtime_pm.wakeref_count);
 }
 
-void intel_runtime_pm_get(struct drm_i915_private *dev_priv);
-bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv);
-void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv);
-void intel_runtime_pm_put(struct drm_i915_private *dev_priv);
+void intel_runtime_pm_get(struct drm_i915_private *i915);
+bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *i915);
+void intel_runtime_pm_get_noresume(struct drm_i915_private *i915);
+void intel_runtime_pm_put(struct drm_i915_private *i915);
 
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
+void print_intel_runtime_pm_wakeref(struct drm_i915_private *i915,
+				    struct drm_printer *p);
+#else
+static inline void print_intel_runtime_pm_wakeref(struct drm_i915_private *i915,
+						  struct drm_printer *p)
+{
+}
+#endif
+
 void chv_phy_powergate_lanes(struct intel_encoder *encoder,
 			     bool override, unsigned int mask);
drivers/gpu/drm/i915/intel_runtime_pm.c

@@ -29,6 +29,8 @@
 #include <linux/pm_runtime.h>
 #include <linux/vgaarb.h>
 
+#include <drm/drm_print.h>
+
 #include "i915_drv.h"
 #include "intel_drv.h"
 
@@ -49,6 +51,218 @@
  * present for a given platform.
  */
 
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
+
+#include <linux/sort.h>
+
+#define STACKDEPTH 8
+
+static noinline depot_stack_handle_t __save_depot_stack(void)
+{
+	unsigned long entries[STACKDEPTH];
+	struct stack_trace trace = {
+		.entries = entries,
+		.max_entries = ARRAY_SIZE(entries),
+		.skip = 1,
+	};
+
+	save_stack_trace(&trace);
+	if (trace.nr_entries &&
+	    trace.entries[trace.nr_entries - 1] == ULONG_MAX)
+		trace.nr_entries--;
+
+	return depot_save_stack(&trace, GFP_NOWAIT | __GFP_NOWARN);
+}
+
+static void __print_depot_stack(depot_stack_handle_t stack,
+				char *buf, int sz, int indent)
+{
+	unsigned long entries[STACKDEPTH];
+	struct stack_trace trace = {
+		.entries = entries,
+		.max_entries = ARRAY_SIZE(entries),
+	};
+
+	depot_fetch_stack(stack, &trace);
+	snprint_stack_trace(buf, sz, &trace, indent);
+}
+
+static void init_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
+{
+	struct i915_runtime_pm *rpm = &i915->runtime_pm;
+
+	spin_lock_init(&rpm->debug.lock);
+}
+
+static noinline void
+track_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
+{
+	struct i915_runtime_pm *rpm = &i915->runtime_pm;
+	depot_stack_handle_t stack, *stacks;
+	unsigned long flags;
+
+	atomic_inc(&rpm->wakeref_count);
+	assert_rpm_wakelock_held(i915);
+
+	if (!HAS_RUNTIME_PM(i915))
+		return;
+
+	stack = __save_depot_stack();
+	if (!stack)
+		return;
+
+	spin_lock_irqsave(&rpm->debug.lock, flags);
+
+	if (!rpm->debug.count)
+		rpm->debug.last_acquire = stack;
+
+	stacks = krealloc(rpm->debug.owners,
+			  (rpm->debug.count + 1) * sizeof(*stacks),
+			  GFP_NOWAIT | __GFP_NOWARN);
+	if (stacks) {
+		stacks[rpm->debug.count++] = stack;
+		rpm->debug.owners = stacks;
+	}
+
+	spin_unlock_irqrestore(&rpm->debug.lock, flags);
+}
+
+static int cmphandle(const void *_a, const void *_b)
+{
+	const depot_stack_handle_t * const a = _a, * const b = _b;
+
+	if (*a < *b)
+		return -1;
+	else if (*a > *b)
+		return 1;
+	else
+		return 0;
+}
+
+static void
+__print_intel_runtime_pm_wakeref(struct drm_printer *p,
+				 const struct intel_runtime_pm_debug *dbg)
+{
+	unsigned long i;
+	char *buf;
+
+	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+	if (!buf)
+		return;
+
+	if (dbg->last_acquire) {
+		__print_depot_stack(dbg->last_acquire, buf, PAGE_SIZE, 2);
+		drm_printf(p, "Wakeref last acquired:\n%s", buf);
+	}
+
+	if (dbg->last_release) {
+		__print_depot_stack(dbg->last_release, buf, PAGE_SIZE, 2);
+		drm_printf(p, "Wakeref last released:\n%s", buf);
+	}
+
+	drm_printf(p, "Wakeref count: %lu\n", dbg->count);
+
+	sort(dbg->owners, dbg->count, sizeof(*dbg->owners), cmphandle, NULL);
+
+	for (i = 0; i < dbg->count; i++) {
+		depot_stack_handle_t stack = dbg->owners[i];
+		unsigned long rep;
+
+		rep = 1;
+		while (i + 1 < dbg->count && dbg->owners[i + 1] == stack)
+			rep++, i++;
+		__print_depot_stack(stack, buf, PAGE_SIZE, 2);
+		drm_printf(p, "Wakeref x%lu taken at:\n%s", rep, buf);
+	}
+
+	kfree(buf);
+}
+
+static noinline void
+untrack_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
+{
+	struct i915_runtime_pm *rpm = &i915->runtime_pm;
+	struct intel_runtime_pm_debug dbg = {};
+	struct drm_printer p;
+	unsigned long flags;
+
+	assert_rpm_wakelock_held(i915);
+	if (atomic_dec_and_lock_irqsave(&rpm->wakeref_count,
+					&rpm->debug.lock,
+					flags)) {
+		dbg = rpm->debug;
+
+		rpm->debug.owners = NULL;
+		rpm->debug.count = 0;
+		rpm->debug.last_release = __save_depot_stack();
+
+		spin_unlock_irqrestore(&rpm->debug.lock, flags);
+	}
+	if (!dbg.count)
+		return;
+
+	p = drm_debug_printer("i915");
+	__print_intel_runtime_pm_wakeref(&p, &dbg);
+
+	kfree(dbg.owners);
+}
+
+void print_intel_runtime_pm_wakeref(struct drm_i915_private *i915,
+				    struct drm_printer *p)
+{
+	struct intel_runtime_pm_debug dbg = {};
+
+	do {
+		struct i915_runtime_pm *rpm = &i915->runtime_pm;
+		unsigned long alloc = dbg.count;
+		depot_stack_handle_t *s;
+
+		spin_lock_irq(&rpm->debug.lock);
+		dbg.count = rpm->debug.count;
+		if (dbg.count <= alloc) {
+			memcpy(dbg.owners,
+			       rpm->debug.owners,
+			       dbg.count * sizeof(*s));
+		}
+		dbg.last_acquire = rpm->debug.last_acquire;
+		dbg.last_release = rpm->debug.last_release;
+		spin_unlock_irq(&rpm->debug.lock);
+		if (dbg.count <= alloc)
+			break;
+
+		s = krealloc(dbg.owners, dbg.count * sizeof(*s), GFP_KERNEL);
+		if (!s)
+			goto out;
+
+		dbg.owners = s;
+	} while (1);
+
+	__print_intel_runtime_pm_wakeref(p, &dbg);
+
+out:
+	kfree(dbg.owners);
+}
+
+#else
+
+static void init_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
+{
+}
+
+static void track_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
+{
+	atomic_inc(&i915->runtime_pm.wakeref_count);
+	assert_rpm_wakelock_held(i915);
+}
+
+static void untrack_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
+{
+	assert_rpm_wakelock_held(i915);
+	atomic_dec(&i915->runtime_pm.wakeref_count);
+}
+
+#endif
+
 bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
 					 enum i915_power_well_id power_well_id);
 
@@ -3986,7 +4200,7 @@ static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv)
 
 /**
  * intel_runtime_pm_get - grab a runtime pm reference
- * @dev_priv: i915 device instance
+ * @i915: i915 device instance
  *
  * This function grabs a device-level runtime pm reference (mostly used for GEM
  * code to ensure the GTT or GT is on) and ensures that it is powered up.
@@ -3994,22 +4208,21 @@ static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv)
 * Any runtime pm reference obtained by this function must have a symmetric
 * call to intel_runtime_pm_put() to release the reference again.
 */
-void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
+void intel_runtime_pm_get(struct drm_i915_private *i915)
 {
-	struct pci_dev *pdev = dev_priv->drm.pdev;
+	struct pci_dev *pdev = i915->drm.pdev;
 	struct device *kdev = &pdev->dev;
 	int ret;
 
 	ret = pm_runtime_get_sync(kdev);
 	WARN_ONCE(ret < 0, "pm_runtime_get_sync() failed: %d\n", ret);
 
-	atomic_inc(&dev_priv->runtime_pm.wakeref_count);
-	assert_rpm_wakelock_held(dev_priv);
+	track_intel_runtime_pm_wakeref(i915);
 }
 
 /**
  * intel_runtime_pm_get_if_in_use - grab a runtime pm reference if device in use
- * @dev_priv: i915 device instance
+ * @i915: i915 device instance
  *
  * This function grabs a device-level runtime pm reference if the device is
  * already in use and ensures that it is powered up. It is illegal to try
@@ -4020,10 +4233,10 @@ void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
 *
 * Returns: True if the wakeref was acquired, or False otherwise.
 */
-bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv)
+bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *i915)
 {
 	if (IS_ENABLED(CONFIG_PM)) {
-		struct pci_dev *pdev = dev_priv->drm.pdev;
+		struct pci_dev *pdev = i915->drm.pdev;
 		struct device *kdev = &pdev->dev;
 
 		/*
@@ -4036,15 +4249,14 @@ bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv)
 			return false;
 	}
 
-	atomic_inc(&dev_priv->runtime_pm.wakeref_count);
-	assert_rpm_wakelock_held(dev_priv);
+	track_intel_runtime_pm_wakeref(i915);
 
 	return true;
 }
 
 /**
  * intel_runtime_pm_get_noresume - grab a runtime pm reference
- * @dev_priv: i915 device instance
+ * @i915: i915 device instance
  *
  * This function grabs a device-level runtime pm reference (mostly used for GEM
 * code to ensure the GTT or GT is on).
@@ -4059,32 +4271,31 @@ bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv)
 * Any runtime pm reference obtained by this function must have a symmetric
 * call to intel_runtime_pm_put() to release the reference again.
 */
-void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv)
+void intel_runtime_pm_get_noresume(struct drm_i915_private *i915)
 {
-	struct pci_dev *pdev = dev_priv->drm.pdev;
+	struct pci_dev *pdev = i915->drm.pdev;
 	struct device *kdev = &pdev->dev;
 
-	assert_rpm_wakelock_held(dev_priv);
+	assert_rpm_wakelock_held(i915);
 	pm_runtime_get_noresume(kdev);
 
-	atomic_inc(&dev_priv->runtime_pm.wakeref_count);
+	track_intel_runtime_pm_wakeref(i915);
 }
 
 /**
  * intel_runtime_pm_put - release a runtime pm reference
- * @dev_priv: i915 device instance
+ * @i915: i915 device instance
 *
 * This function drops the device-level runtime pm reference obtained by
 * intel_runtime_pm_get() and might power down the corresponding
 * hardware block right away if this is the last reference.
 */
-void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
+void intel_runtime_pm_put(struct drm_i915_private *i915)
 {
-	struct pci_dev *pdev = dev_priv->drm.pdev;
+	struct pci_dev *pdev = i915->drm.pdev;
 	struct device *kdev = &pdev->dev;
 
-	assert_rpm_wakelock_held(dev_priv);
-	atomic_dec(&dev_priv->runtime_pm.wakeref_count);
+	untrack_intel_runtime_pm_wakeref(i915);
 
 	pm_runtime_mark_last_busy(kdev);
 	pm_runtime_put_autosuspend(kdev);
@@ -4092,7 +4303,7 @@ void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
 
 /**
  * intel_runtime_pm_enable - enable runtime pm
- * @dev_priv: i915 device instance
+ * @i915: i915 device instance
 *
 * This function enables runtime pm at the end of the driver load sequence.
 *
@@ -4100,9 +4311,9 @@ void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
 * subordinate display power domains. That is done by
 * intel_power_domains_enable().
 */
-void intel_runtime_pm_enable(struct drm_i915_private *dev_priv)
+void intel_runtime_pm_enable(struct drm_i915_private *i915)
 {
-	struct pci_dev *pdev = dev_priv->drm.pdev;
+	struct pci_dev *pdev = i915->drm.pdev;
 	struct device *kdev = &pdev->dev;
 
 	/*
@@ -4124,7 +4335,7 @@ void intel_runtime_pm_enable(struct drm_i915_private *dev_priv)
	 * so the driver's own RPM reference tracking asserts also work on
	 * platforms without RPM support.
	 */
-	if (!HAS_RUNTIME_PM(dev_priv)) {
+	if (!HAS_RUNTIME_PM(i915)) {
 		int ret;
 
 		pm_runtime_dont_use_autosuspend(kdev);
@@ -4142,17 +4353,35 @@ void intel_runtime_pm_enable(struct drm_i915_private *dev_priv)
 	pm_runtime_put_autosuspend(kdev);
 }
 
-void intel_runtime_pm_disable(struct drm_i915_private *dev_priv)
+void intel_runtime_pm_disable(struct drm_i915_private *i915)
 {
-	struct pci_dev *pdev = dev_priv->drm.pdev;
+	struct pci_dev *pdev = i915->drm.pdev;
 	struct device *kdev = &pdev->dev;
 
 	/* Transfer rpm ownership back to core */
-	WARN(pm_runtime_get_sync(&dev_priv->drm.pdev->dev) < 0,
+	WARN(pm_runtime_get_sync(kdev) < 0,
 	     "Failed to pass rpm ownership back to core\n");
 
 	pm_runtime_dont_use_autosuspend(kdev);
 
-	if (!HAS_RUNTIME_PM(dev_priv))
+	if (!HAS_RUNTIME_PM(i915))
 		pm_runtime_put(kdev);
 }
+
+void intel_runtime_pm_cleanup(struct drm_i915_private *i915)
+{
+	struct i915_runtime_pm *rpm = &i915->runtime_pm;
+	int count;
+
+	count = atomic_fetch_inc(&rpm->wakeref_count); /* balance untrack */
+	WARN(count,
+	     "i915->runtime_pm.wakeref_count=%d on cleanup\n",
+	     count);
+
+	untrack_intel_runtime_pm_wakeref(i915);
+}
+
+void intel_runtime_pm_init_early(struct drm_i915_private *i915)
+{
+	init_intel_runtime_pm_wakeref(i915);
+}
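print_intel_runtime_pm_wakeref() in the hunks above copies the tracked owners out of rpm->debug under its spinlock; since it cannot allocate memory while holding that lock, it reads the current count, drops the lock, grows a local buffer with krealloc and retries until the copy fits. The sketch below is a hedged, userspace rendering of that snapshot-and-retry pattern using a pthread mutex in place of the spinlock; every name in it is illustrative rather than kernel API, and it drops the lock before allocating for the same reason the kernel version does.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int *owners;         /* shared owner list, protected by lock */
static unsigned long count; /* number of entries, protected by lock */

/* Return a private copy of the owner list; *out gets the number of entries. */
static int *snapshot_owners(unsigned long *out)
{
	int *copy = NULL;
	unsigned long alloc = 0;

	for (;;) {
		unsigned long n;
		int *grown;

		pthread_mutex_lock(&lock);
		n = count;
		if (n && n <= alloc) /* local buffer is big enough: copy under the lock */
			memcpy(copy, owners, n * sizeof(*copy));
		pthread_mutex_unlock(&lock);

		if (n <= alloc) {
			*out = n;
			return copy; /* may be NULL if the list was empty */
		}

		/* Too small: allocate outside the lock, then retry the copy. */
		grown = realloc(copy, n * sizeof(*copy));
		if (!grown) {
			free(copy);
			*out = 0;
			return NULL;
		}
		copy = grown;
		alloc = n;
	}
}

int main(void)
{
	static int ids[] = { 101, 102, 103 }; /* stand-ins for stack handles */
	unsigned long n, i;
	int *snap;

	owners = ids;
	count = 3;

	snap = snapshot_owners(&n);
	for (i = 0; snap && i < n; i++)
		printf("owner[%lu] = %d\n", i, snap[i]);
	free(snap);
	return 0;
}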
drivers/gpu/drm/i915/selftests/mock_gem_device.c

@@ -154,15 +154,17 @@ struct drm_i915_private *mock_gem_device(void)
 	pdev->dev.archdata.iommu = (void *)-1;
 #endif
 
+	i915 = (struct drm_i915_private *)(pdev + 1);
+	pci_set_drvdata(pdev, i915);
+
+	intel_runtime_pm_init_early(i915);
+
 	dev_pm_domain_set(&pdev->dev, &pm_domain);
 	pm_runtime_enable(&pdev->dev);
 	pm_runtime_dont_use_autosuspend(&pdev->dev);
 	if (pm_runtime_enabled(&pdev->dev))
 		WARN_ON(pm_runtime_get_sync(&pdev->dev));
 
-	i915 = (struct drm_i915_private *)(pdev + 1);
-	pci_set_drvdata(pdev, i915);
-
 	err = drm_dev_init(&i915->drm, &mock_driver, &pdev->dev);
 	if (err) {
 		pr_err("Failed to initialise mock GEM device: err=%d\n", err);