drm/i915: move all rps state into dev_priv->rps
This way it's easier to see what belongs together, and what is used by
the ilk ips code. Also add some comments that explain the locking.

Note that (cur|min|max)_delay need to be duplicated, because they're
also used by the ips code.

v2: Missed one place that the dev_priv->ips change caught ...

Reviewed-by: Ben Widawsky <ben@bwidawsk.net>
Reviewed-by: Damien Lespiau <damien.lespiau@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
This commit is contained in:
parent 22bcfc6a4b
commit c6a828d326
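For orientation, a minimal sketch of the gen6+ rps state as it ends up grouped inside drm_i915_private by this patch, condensed from the struct hunk below; surrounding members are elided, and the duplicated ilk ips fields at the end are only indicated by the commit message:

	/* gen6+ rps state (sketch; other drm_i915_private members elided) */
	struct {
		struct work_struct work;  /* bottom half for deferred PM interrupts */
		u32 pm_iir;               /* PM interrupt bits handed to the work item */
		/* irqsave spinlock protecting work and pm_iir */
		spinlock_t lock;

		/* everything below, and all the rps hw state, is protected by
		 * dev->struct_mutex */
		u8 cur_delay;
		u8 min_delay;
		u8 max_delay;
	} rps;

	/* the ilk ips code keeps its own cur/min/max_delay copies outside rps */
	u8 cur_delay;
	u8 min_delay;
	u8 max_delay;

Accessors then simply become dev_priv->rps.<field>, e.g. dev_priv->rps.cur_delay, which is what the remaining hunks switch over to.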
@@ -1274,7 +1274,8 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
 	seq_printf(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\n");
 
-	for (gpu_freq = dev_priv->min_delay; gpu_freq <= dev_priv->max_delay;
+	for (gpu_freq = dev_priv->rps.min_delay;
+	     gpu_freq <= dev_priv->rps.max_delay;
 	     gpu_freq++) {
 		I915_WRITE(GEN6_PCODE_DATA, gpu_freq);
 		I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY |
@@ -1712,7 +1713,7 @@ i915_max_freq_read(struct file *filp,
 		return ret;
 
 	len = snprintf(buf, sizeof(buf),
-		       "max freq: %d\n", dev_priv->max_delay * 50);
+		       "max freq: %d\n", dev_priv->rps.max_delay * 50);
 	mutex_unlock(&dev->struct_mutex);
 
 	if (len > sizeof(buf))
@@ -1755,7 +1756,7 @@ i915_max_freq_write(struct file *filp,
 	/*
 	 * Turbo will still be enabled, but won't go above the set value.
 	 */
-	dev_priv->max_delay = val / 50;
+	dev_priv->rps.max_delay = val / 50;
 
 	gen6_set_rps(dev, val / 50);
 	mutex_unlock(&dev->struct_mutex);
@@ -1788,7 +1789,7 @@ i915_min_freq_read(struct file *filp, char __user *ubuf, size_t max,
 		return ret;
 
 	len = snprintf(buf, sizeof(buf),
-		       "min freq: %d\n", dev_priv->min_delay * 50);
+		       "min freq: %d\n", dev_priv->rps.min_delay * 50);
 	mutex_unlock(&dev->struct_mutex);
 
 	if (len > sizeof(buf))
@@ -1829,7 +1830,7 @@ i915_min_freq_write(struct file *filp, const char __user *ubuf, size_t cnt,
 	/*
 	 * Turbo will still be enabled, but won't go below the set value.
 	 */
-	dev_priv->min_delay = val / 50;
+	dev_priv->rps.min_delay = val / 50;
 
 	gen6_set_rps(dev, val / 50);
 	mutex_unlock(&dev->struct_mutex);
@@ -1605,7 +1605,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 
 	spin_lock_init(&dev_priv->irq_lock);
 	spin_lock_init(&dev_priv->error_lock);
-	spin_lock_init(&dev_priv->rps_lock);
+	spin_lock_init(&dev_priv->rps.lock);
 
 	if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
 		dev_priv->num_pipe = 3;
@@ -819,9 +819,21 @@ typedef struct drm_i915_private {
 
 	bool mchbar_need_disable;
 
-	struct work_struct rps_work;
-	spinlock_t rps_lock;
-	u32 pm_iir;
+	/* gen6+ rps state */
+	struct {
+		struct work_struct work;
+		u32 pm_iir;
+		/* lock - irqsave spinlock that protectects the work_struct and
+		 * pm_iir. */
+		spinlock_t lock;
+
+		/* The below variables an all the rps hw state are protected by
+		 * dev->struct mutext. */
+		u8 cur_delay;
+		u8 min_delay;
+		u8 max_delay;
+	} rps;
+
 
 	u8 cur_delay;
 	u8 min_delay;
@@ -349,16 +349,16 @@ static void notify_ring(struct drm_device *dev,
 static void gen6_pm_rps_work(struct work_struct *work)
 {
 	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
-						    rps_work);
+						    rps.work);
 	u32 pm_iir, pm_imr;
 	u8 new_delay;
 
-	spin_lock_irq(&dev_priv->rps_lock);
-	pm_iir = dev_priv->pm_iir;
-	dev_priv->pm_iir = 0;
+	spin_lock_irq(&dev_priv->rps.lock);
+	pm_iir = dev_priv->rps.pm_iir;
+	dev_priv->rps.pm_iir = 0;
 	pm_imr = I915_READ(GEN6_PMIMR);
 	I915_WRITE(GEN6_PMIMR, 0);
-	spin_unlock_irq(&dev_priv->rps_lock);
+	spin_unlock_irq(&dev_priv->rps.lock);
 
 	if ((pm_iir & GEN6_PM_DEFERRED_EVENTS) == 0)
 		return;
@@ -366,9 +366,9 @@ static void gen6_pm_rps_work(struct work_struct *work)
 	mutex_lock(&dev_priv->dev->struct_mutex);
 
 	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD)
-		new_delay = dev_priv->cur_delay + 1;
+		new_delay = dev_priv->rps.cur_delay + 1;
 	else
-		new_delay = dev_priv->cur_delay - 1;
+		new_delay = dev_priv->rps.cur_delay - 1;
 
 	gen6_set_rps(dev_priv->dev, new_delay);
 
@@ -488,20 +488,20 @@ static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
 	 * IIR bits should never already be set because IMR should
 	 * prevent an interrupt from being shown in IIR. The warning
 	 * displays a case where we've unsafely cleared
-	 * dev_priv->pm_iir. Although missing an interrupt of the same
+	 * dev_priv->rps.pm_iir. Although missing an interrupt of the same
 	 * type is not a problem, it displays a problem in the logic.
 	 *
-	 * The mask bit in IMR is cleared by rps_work.
+	 * The mask bit in IMR is cleared by dev_priv->rps.work.
 	 */
 
-	spin_lock_irqsave(&dev_priv->rps_lock, flags);
-	WARN(dev_priv->pm_iir & pm_iir, "Missed a PM interrupt\n");
-	dev_priv->pm_iir |= pm_iir;
-	I915_WRITE(GEN6_PMIMR, dev_priv->pm_iir);
+	spin_lock_irqsave(&dev_priv->rps.lock, flags);
+	WARN(dev_priv->rps.pm_iir & pm_iir, "Missed a PM interrupt\n");
+	dev_priv->rps.pm_iir |= pm_iir;
+	I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
 	POSTING_READ(GEN6_PMIMR);
-	spin_unlock_irqrestore(&dev_priv->rps_lock, flags);
+	spin_unlock_irqrestore(&dev_priv->rps.lock, flags);
 
-	queue_work(dev_priv->wq, &dev_priv->rps_work);
+	queue_work(dev_priv->wq, &dev_priv->rps.work);
 }
 
 static irqreturn_t valleyview_irq_handler(DRM_IRQ_ARGS)
@@ -2649,7 +2649,7 @@ void intel_irq_init(struct drm_device *dev)
 
 	INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
 	INIT_WORK(&dev_priv->error_work, i915_error_work_func);
-	INIT_WORK(&dev_priv->rps_work, gen6_pm_rps_work);
+	INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
 	INIT_WORK(&dev_priv->parity_error_work, ivybridge_parity_work);
 
 	dev->driver->get_vblank_counter = i915_get_vblank_counter;
@@ -7218,7 +7218,7 @@ void intel_modeset_cleanup(struct drm_device *dev)
 	 * enqueue unpin/hotplug work. */
 	drm_irq_uninstall(dev);
 	cancel_work_sync(&dev_priv->hotplug_work);
-	cancel_work_sync(&dev_priv->rps_work);
+	cancel_work_sync(&dev_priv->rps.work);
 
 	/* flush any delayed tasks or pending work */
 	flush_scheduled_work();
@@ -2277,9 +2277,10 @@ static u32 gen6_rps_limits(struct drm_i915_private *dev_priv, u8 *val)
 	u32 limits;
 
 	limits = 0;
-	if (*val >= dev_priv->max_delay)
-		*val = dev_priv->max_delay;
-	limits |= dev_priv->max_delay << 24;
+
+	if (*val >= dev_priv->rps.max_delay)
+		*val = dev_priv->rps.max_delay;
+	limits |= dev_priv->rps.max_delay << 24;
 
 	/* Only set the down limit when we've reached the lowest level to avoid
 	 * getting more interrupts, otherwise leave this clear. This prevents a
@@ -2287,9 +2288,9 @@ static u32 gen6_rps_limits(struct drm_i915_private *dev_priv, u8 *val)
 	 * the hw runs at the minimal clock before selecting the desired
 	 * frequency, if the down threshold expires in that window we will not
 	 * receive a down interrupt. */
-	if (*val <= dev_priv->min_delay) {
-		*val = dev_priv->min_delay;
-		limits |= dev_priv->min_delay << 16;
+	if (*val <= dev_priv->rps.min_delay) {
+		*val = dev_priv->rps.min_delay;
+		limits |= dev_priv->rps.min_delay << 16;
 	}
 
 	return limits;
@@ -2302,7 +2303,7 @@ void gen6_set_rps(struct drm_device *dev, u8 val)
 
 	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 
-	if (val == dev_priv->cur_delay)
+	if (val == dev_priv->rps.cur_delay)
 		return;
 
 	I915_WRITE(GEN6_RPNSWREQ,
@@ -2315,7 +2316,7 @@ void gen6_set_rps(struct drm_device *dev, u8 val)
 	 */
 	I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, limits);
 
-	dev_priv->cur_delay = val;
+	dev_priv->rps.cur_delay = val;
 }
 
 static void gen6_disable_rps(struct drm_device *dev)
@@ -2331,9 +2332,9 @@ static void gen6_disable_rps(struct drm_device *dev)
 	 * register (PMIMR) to mask PM interrupts. The only risk is in leaving
 	 * stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */
 
-	spin_lock_irq(&dev_priv->rps_lock);
-	dev_priv->pm_iir = 0;
-	spin_unlock_irq(&dev_priv->rps_lock);
+	spin_lock_irq(&dev_priv->rps.lock);
+	dev_priv->rps.pm_iir = 0;
+	spin_unlock_irq(&dev_priv->rps.lock);
 
 	I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
 }
@@ -2402,9 +2403,9 @@ static void gen6_enable_rps(struct drm_device *dev)
 	gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
 
 	/* In units of 100MHz */
-	dev_priv->max_delay = rp_state_cap & 0xff;
-	dev_priv->min_delay = (rp_state_cap & 0xff0000) >> 16;
-	dev_priv->cur_delay = 0;
+	dev_priv->rps.max_delay = rp_state_cap & 0xff;
+	dev_priv->rps.min_delay = (rp_state_cap & 0xff0000) >> 16;
+	dev_priv->rps.cur_delay = 0;
 
 	/* disable the counters and set deterministic thresholds */
 	I915_WRITE(GEN6_RC_CONTROL, 0);
@@ -2457,8 +2458,8 @@ static void gen6_enable_rps(struct drm_device *dev)
 
 	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
 	I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
-		   dev_priv->max_delay << 24 |
-		   dev_priv->min_delay << 16);
+		   dev_priv->rps.max_delay << 24 |
+		   dev_priv->rps.min_delay << 16);
 
 	if (IS_HASWELL(dev)) {
 		I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
@@ -2503,7 +2504,7 @@ static void gen6_enable_rps(struct drm_device *dev)
 		     500))
 		DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
 	if (pcu_mbox & (1<<31)) { /* OC supported */
-		dev_priv->max_delay = pcu_mbox & 0xff;
+		dev_priv->rps.max_delay = pcu_mbox & 0xff;
 		DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 50);
 	}
 
@@ -2511,10 +2512,10 @@ static void gen6_enable_rps(struct drm_device *dev)
 
 	/* requires MSI enabled */
 	I915_WRITE(GEN6_PMIER, GEN6_PM_DEFERRED_EVENTS);
-	spin_lock_irq(&dev_priv->rps_lock);
-	WARN_ON(dev_priv->pm_iir != 0);
+	spin_lock_irq(&dev_priv->rps.lock);
+	WARN_ON(dev_priv->rps.pm_iir != 0);
 	I915_WRITE(GEN6_PMIMR, 0);
-	spin_unlock_irq(&dev_priv->rps_lock);
+	spin_unlock_irq(&dev_priv->rps.lock);
 	/* enable all PM interrupts */
 	I915_WRITE(GEN6_PMINTRMSK, 0);
 
@@ -2546,9 +2547,9 @@ static void gen6_update_ring_freq(struct drm_device *dev)
 	 * to use for memory access. We do this by specifying the IA frequency
 	 * the PCU should use as a reference to determine the ring frequency.
 	 */
-	for (gpu_freq = dev_priv->max_delay; gpu_freq >= dev_priv->min_delay;
+	for (gpu_freq = dev_priv->rps.max_delay; gpu_freq >= dev_priv->rps.min_delay;
 	     gpu_freq--) {
-		int diff = dev_priv->max_delay - gpu_freq;
+		int diff = dev_priv->rps.max_delay - gpu_freq;
 
 		/*
 		 * For GPU frequencies less than 750MHz, just use the lowest
@@ -2991,7 +2992,7 @@ unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
 
 	assert_spin_locked(&mchdev_lock);
 
-	pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->cur_delay * 4));
+	pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->rps.cur_delay * 4));
 	pxvid = (pxvid >> 24) & 0x7f;
 	ext_v = pvid_to_extvid(dev_priv, pxvid);
 