drm/i915: Don't downclock whilst we have clients waiting for GPU results
If we have clients stalled waiting for requests, ignore the GPU if it signals that it should downclock due to low load. This helps prevent the automatic downclock timeout from making extremely long-running batches take even longer.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
commit f5a4c67d52
parent 2e1b873072
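As a quick illustration of the behavioural change before reading the raw diff, here is a small standalone C model of the frequency-adjustment decision in gen6_pm_rps_work() after this patch. The struct model_rps, decide_adjustment() and the PM_RP_* constants are made-up stand-ins for this sketch, not kernel API; only the ordering of the branches mirrors the actual change: a down-clock timeout is ignored (adjustment stays 0) whenever any ring still has interrupt waiters.

/*
 * Userspace sketch of the modified RPS decision. Illustrative names only;
 * the real code lives in gen6_pm_rps_work() and any_waiters() below.
 */
#include <stdbool.h>
#include <stdio.h>

#define PM_RP_UP_THRESHOLD (1u << 0) /* illustrative bits, not the real GEN6_PM_* masks */
#define PM_RP_DOWN_TIMEOUT (1u << 1)

struct model_rps {
        int cur_freq;        /* currently requested frequency (arbitrary units) */
        int efficient_freq;  /* "most efficient" frequency the worker falls back to */
        int waiters;         /* models the sum of ring->irq_refcount across rings */
};

static bool any_waiters(const struct model_rps *rps)
{
        return rps->waiters > 0;
}

/* Frequency step the worker would apply for this PM interrupt. */
static int decide_adjustment(const struct model_rps *rps,
                             unsigned int pm_iir, int last_adj)
{
        if (pm_iir & PM_RP_UP_THRESHOLD)
                return last_adj > 0 ? last_adj * 2 : 1;
        if (any_waiters(rps))
                return 0; /* new branch: clients are blocked on the GPU, don't downclock */
        if (pm_iir & PM_RP_DOWN_TIMEOUT)
                return rps->cur_freq > rps->efficient_freq ? -1 : 0;
        return 0;
}

int main(void)
{
        struct model_rps rps = { .cur_freq = 10, .efficient_freq = 6, .waiters = 2 };

        printf("DOWN_TIMEOUT with waiters:    adj = %d\n",
               decide_adjustment(&rps, PM_RP_DOWN_TIMEOUT, 0));

        rps.waiters = 0;
        printf("DOWN_TIMEOUT without waiters: adj = %d\n",
               decide_adjustment(&rps, PM_RP_DOWN_TIMEOUT, 0));
        return 0;
}

Built with any C compiler, the model reports an adjustment of 0 while waiters are present and -1 once they are gone, which is the gating the real worker now applies before its GEN6_PM_RP_DOWN_TIMEOUT handling.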
@@ -2282,6 +2282,18 @@ static int i915_ppgtt_info(struct seq_file *m, void *data)
 	return 0;
 }
 
+static int count_irq_waiters(struct drm_i915_private *i915)
+{
+	struct intel_engine_cs *ring;
+	int count = 0;
+	int i;
+
+	for_each_ring(ring, i915, i)
+		count += ring->irq_refcount;
+
+	return count;
+}
+
 static int i915_rps_boost_info(struct seq_file *m, void *data)
 {
 	struct drm_info_node *node = m->private;
@@ -2298,6 +2310,15 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
 	if (ret)
 		goto unlock;
 
+	seq_printf(m, "RPS enabled? %d\n", dev_priv->rps.enabled);
+	seq_printf(m, "GPU busy? %d\n", dev_priv->mm.busy);
+	seq_printf(m, "CPU waiting? %d\n", count_irq_waiters(dev_priv));
+	seq_printf(m, "Frequency requested %d; min hard:%d, soft:%d; max soft:%d, hard:%d\n",
+		   intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
+		   intel_gpu_freq(dev_priv, dev_priv->rps.min_freq),
+		   intel_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit),
+		   intel_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit),
+		   intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));
 	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
 		struct drm_i915_file_private *file_priv = file->driver_priv;
 		struct task_struct *task;
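These counters are exposed through the existing i915_rps_boost_info debugfs entry, so with debugfs mounted (typically at /sys/kernel/debug) the new RPS state, GPU busyness, waiter count and frequency limits can be read from dri/<minor>/i915_rps_boost_info alongside the per-client boost statistics already printed by the file loop in the same function.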
@@ -1070,6 +1070,18 @@ static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
 	return events;
 }
 
+static bool any_waiters(struct drm_i915_private *dev_priv)
+{
+	struct intel_engine_cs *ring;
+	int i;
+
+	for_each_ring(ring, dev_priv, i)
+		if (ring->irq_refcount)
+			return true;
+
+	return false;
+}
+
 static void gen6_pm_rps_work(struct work_struct *work)
 {
 	struct drm_i915_private *dev_priv =
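The irq_refcount check works because each ring's user-interrupt reference count is only raised while callers have the ring's completion interrupt enabled, which in practice means clients sleeping in a request wait; a non-zero count on any ring is therefore a direct signal that someone is still blocked on GPU results, and the worker holds the current frequency instead of obeying the down-clock timeout.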
@@ -1114,6 +1126,8 @@ static void gen6_pm_rps_work(struct work_struct *work)
 			new_delay = dev_priv->rps.efficient_freq;
 			adj = 0;
 		}
+	} else if (any_waiters(dev_priv)) {
+		adj = 0;
 	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
 		if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
 			new_delay = dev_priv->rps.efficient_freq;