drm/i915: Separate GPU hang waitqueue from advance

Currently __i915_wait_request uses a per-engine wait_queue_t for the dual
purpose of waking after the GPU advances or after an error. In the future,
we may add even more wake sources and require greater separation, but for
now we can conceptually simplify wakeups by separating the two sources.
In particular, this allows us to use different wait-queues (e.g. one for
engine advancement, a global one for errors and one on each request)
without any hassle.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/1467390209-3576-5-git-send-email-chris@chris-wilson.co.uk
commit 1f15b76f1e (parent 26a02b8fc3)
Author: Chris Wilson <chris@chris-wilson.co.uk>
Date:   2016-07-01 17:23:14 +01:00

3 changed files with 15 additions and 15 deletions
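To make the new scheme concrete, here is a minimal, hypothetical sketch of the waiter pattern the patch adopts, written against the 2016-era waitqueue API (wait_queue_t). It is not the i915 code itself: the example_* structs, the done() callback and the hang_detected flag are invented stand-ins. The waiter sleeps on a per-engine queue for GPU advancement while also registering on a global error queue, so a wake_up_all() on either source breaks it out of the loop:

#include <linux/sched.h>
#include <linux/wait.h>

/* Hypothetical stand-ins for intel_engine_cs and i915_gpu_error. */
struct example_engine {
        wait_queue_head_t irq_queue;            /* woken when the GPU advances */
};

struct example_error {
        wait_queue_head_t wait_queue;           /* woken when a hang is detected */
        wait_queue_head_t reset_queue;          /* woken when a reset has completed */
        bool hang_detected;
};

static int example_wait(struct example_engine *engine,
                        struct example_error *error,
                        bool (*done)(void *data), void *data)
{
        DEFINE_WAIT(wait);      /* entry for engine->irq_queue (advance) */
        DEFINE_WAIT(reset);     /* entry for error->wait_queue (hang) */
        int ret = 0;

        /* Stay registered for hang notifications for the whole wait. */
        add_wait_queue(&error->wait_queue, &reset);

        for (;;) {
                /* (Re)arm the per-engine advance wakeup before each check. */
                prepare_to_wait(&engine->irq_queue, &wait, TASK_INTERRUPTIBLE);

                if (done(data))                 /* the GPU advanced far enough */
                        break;

                if (READ_ONCE(error->hang_detected)) {
                        ret = -EAGAIN;          /* back off so the reset can run */
                        break;
                }

                if (signal_pending(current)) {
                        ret = -ERESTARTSYS;
                        break;
                }

                schedule();
        }

        finish_wait(&engine->irq_queue, &wait);
        remove_wait_queue(&error->wait_queue, &reset);
        return ret;
}

On the wake-up side, hang detection kicks only error->wait_queue so that waiters back off and release their locks, while reset completion kicks only error->reset_queue; that is exactly the split the i915_irq.c hunks below implement.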

--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h

@@ -1409,6 +1409,12 @@ struct i915_gpu_error {
 #define I915_RESET_IN_PROGRESS_FLAG 1
 #define I915_WEDGED (1 << 31)
 
+        /**
+         * Waitqueue to signal when a hang is detected. Used to for waiters
+         * to release the struct_mutex for the reset to procede.
+         */
+        wait_queue_head_t wait_queue;
+
         /**
          * Waitqueue to signal when the reset has completed. Used by clients
          * that wait for dev_priv->mm.wedged to settle.

--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c

@@ -1455,6 +1455,7 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
         const bool irq_test_in_progress =
                 ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_engine_flag(engine);
         int state = interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
+        DEFINE_WAIT(reset);
         DEFINE_WAIT(wait);
         unsigned long timeout_expire;
         s64 before = 0; /* Only to silence a compiler warning. */
@@ -1499,6 +1500,7 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
                 goto out;
         }
 
+        add_wait_queue(&dev_priv->gpu_error.wait_queue, &reset);
         for (;;) {
                 struct timer_list timer;
@@ -1557,6 +1559,8 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
                         destroy_timer_on_stack(&timer);
                 }
         }
+        remove_wait_queue(&dev_priv->gpu_error.wait_queue, &reset);
+
         if (!irq_test_in_progress)
                 engine->irq_put(engine);
@@ -5287,6 +5291,7 @@ i915_gem_load_init(struct drm_device *dev)
                           i915_gem_retire_work_handler);
         INIT_DELAYED_WORK(&dev_priv->mm.idle_work,
                           i915_gem_idle_work_handler);
+        init_waitqueue_head(&dev_priv->gpu_error.wait_queue);
         init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
 
         dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;

--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c

@@ -2488,11 +2488,8 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
         return ret;
 }
 
-static void i915_error_wake_up(struct drm_i915_private *dev_priv,
-                               bool reset_completed)
+static void i915_error_wake_up(struct drm_i915_private *dev_priv)
 {
-        struct intel_engine_cs *engine;
-
         /*
          * Notify all waiters for GPU completion events that reset state has
          * been changed, and that they need to restart their wait after
@@ -2501,18 +2498,10 @@
          */
 
         /* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
-        for_each_engine(engine, dev_priv)
-                wake_up_all(&engine->irq_queue);
+        wake_up_all(&dev_priv->gpu_error.wait_queue);
 
         /* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
         wake_up_all(&dev_priv->pending_flip_queue);
-
-        /*
-         * Signal tasks blocked in i915_gem_wait_for_error that the pending
-         * reset state is cleared.
-         */
-        if (reset_completed)
-                wake_up_all(&dev_priv->gpu_error.reset_queue);
 }
@@ -2577,7 +2566,7 @@ static void i915_reset_and_wakeup(struct drm_i915_private *dev_priv)
                  * Note: The wake_up also serves as a memory barrier so that
                  * waiters see the update value of the reset counter atomic_t.
                  */
-                i915_error_wake_up(dev_priv, true);
+                wake_up_all(&dev_priv->gpu_error.reset_queue);
         }
 }
@@ -2714,7 +2703,7 @@ void i915_handle_error(struct drm_i915_private *dev_priv,
                  * ensure that the waiters see the updated value of the reset
                  * counter atomic_t.
                  */
-                i915_error_wake_up(dev_priv, false);
+                i915_error_wake_up(dev_priv);
         }
 
         i915_reset_and_wakeup(dev_priv);
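For completeness, a matching and equally hypothetical sketch of the waker side, reusing struct example_error from the sketch above rather than the real i915_gpu_error: the hang path wakes only the hang waitqueue, and only a completed reset wakes the reset queue.

/* Hang detected: waiters back off and drop their locks so the reset can run. */
static void example_hang_detected(struct example_error *error)
{
        WRITE_ONCE(error->hang_detected, true);
        wake_up_all(&error->wait_queue);
}

/* Reset finished: release clients blocked waiting for the error state to settle. */
static void example_reset_complete(struct example_error *error)
{
        WRITE_ONCE(error->hang_detected, false);
        wake_up_all(&error->reset_queue);
}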