mirror of https://gitee.com/openkylin/linux.git
drm/i915: Spin until breadcrumb threads are complete
When we need to reset the global seqno on wraparound, we have to wait
until the current rbtrees are drained (or otherwise the next waiter
will be out of sequence). The current mechanism to kick and spin until
complete may exit too early, as it would break if the target thread was
currently running. Instead, we must wake up the threads, but keep
spinning until the trees have been deleted.

In order to appease Tvrtko, busy spin rather than yield().

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20161108143719.32215-1-chris@chris-wilson.co.uk
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
commit 6a5d1db98e (parent 677100ce15)
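Before the diff, a small userspace sketch of the race the message describes (an illustration only, not the kernel code; the names waiters_in_tree and stop_requested and the pthread/atomics machinery are stand-ins). The point it demonstrates: returning as soon as the wake-up call succeeds can leave a woken thread still holding its node in the shared tree, so the reset path has to keep re-checking until the tree is actually empty. Build with cc -pthread.

/*
 * Illustrative userspace sketch only -- not the kernel code.  It mimics
 * the flaw the commit fixes: the reset path must wake the waiter and then
 * keep spinning until the waiter has actually removed itself from the
 * shared structure, not merely until the kick has been delivered.
 * All names below are hypothetical.
 */
#include <pthread.h>
#include <sched.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int waiters_in_tree;	/* stand-in for the breadcrumb rbtree */
static atomic_bool stop_requested;

static void *waiter_thread(void *arg)
{
	(void)arg;
	atomic_fetch_add(&waiters_in_tree, 1);		/* "insert" our node */

	while (!atomic_load(&stop_requested))		/* wait to be kicked */
		sched_yield();

	atomic_fetch_sub(&waiters_in_tree, 1);		/* "erase" our node */
	return NULL;
}

int main(void)
{
	pthread_t thr;

	pthread_create(&thr, NULL, waiter_thread, NULL);

	/* Reset path: kick the waiter... */
	atomic_store(&stop_requested, true);

	/*
	 * ...and keep spinning until the tree is actually empty.  Stopping
	 * after the kick alone would race with a waiter that is running but
	 * has not yet erased its node -- the weakness of the old
	 * intel_kick_waiters()/yield() loop.  The kernel patch spins on
	 * cond_resched(); sched_yield() is the userspace stand-in here.
	 */
	while (atomic_load(&waiters_in_tree) != 0)
		sched_yield();

	pthread_join(thr, NULL);
	printf("tree drained, safe to reset the seqno\n");
	return 0;
}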
@@ -241,9 +241,8 @@ static int i915_gem_init_global_seqno(struct drm_i915_private *i915, u32 seqno)
 
 	/* If the seqno wraps around, we need to clear the breadcrumb rbtree */
 	if (!i915_seqno_passed(seqno, atomic_read(&timeline->next_seqno))) {
-		while (intel_kick_waiters(i915) || intel_kick_signalers(i915))
-			yield();
-		yield();
+		while (intel_breadcrumbs_busy(i915))
+			cond_resched(); /* spin until threads are complete */
 	}
 	atomic_set(&timeline->next_seqno, seqno);
 
@@ -629,35 +629,28 @@ void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine)
 	cancel_fake_irq(engine);
 }
 
-unsigned int intel_kick_waiters(struct drm_i915_private *i915)
-{
-	struct intel_engine_cs *engine;
-	enum intel_engine_id id;
-	unsigned int mask = 0;
-
-	/* To avoid the task_struct disappearing beneath us as we wake up
-	 * the process, we must first inspect the task_struct->state under the
-	 * RCU lock, i.e. as we call wake_up_process() we must be holding the
-	 * rcu_read_lock().
-	 */
-	for_each_engine(engine, i915, id)
-		if (unlikely(intel_engine_wakeup(engine)))
-			mask |= intel_engine_flag(engine);
-
-	return mask;
-}
-
-unsigned int intel_kick_signalers(struct drm_i915_private *i915)
+unsigned int intel_breadcrumbs_busy(struct drm_i915_private *i915)
 {
 	struct intel_engine_cs *engine;
 	enum intel_engine_id id;
 	unsigned int mask = 0;
 
 	for_each_engine(engine, i915, id) {
-		if (unlikely(READ_ONCE(engine->breadcrumbs.first_signal))) {
-			wake_up_process(engine->breadcrumbs.signaler);
+		struct intel_breadcrumbs *b = &engine->breadcrumbs;
+
+		spin_lock_irq(&b->lock);
+
+		if (b->first_wait) {
+			wake_up_process(b->first_wait->tsk);
+			mask |= intel_engine_flag(engine);
+		}
+
+		if (b->first_signal) {
+			wake_up_process(b->signaler);
 			mask |= intel_engine_flag(engine);
 		}
+
+		spin_unlock_irq(&b->lock);
 	}
 
 	return mask;
@@ -578,7 +578,6 @@ static inline bool intel_engine_wakeup(const struct intel_engine_cs *engine)
 
 void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine);
 void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine);
-unsigned int intel_kick_waiters(struct drm_i915_private *i915);
-unsigned int intel_kick_signalers(struct drm_i915_private *i915);
+unsigned int intel_breadcrumbs_busy(struct drm_i915_private *i915);
 
 #endif /* _INTEL_RINGBUFFER_H_ */