drm/i915/gt: Repeat wait_for_idle for retirement workers
Since we may retire timelines from secondary workers, intel_gt_retire_requests()
is not always a reliable indicator that all pending retirements are complete.
If we do detect secondary workers are in progress, recommend
intel_gt_wait_for_idle() to repeat the retirement check.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Acked-by: Andi Shyti <andi.shyti@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191221180204.1201217-1-chris@chris-wilson.co.uk
parent e6ba764802
commit 4856254d48
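The contract here is small but easy to miss: the retirement check keeps
reporting the remaining timeout while anything is still active, and reports 0
only once the GT is idle, so a waiter simply loops on it. Below is a minimal
standalone sketch of that retry pattern; the names are illustrative analogs,
not the kernel's functions (retire_requests_timeout() and wait_for_idle() here
are hypothetical stand-ins).

/* Hypothetical userspace analog of the repeat-until-idle pattern. */
#include <stdio.h>

static int pending_workers = 3; /* pretend three retire workers are mid-flight */

/* Analog of intel_gt_retire_requests_timeout(): report the remaining
 * timeout while anything is still active, 0 once everything is idle. */
static long retire_requests_timeout(long timeout)
{
        if (pending_workers > 0) {
                pending_workers--; /* a worker finishes between checks */
                return timeout;    /* still busy: caller should try again */
        }
        return 0;
}

/* Analog of intel_gt_wait_for_idle(): repeat the retirement check
 * until no secondary workers report activity. */
static long wait_for_idle(long timeout)
{
        while ((timeout = retire_requests_timeout(timeout)) > 0)
                ;
        return timeout; /* 0 on success */
}

int main(void)
{
        printf("wait_for_idle -> %ld\n", wait_for_idle(100));
        return 0;
}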
--- a/drivers/gpu/drm/i915/gt/intel_engine.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine.h
@@ -282,7 +282,7 @@ static inline void __intel_engine_reset(struct intel_engine_cs *engine,
 
 bool intel_engines_are_idle(struct intel_gt *gt);
 bool intel_engine_is_idle(struct intel_engine_cs *engine);
-void intel_engine_flush_submission(struct intel_engine_cs *engine);
+bool intel_engine_flush_submission(struct intel_engine_cs *engine);
 
 void intel_engines_reset_default_submission(struct intel_gt *gt);
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -1079,9 +1079,10 @@ static bool ring_is_idle(struct intel_engine_cs *engine)
 	return idle;
 }
 
-void intel_engine_flush_submission(struct intel_engine_cs *engine)
+bool intel_engine_flush_submission(struct intel_engine_cs *engine)
 {
 	struct tasklet_struct *t = &engine->execlists.tasklet;
+	bool active = tasklet_is_locked(t);
 
 	if (__tasklet_is_scheduled(t)) {
 		local_bh_disable();
@@ -1092,10 +1093,13 @@ void intel_engine_flush_submission(struct intel_engine_cs *engine)
 			tasklet_unlock(t);
 		}
 		local_bh_enable();
+		active = true;
 	}
 
 	/* Otherwise flush the tasklet if it was running on another cpu */
 	tasklet_unlock_wait(t);
+
+	return active;
 }
 
 /**
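After this hunk, intel_engine_flush_submission() reports whether the
submission tasklet was scheduled or running at the time it was flushed, so
the caller learns that requests may have been retired behind its back and
should re-run its idle check. A single-threaded sketch of that
flush-and-report shape, with illustrative names (the real code coordinates
with the tasklet via the tasklet lock and tasklet_unlock_wait()):

#include <stdbool.h>

static bool task_scheduled; /* stands in for __tasklet_is_scheduled(t) */

static void run_task(void)
{
        task_scheduled = false; /* the deferred work runs here */
}

/* Flush the deferred task and report whether anything was pending. */
static bool flush_task(void)
{
        bool active = false;

        if (task_scheduled) {
                run_task();    /* run it inline, like the tasklet body */
                active = true; /* work was outstanding: caller must re-check */
        }

        return active;
}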
--- a/drivers/gpu/drm/i915/gt/intel_gt_requests.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_requests.c
@@ -23,15 +23,18 @@ static void retire_requests(struct intel_timeline *tl)
 			break;
 }
 
-static void flush_submission(struct intel_gt *gt)
+static bool flush_submission(struct intel_gt *gt)
 {
 	struct intel_engine_cs *engine;
 	enum intel_engine_id id;
+	bool active = false;
 
 	for_each_engine(engine, gt, id) {
-		intel_engine_flush_submission(engine);
-		flush_work(&engine->retire_work);
+		active |= intel_engine_flush_submission(engine);
+		active |= flush_work(&engine->retire_work);
 	}
+
+	return active;
 }
 
 static void engine_retire(struct work_struct *work)
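Note that flush_work() already returns true when it had to wait for pending
work to finish, so both sources of activity can simply be OR-accumulated per
engine. A small sketch of that accumulation, using a fixed-size engine array
in place of the kernel's for_each_engine() (all names illustrative):

#include <stdbool.h>

#define NUM_ENGINES 4

/* Pretend only engine 2 still had work outstanding. */
static bool flush_engine(int id)
{
        return id == 2;
}

/* Analog of flush_submission(): any busy engine marks the GT busy. */
static bool flush_all(void)
{
        bool active = false;

        for (int id = 0; id < NUM_ENGINES; id++)
                active |= flush_engine(id);

        return active;
}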
--- a/drivers/gpu/drm/i915/gt/intel_gt_requests.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_requests.c
@@ -120,9 +123,10 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
 
 	spin_lock(&timelines->lock);
 	list_for_each_entry_safe(tl, tn, &timelines->active_list, link) {
-		active_count++; /* report busy to caller, try again? */
-		if (!mutex_trylock(&tl->mutex))
+		if (!mutex_trylock(&tl->mutex)) {
+			active_count++; /* report busy to caller, try again? */
 			continue;
+		}
 
 		intel_timeline_get(tl);
 		GEM_BUG_ON(!atomic_read(&tl->active_count));
@@ -147,10 +151,10 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
 
 		/* Resume iteration after dropping lock */
 		list_safe_reset_next(tl, tn, link);
-		if (atomic_dec_and_test(&tl->active_count)) {
+		if (atomic_dec_and_test(&tl->active_count))
 			list_del(&tl->link);
-			active_count--;
-		}
+		else
+			active_count += i915_active_fence_isset(&tl->last_request);
 
 		mutex_unlock(&tl->mutex);
 
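The accounting change is the subtle part: a timeline now counts as busy only
if its mutex is contended (someone else, such as a retire worker, holds it) or
if, after retiring under the lock, its last request is still outstanding.
Previously every timeline on the active list bumped active_count up front. A
pthread sketch of that trylock-based counting; struct timeline and its fields
here are illustrative stand-ins, not the driver's types:

#include <pthread.h>
#include <stdbool.h>

struct timeline {
        pthread_mutex_t lock;
        bool last_request_pending; /* stands in for i915_active_fence_isset() */
};

/* Count how many timelines still have work we could not retire. */
static int count_busy(struct timeline *tl, int n)
{
        int active_count = 0;

        for (int i = 0; i < n; i++) {
                /* trylock returns nonzero when the lock is contended */
                if (pthread_mutex_trylock(&tl[i].lock)) {
                        active_count++; /* report busy to caller, try again */
                        continue;
                }

                /* Retired under the lock; busy only if a request remains. */
                active_count += tl[i].last_request_pending;
                pthread_mutex_unlock(&tl[i].lock);
        }

        return active_count;
}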
--- a/drivers/gpu/drm/i915/gt/intel_gt_requests.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_requests.c
@@ -165,7 +169,8 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
 	list_for_each_entry_safe(tl, tn, &free, link)
 		__intel_timeline_free(&tl->kref);
 
-	flush_submission(gt);
+	if (flush_submission(gt))
+		active_count++;
 
 	return active_count ? timeout : 0;
 }
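Taken together, the return value "active_count ? timeout : 0" is the retry
signal: if the trylock bookkeeping above or the final flush_submission()
detected any secondary worker still in flight, the nonzero timeout tells
intel_gt_wait_for_idle() to repeat the retirement check rather than declare
the GT idle.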