drm/i915/guc: Restore preempt-context across S3/S4

Stolen memory is lost across S4 (hibernate) or S3-RST as it is a portion
of ordinary volatile RAM. As we allocate our rings from stolen, this may
include the rings used for our preempt context and their breadcrumb
instructions. In order to allow preemption following hibernation and
loss of stolen memory, we therefore need to repopulate the instructions
inside the lost ring upon resume. To handle both module load and resume,
we simply defer constructing the ring to first use.

Testcase: igt/drv_selftest/live_gem
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Michał Winiarski <michal.winiarski@intel.com>
Cc: Michal Wajdeczko <michal.wajdeczko@intel.com>
Reviewed-by: Michał Winiarski <michal.winiarski@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180919205432.18394-1-chris@chris-wilson.co.uk
Chris Wilson 2018-09-19 21:54:32 +01:00
parent a47cd45a37
commit 8fcd86baab
1 changed file with 27 additions and 53 deletions
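
The deferred-construction idea in the message reduces to a lazy-initialisation pattern: keep an "emitted bytes" cursor alongside the ring, treat zero as "contents missing", and rebuild the fixed breadcrumb on the next preemption request. A minimal standalone sketch of that pattern for orientation before the diff (hypothetical names throughout, not driver code; the real logic is the if (!ce->ring->emit) block in the first hunk below):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define RING_DWORDS 8u

struct preempt_ring {
	uint32_t vaddr[RING_DWORDS];	/* backing store; lost across S3/S4 */
	unsigned int emit;		/* bytes emitted; 0 => needs (re)populate */
};

/* Fill the ring with the fixed breadcrumb commands. */
static void ring_emit_breadcrumb(struct preempt_ring *ring)
{
	unsigned int i;

	for (i = 0; i < RING_DWORDS; i++)
		ring->vaddr[i] = 0xdeadbeef;	/* stand-in for the MI_* commands */

	ring->emit = sizeof(ring->vaddr);
}

/* Called on every preemption request: lazily (re)construct the ring. */
static void inject_preempt(struct preempt_ring *ring)
{
	if (!ring->emit)	/* first use, or contents lost across suspend */
		ring_emit_breadcrumb(ring);

	/* ... submit a work item pointing at ring->vaddr ... */
}

/*
 * Simulated hibernate: in this sketch both the contents and the bookkeeping
 * are reset, standing in for the driver tearing down and recreating its
 * client state across suspend.
 */
static void simulate_s4(struct preempt_ring *ring)
{
	memset(ring, 0, sizeof(*ring));
}

int main(void)
{
	struct preempt_ring ring = {0};

	inject_preempt(&ring);	/* module load: populates the ring */
	simulate_s4(&ring);	/* hibernate: contents are lost */
	inject_preempt(&ring);	/* resume: repopulated on first use */

	printf("ring repopulated, emit=%u bytes\n", ring.emit);
	return 0;
}
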


@@ -557,16 +557,36 @@ static void inject_preempt_context(struct work_struct *work)
preempt_work[engine->id]);
struct intel_guc_client *client = guc->preempt_client;
struct guc_stage_desc *stage_desc = __get_stage_desc(client);
u32 ctx_desc = lower_32_bits(to_intel_context(client->owner,
engine)->lrc_desc);
struct intel_context *ce = to_intel_context(client->owner, engine);
u32 data[7];
/*
* The ring contains commands to write GUC_PREEMPT_FINISHED into HWSP.
* See guc_fill_preempt_context().
*/
if (!ce->ring->emit) { /* recreate upon load/resume */
u32 addr = intel_hws_preempt_done_address(engine);
u32 *cs;
cs = ce->ring->vaddr;
if (engine->id == RCS) {
cs = gen8_emit_ggtt_write_rcs(cs,
GUC_PREEMPT_FINISHED,
addr);
} else {
cs = gen8_emit_ggtt_write(cs,
GUC_PREEMPT_FINISHED,
addr);
*cs++ = MI_NOOP;
*cs++ = MI_NOOP;
}
*cs++ = MI_USER_INTERRUPT;
*cs++ = MI_NOOP;
ce->ring->emit = GUC_PREEMPT_BREADCRUMB_BYTES;
GEM_BUG_ON((void *)cs - ce->ring->vaddr != ce->ring->emit);
flush_ggtt_writes(ce->ring->vma);
}
spin_lock_irq(&client->wq_lock);
guc_wq_item_append(client, engine->guc_id, ctx_desc,
guc_wq_item_append(client, engine->guc_id, lower_32_bits(ce->lrc_desc),
GUC_PREEMPT_BREADCRUMB_BYTES / sizeof(u64), 0);
spin_unlock_irq(&client->wq_lock);
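
The GEM_BUG_ON() and the work-queue item length in this hunk both rely on the breadcrumb occupying the same fixed number of dwords on every engine. A quick standalone check of that arithmetic (the helper sizes, 6 dwords for the RCS PIPE_CONTROL write and 4 dwords for the other engines' write, and the 8-dword GUC_PREEMPT_BREADCRUMB_BYTES budget are assumptions from this era of the driver, not stated in the patch):

#include <assert.h>

#define BREADCRUMB_DWORDS 8u	/* assumed GUC_PREEMPT_BREADCRUMB_BYTES / sizeof(u32) */

int main(void)
{
	/* RCS: 6-dword ggtt write + MI_USER_INTERRUPT + MI_NOOP */
	unsigned int rcs = 6 + 2;
	/* other engines: 4-dword ggtt write + 2 MI_NOOP pads + interrupt pair */
	unsigned int xcs = 4 + 2 + 2;

	assert(rcs == BREADCRUMB_DWORDS);
	assert(xcs == BREADCRUMB_DWORDS);

	/* 8 dwords = 32 bytes, so the wq item length is 32 / sizeof(u64) = 4 qwords */
	return 0;
}
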
@@ -1044,50 +1064,6 @@ static inline bool ctx_save_restore_disabled(struct intel_context *ce)
#undef SR_DISABLED
}
static void guc_fill_preempt_context(struct intel_guc *guc)
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
struct intel_guc_client *client = guc->preempt_client;
struct intel_engine_cs *engine;
enum intel_engine_id id;
for_each_engine(engine, dev_priv, id) {
struct intel_context *ce =
to_intel_context(client->owner, engine);
u32 addr = intel_hws_preempt_done_address(engine);
u32 *cs;
GEM_BUG_ON(!ce->pin_count);
/*
* We rely on this context image *not* being saved after
* preemption. This ensures that the RING_HEAD / RING_TAIL
* remain pointing at initial values forever.
*/
GEM_BUG_ON(!ctx_save_restore_disabled(ce));
cs = ce->ring->vaddr;
if (id == RCS) {
cs = gen8_emit_ggtt_write_rcs(cs,
GUC_PREEMPT_FINISHED,
addr);
} else {
cs = gen8_emit_ggtt_write(cs,
GUC_PREEMPT_FINISHED,
addr);
*cs++ = MI_NOOP;
*cs++ = MI_NOOP;
}
*cs++ = MI_USER_INTERRUPT;
*cs++ = MI_NOOP;
GEM_BUG_ON((void *)cs - ce->ring->vaddr !=
GUC_PREEMPT_BREADCRUMB_BYTES);
flush_ggtt_writes(ce->ring->vma);
}
}
static int guc_clients_create(struct intel_guc *guc)
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
@@ -1118,8 +1094,6 @@ static int guc_clients_create(struct intel_guc *guc)
return PTR_ERR(client);
}
guc->preempt_client = client;
guc_fill_preempt_context(guc);
}
return 0;