drm/i915: Verify the engine workarounds stick on application

Read the engine workarounds back using the GPU after loading the initial
context state to verify that we are setting them correctly, and bail if
it fails.

v2: Break out the verification into its own loop

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190417075657.19456-3-chris@chris-wilson.co.uk
Author: Chris Wilson <chris@chris-wilson.co.uk>
Date:   2019-04-17 08:56:28 +01:00
commit 254e11864a
parent 4f1cb5875c
4 changed files with 149 additions and 47 deletions
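
In outline: once the driver has applied its workarounds, the patch has the GPU itself store each workaround register into a scratch buffer, then compares every value read back against what was programmed, honouring each entry's mask. As a rough freestanding model of that compare (the i915_wa field layout and the exact masked-compare semantics of wa_verify() are my reading, not spelled out in this diff):

#include <stdio.h>

/* Rough userspace model: each workaround records which bits matter
 * (mask) and what they must read back as (val); the value the GPU
 * sampled only has to match under that mask. */
struct wa {
	unsigned int reg;	/* mmio offset, for the error message */
	unsigned int mask;	/* bits the workaround defines */
	unsigned int val;	/* expected state of those bits */
};

static int verify(const struct wa *list, const unsigned int *readback,
		  int count)
{
	int i, err = 0;

	for (i = 0; i < count; i++) {
		if ((readback[i] ^ list[i].val) & list[i].mask) {
			fprintf(stderr,
				"reg %04x: got %08x, want %08x (mask %08x)\n",
				list[i].reg, readback[i],
				list[i].val, list[i].mask);
			err = -1;	/* the driver returns -ENXIO */
		}
	}

	return err;
}

int main(void)
{
	const struct wa list[] = { { 0x7004, 0x0000ffff, 0x0820 } };
	const unsigned int good = 0xdead0820;	/* high bits fall outside the mask */
	const unsigned int bad = 0xdead0000;

	return verify(list, &good, 1) == 0 &&	/* passes */
	       verify(list, &bad, 1) != 0 ? 0 : 1;	/* reports reg 7004 */
}
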

drivers/gpu/drm/i915/i915_gem.c

@@ -4842,6 +4842,23 @@ static void i915_gem_fini_scratch(struct drm_i915_private *i915)
 	i915_vma_unpin_and_release(&i915->gt.scratch, 0);
 }
 
+static int intel_engines_verify_workarounds(struct drm_i915_private *i915)
+{
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
+	int err = 0;
+
+	if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
+		return 0;
+
+	for_each_engine(engine, i915, id) {
+		if (intel_engine_verify_workarounds(engine, "load"))
+			err = -EIO;
+	}
+
+	return err;
+}
+
 int i915_gem_init(struct drm_i915_private *dev_priv)
 {
 	int ret;
@@ -4927,6 +4944,10 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
 	 */
 	intel_init_clock_gating(dev_priv);
 
+	ret = intel_engines_verify_workarounds(dev_priv);
+	if (ret)
+		goto err_init_hw;
+
 	ret = __intel_engines_record_defaults(dev_priv);
 	if (ret)
 		goto err_init_hw;

drivers/gpu/drm/i915/intel_workarounds.c

@@ -1259,6 +1259,126 @@ void intel_engine_apply_workarounds(struct intel_engine_cs *engine)
 	wa_list_apply(engine->uncore, &engine->wa_list);
 }
 
+static struct i915_vma *
+create_scratch(struct i915_address_space *vm, int count)
+{
+	struct drm_i915_gem_object *obj;
+	struct i915_vma *vma;
+	unsigned int size;
+	int err;
+
+	size = round_up(count * sizeof(u32), PAGE_SIZE);
+	obj = i915_gem_object_create_internal(vm->i915, size);
+	if (IS_ERR(obj))
+		return ERR_CAST(obj);
+
+	i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
+
+	vma = i915_vma_instance(obj, vm, NULL);
+	if (IS_ERR(vma)) {
+		err = PTR_ERR(vma);
+		goto err_obj;
+	}
+
+	err = i915_vma_pin(vma, 0, 0,
+			   i915_vma_is_ggtt(vma) ? PIN_GLOBAL : PIN_USER);
+	if (err)
+		goto err_obj;
+
+	return vma;
+
+err_obj:
+	i915_gem_object_put(obj);
+	return ERR_PTR(err);
+}
+
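
A small aside on the sizing above: the scratch object needs one u32 slot per workaround, and round_up() pads that to page granularity since internal objects are allocated in whole pages. A quick check of the arithmetic (round_up() reimplemented here for the example; the kernel macro behaves the same for power-of-two alignment):

#include <stdio.h>

#define PAGE_SIZE 4096u
/* the kernel macro, for power-of-two alignment */
#define round_up(x, y) (((x) + (y) - 1) & ~((y) - 1))

int main(void)
{
	/* 60 registers still fit in one 4 KiB page... */
	printf("%u\n", round_up(60u * 4u, PAGE_SIZE));		/* 4096 */
	/* ...and 1025 would spill into a second one */
	printf("%u\n", round_up(1025u * 4u, PAGE_SIZE));	/* 8192 */
	return 0;
}
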
+static int
+wa_list_srm(struct i915_request *rq,
+	    const struct i915_wa_list *wal,
+	    struct i915_vma *vma)
+{
+	const struct i915_wa *wa;
+	unsigned int i;
+	u32 srm, *cs;
+
+	srm = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
+	if (INTEL_GEN(rq->i915) >= 8)
+		srm++;
+
+	cs = intel_ring_begin(rq, 4 * wal->count);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
+
+	for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
+		*cs++ = srm;
+		*cs++ = i915_mmio_reg_offset(wa->reg);
+		*cs++ = i915_ggtt_offset(vma) + sizeof(u32) * i;
+		*cs++ = 0;
+	}
+	intel_ring_advance(rq, cs);
+
+	return 0;
+}
+
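
Each register is sampled with an MI_STORE_REGISTER_MEM packet, and wa_list_srm() always emits four dwords per register: on gen8+ the srm++ bumps the command's dword-count field because the destination address grows to 64 bits (the upper half is written as 0, since a GGTT offset fits in 32 bits), while on older gens the trailing 0 is simply MI_NOOP padding. Keeping every packet the same size is what lets the function allocate a flat 4 * wal->count dwords up front. A freestanding sketch of that packing (command encodings recalled from intel_gpu_commands.h; treat them as illustrative rather than authoritative):

#include <stdint.h>
#include <stdio.h>

/* MI_INSTR-style encodings, recalled from intel_gpu_commands.h */
#define MI_INSTR(opcode, flags)	(((uint32_t)(opcode) << 23) | (flags))
#define MI_STORE_REGISTER_MEM	MI_INSTR(0x24, 1)
#define MI_SRM_LRM_GLOBAL_GTT	(1u << 22)

/* Pack one 4-dword SRM packet: header, register, address, then either
 * the upper address bits (gen8+) or 0 == MI_NOOP padding (older gens). */
static uint32_t *emit_srm(uint32_t *cs, int gen8plus,
			  uint32_t reg, uint64_t addr)
{
	uint32_t srm = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;

	if (gen8plus)
		srm++;	/* dword count grows by the extra address dword */

	*cs++ = srm;
	*cs++ = reg;
	*cs++ = (uint32_t)addr;
	*cs++ = gen8plus ? (uint32_t)(addr >> 32) : 0;
	return cs;
}

int main(void)
{
	uint32_t ring[8], *cs = ring;

	cs = emit_srm(cs, 1, 0x7004, 0x10000);
	printf("%td dwords emitted\n", cs - ring);	/* 4, matching 4 * wal->count */
	return 0;
}
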
+static int engine_wa_list_verify(struct intel_engine_cs *engine,
+				 const struct i915_wa_list * const wal,
+				 const char *from)
+{
+	const struct i915_wa *wa;
+	struct i915_request *rq;
+	struct i915_vma *vma;
+	unsigned int i;
+	u32 *results;
+	int err;
+
+	if (!wal->count)
+		return 0;
+
+	vma = create_scratch(&engine->i915->ggtt.vm, wal->count);
+	if (IS_ERR(vma))
+		return PTR_ERR(vma);
+
+	rq = i915_request_alloc(engine, engine->kernel_context->gem_context);
+	if (IS_ERR(rq)) {
+		err = PTR_ERR(rq);
+		goto err_vma;
+	}
+
+	err = wa_list_srm(rq, wal, vma);
+	if (err)
+		goto err_vma;
+
+	i915_request_add(rq);
+	if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 5) < 0) {
+		err = -ETIME;
+		goto err_vma;
+	}
+
+	results = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
+	if (IS_ERR(results)) {
+		err = PTR_ERR(results);
+		goto err_vma;
+	}
+
+	err = 0;
+	for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
+		if (!wa_verify(wa, results[i], wal->name, from))
+			err = -ENXIO;
+
+	i915_gem_object_unpin_map(vma->obj);
+
+err_vma:
+	i915_vma_unpin(vma);
+	i915_vma_put(vma);
+	return err;
+}
+
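
One detail worth calling out in the loop above: on a mismatch the function records -ENXIO and keeps going rather than returning immediately, so a single run reports every failing register (wa_verify() logs each one) instead of stopping at the first. The same pattern in miniature:

#include <stdio.h>

static int entry_ok(int i)
{
	return i != 2;	/* pretend the third register reads back wrong */
}

int main(void)
{
	int i, err = 0;

	/* Check every entry before bailing, as engine_wa_list_verify()
	 * does, so a single run reports all of the failures. */
	for (i = 0; i < 4; i++) {
		if (!entry_ok(i)) {
			fprintf(stderr, "entry %d mismatched\n", i);
			err = -1;	/* -ENXIO in the driver */
		}
	}

	return err ? 1 : 0;
}
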
+int intel_engine_verify_workarounds(struct intel_engine_cs *engine,
+				    const char *from)
+{
+	return engine_wa_list_verify(engine, &engine->wa_list, from);
+}
+
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
 #include "selftests/intel_workarounds.c"
 #endif

drivers/gpu/drm/i915/intel_workarounds.h

@@ -30,5 +30,7 @@ void intel_engine_apply_whitelist(struct intel_engine_cs *engine);
 void intel_engine_init_workarounds(struct intel_engine_cs *engine);
 void intel_engine_apply_workarounds(struct intel_engine_cs *engine);
+int intel_engine_verify_workarounds(struct intel_engine_cs *engine,
+				    const char *from);
 
 #endif

drivers/gpu/drm/i915/selftests/intel_workarounds.c

@@ -340,49 +340,6 @@ static int check_whitelist_across_reset(struct intel_engine_cs *engine,
 	return err;
 }
 
-static struct i915_vma *create_scratch(struct i915_gem_context *ctx)
-{
-	struct drm_i915_gem_object *obj;
-	struct i915_vma *vma;
-	void *ptr;
-	int err;
-
-	obj = i915_gem_object_create_internal(ctx->i915, PAGE_SIZE);
-	if (IS_ERR(obj))
-		return ERR_CAST(obj);
-
-	i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
-
-	ptr = i915_gem_object_pin_map(obj, I915_MAP_WB);
-	if (IS_ERR(ptr)) {
-		err = PTR_ERR(ptr);
-		goto err_obj;
-	}
-	memset(ptr, 0xc5, PAGE_SIZE);
-	i915_gem_object_flush_map(obj);
-	i915_gem_object_unpin_map(obj);
-
-	vma = i915_vma_instance(obj, &ctx->ppgtt->vm, NULL);
-	if (IS_ERR(vma)) {
-		err = PTR_ERR(vma);
-		goto err_obj;
-	}
-
-	err = i915_vma_pin(vma, 0, 0, PIN_USER);
-	if (err)
-		goto err_obj;
-
-	err = i915_gem_object_set_to_cpu_domain(obj, false);
-	if (err)
-		goto err_obj;
-
-	return vma;
-
-err_obj:
-	i915_gem_object_put(obj);
-	return ERR_PTR(err);
-}
-
 static struct i915_vma *create_batch(struct i915_gem_context *ctx)
 {
 	struct drm_i915_gem_object *obj;
@@ -475,7 +432,7 @@ static int check_dirty_whitelist(struct i915_gem_context *ctx,
 	int err = 0, i, v;
 	u32 *cs, *results;
 
-	scratch = create_scratch(ctx);
+	scratch = create_scratch(&ctx->ppgtt->vm, 2 * ARRAY_SIZE(values) + 1);
 	if (IS_ERR(scratch))
 		return PTR_ERR(scratch);
@@ -752,9 +709,11 @@ static bool verify_gt_engine_wa(struct drm_i915_private *i915,
 	ok &= wa_list_verify(&i915->uncore, &lists->gt_wa_list, str);
 
-	for_each_engine(engine, i915, id)
-		ok &= wa_list_verify(engine->uncore,
-				     &lists->engine[id].wa_list, str);
+	for_each_engine(engine, i915, id) {
+		ok &= engine_wa_list_verify(engine,
+					    &lists->engine[id].wa_list,
+					    str) == 0;
+	}
 
 	return ok;
 }