drm/i915: Create/destroy VM (ppGTT) for use with contexts
In preparation to making the ppGTT binding for a context explicit (to
facilitate reusing the same ppGTT between different contexts), allow
the user to create and destroy named ppGTT.

v2: Replace global barrier for swapping over the ppgtt and tlbs with a
local context barrier (Tvrtko)
v3: serialise with struct_mutex; it's lazy but required dammit
v4: Rewrite igt_ctx_shared_exec to be more different (aimed to be more
similarly, turned out different!)
v5: Fix up test unwind for aliasing-ppgtt (snb)
v6: Tighten language for uapi struct drm_i915_gem_vm_control.
v7: Patch the context image for runtime ppgtt switching!

Testcase: igt/gem_vm_create
Testcase: igt/gem_ctx_param/vm
Testcase: igt/gem_ctx_clone/vm
Testcase: igt/gem_ctx_shared
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190322092325.5883-2-chris@chris-wilson.co.uk
parent 9d1305ef80
commit e0695db729
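For orientation before the diff, here is a minimal userspace sketch of the intended flow, not part of the patch itself. Assumptions: libdrm's drmIoctl() wrapper, the uapi header extended by this patch on the include path, an already-open i915 fd and an existing context id; the helper name bind_new_vm_to_ctx() is hypothetical and error handling is trimmed. It creates a VM, binds it to a context through I915_CONTEXT_PARAM_VM, then drops the per-file handle, which the context outlives because it takes its own reference in set_ppgtt().

#include <string.h>
#include <xf86drm.h>     /* drmIoctl(), from libdrm (assumed available) */
#include "i915_drm.h"    /* the uapi header extended by this patch */

/* Hypothetical helper: give an existing context its own, freshly created ppGTT. */
static int bind_new_vm_to_ctx(int i915_fd, __u32 ctx_id)
{
	struct drm_i915_gem_vm_control vm = { .extensions = 0, .flags = 0 };
	struct drm_i915_gem_context_param arg;
	int err;

	/* New, empty address space; the id comes back in vm.vm_id. */
	err = drmIoctl(i915_fd, DRM_IOCTL_I915_GEM_VM_CREATE, &vm);
	if (err)
		return err;

	/* Point the context at that address space. */
	memset(&arg, 0, sizeof(arg));
	arg.ctx_id = ctx_id;
	arg.param = I915_CONTEXT_PARAM_VM;
	arg.value = vm.vm_id;
	err = drmIoctl(i915_fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &arg);

	/* The context holds its own reference, so the per-file handle can go. */
	drmIoctl(i915_fd, DRM_IOCTL_I915_GEM_VM_DESTROY, &vm);
	return err;
}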
@@ -3122,6 +3122,8 @@ static const struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF_DRV(I915_PERF_ADD_CONFIG, i915_perf_add_config_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_PERF_REMOVE_CONFIG, i915_perf_remove_config_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_QUERY, i915_query_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_VM_CREATE, i915_gem_vm_create_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_VM_DESTROY, i915_gem_vm_destroy_ioctl, DRM_RENDER_ALLOW),
};

static struct drm_driver driver = {
@@ -220,6 +220,9 @@ struct drm_i915_file_private {
struct idr context_idr;
struct mutex context_idr_lock; /* guards context_idr */

struct idr vm_idr;
struct mutex vm_idr_lock; /* guards vm_idr */

unsigned int bsd_engine;

/*
@@ -90,6 +90,7 @@
#include "i915_drv.h"
#include "i915_globals.h"
#include "i915_trace.h"
#include "i915_user_extensions.h"
#include "intel_lrc_reg.h"
#include "intel_workarounds.h"

@@ -120,12 +121,15 @@ static void lut_close(struct i915_gem_context *ctx)
list_del(&lut->obj_link);
i915_lut_handle_free(lut);
}
INIT_LIST_HEAD(&ctx->handles_list);

rcu_read_lock();
radix_tree_for_each_slot(slot, &ctx->handles_vma, &iter, 0) {
struct i915_vma *vma = rcu_dereference_raw(*slot);

radix_tree_iter_delete(&ctx->handles_vma, &iter, slot);

vma->open_count--;
__i915_gem_object_release_unless_active(vma->obj);
}
rcu_read_unlock();

@@ -305,8 +309,6 @@ static void context_close(struct i915_gem_context *ctx)
* the ppgtt).
*/
lut_close(ctx);
if (ctx->ppgtt)
i915_ppgtt_close(&ctx->ppgtt->vm);

ctx->file_priv = ERR_PTR(-EBADF);
i915_gem_context_put(ctx);
@@ -378,6 +380,28 @@ __create_context(struct drm_i915_private *dev_priv)
return ctx;
}

static struct i915_hw_ppgtt *
__set_ppgtt(struct i915_gem_context *ctx, struct i915_hw_ppgtt *ppgtt)
{
struct i915_hw_ppgtt *old = ctx->ppgtt;

ctx->ppgtt = i915_ppgtt_get(ppgtt);
ctx->desc_template = default_desc_template(ctx->i915, ppgtt);

return old;
}

static void __assign_ppgtt(struct i915_gem_context *ctx,
struct i915_hw_ppgtt *ppgtt)
{
if (ppgtt == ctx->ppgtt)
return;

ppgtt = __set_ppgtt(ctx, ppgtt);
if (ppgtt)
i915_ppgtt_put(ppgtt);
}

static struct i915_gem_context *
i915_gem_create_context(struct drm_i915_private *dev_priv)
{

@@ -403,8 +427,8 @@ i915_gem_create_context(struct drm_i915_private *dev_priv)
return ERR_CAST(ppgtt);
}

ctx->ppgtt = ppgtt;
ctx->desc_template = default_desc_template(dev_priv, ppgtt);
__assign_ppgtt(ctx, ppgtt);
i915_ppgtt_put(ppgtt);
}

trace_i915_context_create(ctx);

@@ -583,6 +607,12 @@ static int context_idr_cleanup(int id, void *p, void *data)
return 0;
}

static int vm_idr_cleanup(int id, void *p, void *data)
{
i915_ppgtt_put(p);
return 0;
}

static int gem_context_register(struct i915_gem_context *ctx,
struct drm_i915_file_private *fpriv)
{

@@ -621,8 +651,11 @@ int i915_gem_context_open(struct drm_i915_private *i915,
struct i915_gem_context *ctx;
int err;

idr_init(&file_priv->context_idr);
mutex_init(&file_priv->context_idr_lock);
mutex_init(&file_priv->vm_idr_lock);

idr_init(&file_priv->context_idr);
idr_init_base(&file_priv->vm_idr, 1);

mutex_lock(&i915->drm.struct_mutex);
ctx = i915_gem_create_context(i915);

@@ -646,8 +679,10 @@ int i915_gem_context_open(struct drm_i915_private *i915,
context_close(ctx);
mutex_unlock(&i915->drm.struct_mutex);
err:
mutex_destroy(&file_priv->context_idr_lock);
idr_destroy(&file_priv->vm_idr);
idr_destroy(&file_priv->context_idr);
mutex_destroy(&file_priv->vm_idr_lock);
mutex_destroy(&file_priv->context_idr_lock);
return PTR_ERR(ctx);
}
@@ -660,6 +695,99 @@ void i915_gem_context_close(struct drm_file *file)
idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
idr_destroy(&file_priv->context_idr);
mutex_destroy(&file_priv->context_idr_lock);

idr_for_each(&file_priv->vm_idr, vm_idr_cleanup, NULL);
idr_destroy(&file_priv->vm_idr);
mutex_destroy(&file_priv->vm_idr_lock);
}

int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_i915_private *i915 = to_i915(dev);
struct drm_i915_gem_vm_control *args = data;
struct drm_i915_file_private *file_priv = file->driver_priv;
struct i915_hw_ppgtt *ppgtt;
int err;

if (!HAS_FULL_PPGTT(i915))
return -ENODEV;

if (args->flags)
return -EINVAL;

ppgtt = i915_ppgtt_create(i915);
if (IS_ERR(ppgtt))
return PTR_ERR(ppgtt);

ppgtt->vm.file = file_priv;

if (args->extensions) {
err = i915_user_extensions(u64_to_user_ptr(args->extensions),
NULL, 0,
ppgtt);
if (err)
goto err_put;
}

err = mutex_lock_interruptible(&file_priv->vm_idr_lock);
if (err)
goto err_put;

err = idr_alloc(&file_priv->vm_idr, ppgtt, 0, 0, GFP_KERNEL);
if (err < 0)
goto err_unlock;

GEM_BUG_ON(err == 0); /* reserved for default/unassigned ppgtt */
ppgtt->user_handle = err;

mutex_unlock(&file_priv->vm_idr_lock);

args->vm_id = err;
return 0;

err_unlock:
mutex_unlock(&file_priv->vm_idr_lock);
err_put:
i915_ppgtt_put(ppgtt);
return err;
}

int i915_gem_vm_destroy_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_i915_file_private *file_priv = file->driver_priv;
struct drm_i915_gem_vm_control *args = data;
struct i915_hw_ppgtt *ppgtt;
int err;
u32 id;

if (args->flags)
return -EINVAL;

if (args->extensions)
return -EINVAL;

id = args->vm_id;
if (!id)
return -ENOENT;

err = mutex_lock_interruptible(&file_priv->vm_idr_lock);
if (err)
return err;

ppgtt = idr_remove(&file_priv->vm_idr, id);
if (ppgtt) {
GEM_BUG_ON(ppgtt->user_handle != id);
ppgtt->user_handle = 0;
}

mutex_unlock(&file_priv->vm_idr_lock);
if (!ppgtt)
return -ENOENT;

i915_ppgtt_put(ppgtt);
return 0;
}

static struct i915_request *
@@ -702,12 +830,13 @@ static void cb_retire(struct i915_active *base)
I915_SELFTEST_DECLARE(static unsigned long context_barrier_inject_fault);
static int context_barrier_task(struct i915_gem_context *ctx,
unsigned long engines,
int (*emit)(struct i915_request *rq, void *data),
void (*task)(void *data),
void *data)
{
struct drm_i915_private *i915 = ctx->i915;
struct context_barrier_task *cb;
struct intel_context *ce;
struct intel_context *ce, *next;
intel_wakeref_t wakeref;
int err = 0;

@@ -722,11 +851,11 @@ static int context_barrier_task(struct i915_gem_context *ctx,
i915_active_acquire(&cb->base);

wakeref = intel_runtime_pm_get(i915);
list_for_each_entry(ce, &ctx->active_engines, active_link) {
rbtree_postorder_for_each_entry_safe(ce, next, &ctx->hw_contexts, node) {
struct intel_engine_cs *engine = ce->engine;
struct i915_request *rq;

if (!(ce->engine->mask & engines))
if (!(engine->mask & engines))
continue;

if (I915_SELFTEST_ONLY(context_barrier_inject_fault &

@@ -741,7 +870,12 @@ static int context_barrier_task(struct i915_gem_context *ctx,
break;
}

err = i915_active_ref(&cb->base, rq->fence.context, rq);
err = 0;
if (emit)
err = emit(rq, data);
if (err == 0)
err = i915_active_ref(&cb->base, rq->fence.context, rq);

i915_request_add(rq);
if (err)
break;
@@ -804,6 +938,170 @@ int i915_gem_switch_to_kernel_context(struct drm_i915_private *i915,
return 0;
}

static int get_ppgtt(struct i915_gem_context *ctx,
struct drm_i915_gem_context_param *args)
{
struct drm_i915_file_private *file_priv = ctx->file_priv;
struct i915_hw_ppgtt *ppgtt;
int ret;

if (!ctx->ppgtt)
return -ENODEV;

/* XXX rcu acquire? */
ret = mutex_lock_interruptible(&ctx->i915->drm.struct_mutex);
if (ret)
return ret;

ppgtt = i915_ppgtt_get(ctx->ppgtt);
mutex_unlock(&ctx->i915->drm.struct_mutex);

ret = mutex_lock_interruptible(&file_priv->vm_idr_lock);
if (ret)
goto err_put;

if (!ppgtt->user_handle) {
ret = idr_alloc(&file_priv->vm_idr, ppgtt, 0, 0, GFP_KERNEL);
GEM_BUG_ON(!ret);
if (ret < 0)
goto err_unlock;

ppgtt->user_handle = ret;
i915_ppgtt_get(ppgtt);
}

args->size = 0;
args->value = ppgtt->user_handle;

ret = 0;
err_unlock:
mutex_unlock(&file_priv->vm_idr_lock);
err_put:
i915_ppgtt_put(ppgtt);
return ret;
}

static void set_ppgtt_barrier(void *data)
{
struct i915_hw_ppgtt *old = data;

if (INTEL_GEN(old->vm.i915) < 8)
gen6_ppgtt_unpin_all(old);

i915_ppgtt_put(old);
}

static int emit_ppgtt_update(struct i915_request *rq, void *data)
{
struct i915_hw_ppgtt *ppgtt = rq->gem_context->ppgtt;
struct intel_engine_cs *engine = rq->engine;
u32 *cs;
int i;

if (i915_vm_is_4lvl(&ppgtt->vm)) {
const dma_addr_t pd_daddr = px_dma(&ppgtt->pml4);

cs = intel_ring_begin(rq, 6);
if (IS_ERR(cs))
return PTR_ERR(cs);

*cs++ = MI_LOAD_REGISTER_IMM(2);

*cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(engine, 0));
*cs++ = upper_32_bits(pd_daddr);
*cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(engine, 0));
*cs++ = lower_32_bits(pd_daddr);

*cs++ = MI_NOOP;
intel_ring_advance(rq, cs);
} else if (HAS_LOGICAL_RING_CONTEXTS(engine->i915)) {
cs = intel_ring_begin(rq, 4 * GEN8_3LVL_PDPES + 2);
if (IS_ERR(cs))
return PTR_ERR(cs);

*cs++ = MI_LOAD_REGISTER_IMM(2 * GEN8_3LVL_PDPES);
for (i = GEN8_3LVL_PDPES; i--; ) {
const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);

*cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(engine, i));
*cs++ = upper_32_bits(pd_daddr);
*cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(engine, i));
*cs++ = lower_32_bits(pd_daddr);
}
*cs++ = MI_NOOP;
intel_ring_advance(rq, cs);
} else {
/* ppGTT is not part of the legacy context image */
gen6_ppgtt_pin(ppgtt);
}

return 0;
}

static int set_ppgtt(struct i915_gem_context *ctx,
struct drm_i915_gem_context_param *args)
{
struct drm_i915_file_private *file_priv = ctx->file_priv;
struct i915_hw_ppgtt *ppgtt, *old;
int err;

if (args->size)
return -EINVAL;

if (!ctx->ppgtt)
return -ENODEV;

if (upper_32_bits(args->value))
return -ENOENT;

err = mutex_lock_interruptible(&file_priv->vm_idr_lock);
if (err)
return err;

ppgtt = idr_find(&file_priv->vm_idr, args->value);
if (ppgtt) {
GEM_BUG_ON(ppgtt->user_handle != args->value);
i915_ppgtt_get(ppgtt);
}
mutex_unlock(&file_priv->vm_idr_lock);
if (!ppgtt)
return -ENOENT;

err = mutex_lock_interruptible(&ctx->i915->drm.struct_mutex);
if (err)
goto out;

if (ppgtt == ctx->ppgtt)
goto unlock;

/* Teardown the existing obj:vma cache, it will have to be rebuilt. */
lut_close(ctx);

old = __set_ppgtt(ctx, ppgtt);

/*
* We need to flush any requests using the current ppgtt before
* we release it as the requests do not hold a reference themselves,
* only indirectly through the context.
*/
err = context_barrier_task(ctx, ALL_ENGINES,
emit_ppgtt_update,
set_ppgtt_barrier,
old);
if (err) {
ctx->ppgtt = old;
ctx->desc_template = default_desc_template(ctx->i915, old);
i915_ppgtt_put(ppgtt);
}

unlock:
mutex_unlock(&ctx->i915->drm.struct_mutex);

out:
i915_ppgtt_put(ppgtt);
return err;
}

static bool client_is_banned(struct drm_i915_file_private *file_priv)
{
return atomic_read(&file_priv->ban_score) >= I915_CLIENT_SCORE_BANNED;
@@ -984,6 +1282,9 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
case I915_CONTEXT_PARAM_SSEU:
ret = get_sseu(ctx, args);
break;
case I915_CONTEXT_PARAM_VM:
ret = get_ppgtt(ctx, args);
break;
default:
ret = -EINVAL;
break;

@@ -1285,9 +1586,6 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
return -ENOENT;

switch (args->param) {
case I915_CONTEXT_PARAM_BAN_PERIOD:
ret = -EINVAL;
break;
case I915_CONTEXT_PARAM_NO_ZEROMAP:
if (args->size)
ret = -EINVAL;

@@ -1343,9 +1641,16 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
I915_USER_PRIORITY(priority);
}
break;

case I915_CONTEXT_PARAM_SSEU:
ret = set_sseu(ctx, args);
break;

case I915_CONTEXT_PARAM_VM:
ret = set_ppgtt(ctx, args);
break;

case I915_CONTEXT_PARAM_BAN_PERIOD:
default:
ret = -EINVAL;
break;
@@ -148,6 +148,11 @@ void i915_gem_context_release(struct kref *ctx_ref);
struct i915_gem_context *
i915_gem_context_create_gvt(struct drm_device *dev);

int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
int i915_gem_vm_destroy_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);

int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
@@ -1937,6 +1937,8 @@ int gen6_ppgtt_pin(struct i915_hw_ppgtt *base)
struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base);
int err;

GEM_BUG_ON(ppgtt->base.vm.closed);

/*
* Workaround the limited maximum vma->pin_count and the aliasing_ppgtt
* which will be pinned into every active context.

@@ -1975,6 +1977,17 @@ void gen6_ppgtt_unpin(struct i915_hw_ppgtt *base)
i915_vma_unpin(ppgtt->vma);
}

void gen6_ppgtt_unpin_all(struct i915_hw_ppgtt *base)
{
struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base);

if (!ppgtt->pin_count)
return;

ppgtt->pin_count = 0;
i915_vma_unpin(ppgtt->vma);
}

static struct i915_hw_ppgtt *gen6_ppgtt_create(struct drm_i915_private *i915)
{
struct i915_ggtt * const ggtt = &i915->ggtt;

@@ -2082,12 +2095,6 @@ i915_ppgtt_create(struct drm_i915_private *i915)
return ppgtt;
}

void i915_ppgtt_close(struct i915_address_space *vm)
{
GEM_BUG_ON(vm->closed);
vm->closed = true;
}

static void ppgtt_destroy_vma(struct i915_address_space *vm)
{
struct list_head *phases[] = {
@@ -396,6 +396,8 @@ struct i915_hw_ppgtt {
struct i915_page_directory_pointer pdp; /* GEN8+ */
struct i915_page_directory pd; /* GEN6-7 */
};

u32 user_handle;
};

struct gen6_hw_ppgtt {

@@ -605,13 +607,12 @@ void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv);
int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv);

struct i915_hw_ppgtt *i915_ppgtt_create(struct drm_i915_private *dev_priv);
void i915_ppgtt_close(struct i915_address_space *vm);
void i915_ppgtt_release(struct kref *kref);

static inline void i915_ppgtt_get(struct i915_hw_ppgtt *ppgtt)
static inline struct i915_hw_ppgtt *i915_ppgtt_get(struct i915_hw_ppgtt *ppgtt)
{
if (ppgtt)
kref_get(&ppgtt->ref);
kref_get(&ppgtt->ref);
return ppgtt;
}

static inline void i915_ppgtt_put(struct i915_hw_ppgtt *ppgtt)

@@ -622,6 +623,7 @@ static inline void i915_ppgtt_put(struct i915_hw_ppgtt *ppgtt)

int gen6_ppgtt_pin(struct i915_hw_ppgtt *base);
void gen6_ppgtt_unpin(struct i915_hw_ppgtt *base);
void gen6_ppgtt_unpin_all(struct i915_hw_ppgtt *base);

void i915_check_and_clear_faults(struct drm_i915_private *dev_priv);
void i915_gem_suspend_gtt_mappings(struct drm_i915_private *dev_priv);
@@ -1732,7 +1732,6 @@ int i915_gem_huge_page_mock_selftests(void)
err = i915_subtests(tests, ppgtt);

out_close:
i915_ppgtt_close(&ppgtt->vm);
i915_ppgtt_put(ppgtt);

out_unlock:
@@ -373,7 +373,8 @@ static int cpu_fill(struct drm_i915_gem_object *obj, u32 value)
return 0;
}

static int cpu_check(struct drm_i915_gem_object *obj, unsigned int max)
static noinline int cpu_check(struct drm_i915_gem_object *obj,
unsigned int idx, unsigned int max)
{
unsigned int n, m, needs_flush;
int err;

@@ -391,8 +392,10 @@ static int cpu_check(struct drm_i915_gem_object *obj, unsigned int max)

for (m = 0; m < max; m++) {
if (map[m] != m) {
pr_err("Invalid value at page %d, offset %d: found %x expected %x\n",
n, m, map[m], m);
pr_err("%pS: Invalid value at object %d page %d/%ld, offset %d/%d: found %x expected %x\n",
__builtin_return_address(0), idx,
n, real_page_count(obj), m, max,
map[m], m);
err = -EINVAL;
goto out_unmap;
}

@@ -400,8 +403,9 @@ static int cpu_check(struct drm_i915_gem_object *obj, unsigned int max)

for (; m < DW_PER_PAGE; m++) {
if (map[m] != STACK_MAGIC) {
pr_err("Invalid value at page %d, offset %d: found %x expected %x\n",
n, m, map[m], STACK_MAGIC);
pr_err("%pS: Invalid value at object %d page %d, offset %d: found %x expected %x (uninitialised)\n",
__builtin_return_address(0), idx, n, m,
map[m], STACK_MAGIC);
err = -EINVAL;
goto out_unmap;
}

@@ -479,12 +483,8 @@ static unsigned long max_dwords(struct drm_i915_gem_object *obj)
static int igt_ctx_exec(void *arg)
{
struct drm_i915_private *i915 = arg;
struct drm_i915_gem_object *obj = NULL;
unsigned long ncontexts, ndwords, dw;
struct igt_live_test t;
struct drm_file *file;
IGT_TIMEOUT(end_time);
LIST_HEAD(objects);
struct intel_engine_cs *engine;
enum intel_engine_id id;
int err = -ENODEV;

/*
@@ -496,38 +496,42 @@ static int igt_ctx_exec(void *arg)
if (!DRIVER_CAPS(i915)->has_logical_contexts)
return 0;

file = mock_file(i915);
if (IS_ERR(file))
return PTR_ERR(file);
for_each_engine(engine, i915, id) {
struct drm_i915_gem_object *obj = NULL;
unsigned long ncontexts, ndwords, dw;
struct igt_live_test t;
struct drm_file *file;
IGT_TIMEOUT(end_time);
LIST_HEAD(objects);

mutex_lock(&i915->drm.struct_mutex);
if (!intel_engine_can_store_dword(engine))
continue;

err = igt_live_test_begin(&t, i915, __func__, "");
if (err)
goto out_unlock;
if (!engine->context_size)
continue; /* No logical context support in HW */

ncontexts = 0;
ndwords = 0;
dw = 0;
while (!time_after(jiffies, end_time)) {
struct intel_engine_cs *engine;
struct i915_gem_context *ctx;
unsigned int id;
file = mock_file(i915);
if (IS_ERR(file))
return PTR_ERR(file);

ctx = live_context(i915, file);
if (IS_ERR(ctx)) {
err = PTR_ERR(ctx);
mutex_lock(&i915->drm.struct_mutex);

err = igt_live_test_begin(&t, i915, __func__, engine->name);
if (err)
goto out_unlock;
}

for_each_engine(engine, i915, id) {
ncontexts = 0;
ndwords = 0;
dw = 0;
while (!time_after(jiffies, end_time)) {
struct i915_gem_context *ctx;
intel_wakeref_t wakeref;

if (!engine->context_size)
continue; /* No logical context support in HW */

if (!intel_engine_can_store_dword(engine))
continue;
ctx = live_context(i915, file);
if (IS_ERR(ctx)) {
err = PTR_ERR(ctx);
goto out_unlock;
}

if (!obj) {
obj = create_test_object(ctx, file, &objects);

@@ -537,7 +541,6 @@ static int igt_ctx_exec(void *arg)
}
}

err = 0;
with_intel_runtime_pm(i915, wakeref)
err = gpu_fill(obj, ctx, engine, dw);
if (err) {
@@ -552,28 +555,152 @@ static int igt_ctx_exec(void *arg)
obj = NULL;
dw = 0;
}

ndwords++;
ncontexts++;
}
ncontexts++;
}
pr_info("Submitted %lu contexts (across %u engines), filling %lu dwords\n",
ncontexts, RUNTIME_INFO(i915)->num_engines, ndwords);

dw = 0;
list_for_each_entry(obj, &objects, st_link) {
unsigned int rem =
min_t(unsigned int, ndwords - dw, max_dwords(obj));
pr_info("Submitted %lu contexts to %s, filling %lu dwords\n",
ncontexts, engine->name, ndwords);

err = cpu_check(obj, rem);
if (err)
break;
ncontexts = dw = 0;
list_for_each_entry(obj, &objects, st_link) {
unsigned int rem =
min_t(unsigned int, ndwords - dw, max_dwords(obj));

dw += rem;
}
err = cpu_check(obj, ncontexts++, rem);
if (err)
break;

dw += rem;
}

out_unlock:
if (igt_live_test_end(&t))
err = -EIO;
mutex_unlock(&i915->drm.struct_mutex);

mock_file_free(i915, file);
if (err)
return err;
}

return 0;
}

static int igt_shared_ctx_exec(void *arg)
{
struct drm_i915_private *i915 = arg;
struct i915_gem_context *parent;
struct intel_engine_cs *engine;
enum intel_engine_id id;
struct igt_live_test t;
struct drm_file *file;
int err = 0;

/*
* Create a few different contexts with the same mm and write
* through each ctx using the GPU making sure those writes end
* up in the expected pages of our obj.
*/
if (!DRIVER_CAPS(i915)->has_logical_contexts)
return 0;

file = mock_file(i915);
if (IS_ERR(file))
return PTR_ERR(file);

mutex_lock(&i915->drm.struct_mutex);

parent = live_context(i915, file);
if (IS_ERR(parent)) {
err = PTR_ERR(parent);
goto out_unlock;
}

if (!parent->ppgtt) { /* not full-ppgtt; nothing to share */
err = 0;
goto out_unlock;
}

err = igt_live_test_begin(&t, i915, __func__, "");
if (err)
goto out_unlock;

for_each_engine(engine, i915, id) {
unsigned long ncontexts, ndwords, dw;
struct drm_i915_gem_object *obj = NULL;
IGT_TIMEOUT(end_time);
LIST_HEAD(objects);

if (!intel_engine_can_store_dword(engine))
continue;

dw = 0;
ndwords = 0;
ncontexts = 0;
while (!time_after(jiffies, end_time)) {
struct i915_gem_context *ctx;
intel_wakeref_t wakeref;

ctx = kernel_context(i915);
if (IS_ERR(ctx)) {
err = PTR_ERR(ctx);
goto out_test;
}

__assign_ppgtt(ctx, parent->ppgtt);

if (!obj) {
obj = create_test_object(parent, file, &objects);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
kernel_context_close(ctx);
goto out_test;
}
}

err = 0;
with_intel_runtime_pm(i915, wakeref)
err = gpu_fill(obj, ctx, engine, dw);
if (err) {
pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n",
ndwords, dw, max_dwords(obj),
engine->name, ctx->hw_id,
yesno(!!ctx->ppgtt), err);
kernel_context_close(ctx);
goto out_test;
}

if (++dw == max_dwords(obj)) {
obj = NULL;
dw = 0;
}

ndwords++;
ncontexts++;

kernel_context_close(ctx);
}
pr_info("Submitted %lu contexts to %s, filling %lu dwords\n",
ncontexts, engine->name, ndwords);

ncontexts = dw = 0;
list_for_each_entry(obj, &objects, st_link) {
unsigned int rem =
min_t(unsigned int, ndwords - dw, max_dwords(obj));

err = cpu_check(obj, ncontexts++, rem);
if (err)
goto out_test;

dw += rem;
}
}
out_test:
if (igt_live_test_end(&t))
err = -EIO;
out_unlock:
mutex_unlock(&i915->drm.struct_mutex);

mock_file_free(i915, file);
@@ -1046,7 +1173,7 @@ static int igt_ctx_readonly(void *arg)
struct drm_i915_gem_object *obj = NULL;
struct i915_gem_context *ctx;
struct i915_hw_ppgtt *ppgtt;
unsigned long ndwords, dw;
unsigned long idx, ndwords, dw;
struct igt_live_test t;
struct drm_file *file;
I915_RND_STATE(prng);

@@ -1127,6 +1254,7 @@ static int igt_ctx_readonly(void *arg)
ndwords, RUNTIME_INFO(i915)->num_engines);

dw = 0;
idx = 0;
list_for_each_entry(obj, &objects, st_link) {
unsigned int rem =
min_t(unsigned int, ndwords - dw, max_dwords(obj));

@@ -1136,7 +1264,7 @@ static int igt_ctx_readonly(void *arg)
if (i915_gem_object_is_readonly(obj))
num_writes = 0;

err = cpu_check(obj, num_writes);
err = cpu_check(obj, idx++, num_writes);
if (err)
break;
@@ -1619,7 +1747,8 @@ static int mock_context_barrier(void *arg)
}

counter = 0;
err = context_barrier_task(ctx, 0, mock_barrier_task, &counter);
err = context_barrier_task(ctx, 0,
NULL, mock_barrier_task, &counter);
if (err) {
pr_err("Failed at line %d, err=%d\n", __LINE__, err);
goto out;

@@ -1631,8 +1760,8 @@ static int mock_context_barrier(void *arg)
}

counter = 0;
err = context_barrier_task(ctx,
ALL_ENGINES, mock_barrier_task, &counter);
err = context_barrier_task(ctx, ALL_ENGINES,
NULL, mock_barrier_task, &counter);
if (err) {
pr_err("Failed at line %d, err=%d\n", __LINE__, err);
goto out;

@@ -1655,8 +1784,8 @@ static int mock_context_barrier(void *arg)

counter = 0;
context_barrier_inject_fault = BIT(RCS0);
err = context_barrier_task(ctx,
ALL_ENGINES, mock_barrier_task, &counter);
err = context_barrier_task(ctx, ALL_ENGINES,
NULL, mock_barrier_task, &counter);
context_barrier_inject_fault = 0;
if (err == -ENXIO)
err = 0;

@@ -1670,8 +1799,8 @@ static int mock_context_barrier(void *arg)
goto out;

counter = 0;
err = context_barrier_task(ctx,
ALL_ENGINES, mock_barrier_task, &counter);
err = context_barrier_task(ctx, ALL_ENGINES,
NULL, mock_barrier_task, &counter);
if (err) {
pr_err("Failed at line %d, err=%d\n", __LINE__, err);
goto out;

@@ -1719,6 +1848,7 @@ int i915_gem_context_live_selftests(struct drm_i915_private *dev_priv)
SUBTEST(igt_ctx_exec),
SUBTEST(igt_ctx_readonly),
SUBTEST(igt_ctx_sseu),
SUBTEST(igt_shared_ctx_exec),
SUBTEST(igt_vm_isolation),
};
@@ -1020,7 +1020,6 @@ static int exercise_ppgtt(struct drm_i915_private *dev_priv,

err = func(dev_priv, &ppgtt->vm, 0, ppgtt->vm.total, end_time);

i915_ppgtt_close(&ppgtt->vm);
i915_ppgtt_put(ppgtt);
out_unlock:
mutex_unlock(&dev_priv->drm.struct_mutex);
@@ -54,13 +54,17 @@ mock_context(struct drm_i915_private *i915,
goto err_handles;

if (name) {
struct i915_hw_ppgtt *ppgtt;

ctx->name = kstrdup(name, GFP_KERNEL);
if (!ctx->name)
goto err_put;

ctx->ppgtt = mock_ppgtt(i915, name);
if (!ctx->ppgtt)
ppgtt = mock_ppgtt(i915, name);
if (!ppgtt)
goto err_put;

__set_ppgtt(ctx, ppgtt);
}

return ctx;
@@ -343,6 +343,8 @@ typedef struct _drm_i915_sarea {
#define DRM_I915_PERF_ADD_CONFIG 0x37
#define DRM_I915_PERF_REMOVE_CONFIG 0x38
#define DRM_I915_QUERY 0x39
#define DRM_I915_GEM_VM_CREATE 0x3a
#define DRM_I915_GEM_VM_DESTROY 0x3b
/* Must be kept compact -- no holes */

#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)

@@ -402,6 +404,8 @@ typedef struct _drm_i915_sarea {
#define DRM_IOCTL_I915_PERF_ADD_CONFIG DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_ADD_CONFIG, struct drm_i915_perf_oa_config)
#define DRM_IOCTL_I915_PERF_REMOVE_CONFIG DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_REMOVE_CONFIG, __u64)
#define DRM_IOCTL_I915_QUERY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_QUERY, struct drm_i915_query)
#define DRM_IOCTL_I915_GEM_VM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_VM_CREATE, struct drm_i915_gem_vm_control)
#define DRM_IOCTL_I915_GEM_VM_DESTROY DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_VM_DESTROY, struct drm_i915_gem_vm_control)

/* Allow drivers to submit batchbuffers directly to hardware, relying
* on the security mechanisms provided by hardware.
@@ -1453,6 +1457,33 @@ struct drm_i915_gem_context_destroy {
__u32 pad;
};

/*
* DRM_I915_GEM_VM_CREATE -
*
* Create a new virtual memory address space (ppGTT) for use within a context
* on the same file. Extensions can be provided to configure exactly how the
* address space is setup upon creation.
*
* The id of new VM (bound to the fd) for use with I915_CONTEXT_PARAM_VM is
* returned in the outparam @vm_id.
*
* No flags are defined, with all bits reserved and must be zero.
*
* An extension chain maybe provided, starting with @extensions, and terminated
* by the @next_extension being 0. Currently, no extensions are defined.
*
* DRM_I915_GEM_VM_DESTROY -
*
* Destroys a previously created VM id, specified in @vm_id.
*
* No extensions or flags are allowed currently, and so must be zero.
*/
struct drm_i915_gem_vm_control {
__u64 extensions;
__u32 flags;
__u32 vm_id;
};

struct drm_i915_reg_read {
/*
* Register offset.
@@ -1542,7 +1573,19 @@ struct drm_i915_gem_context_param {
* On creation, all new contexts are marked as recoverable.
*/
#define I915_CONTEXT_PARAM_RECOVERABLE 0x8

/*
* The id of the associated virtual memory address space (ppGTT) of
* this context. Can be retrieved and passed to another context
* (on the same fd) for both to use the same ppGTT and so share
* address layouts, and avoid reloading the page tables on context
* switches between themselves.
*
* See DRM_I915_GEM_VM_CREATE and DRM_I915_GEM_VM_DESTROY.
*/
#define I915_CONTEXT_PARAM_VM 0x9
/* Must be kept compact -- no holes and well documented */

__u64 value;
};
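As a usage note on I915_CONTEXT_PARAM_VM above, sharing one ppGTT between two contexts on the same fd might look like the sketch below (assumptions: libdrm's drmIoctl(), both contexts already created; share_vm() is a hypothetical helper and error handling is trimmed). GETPARAM lazily exports the first context's ppGTT into the file's vm_idr (see get_ppgtt() in the diff), and SETPARAM rebinds the second context to that address space behind a context barrier (see set_ppgtt()).

/* Hypothetical helper: make ctx_b run in ctx_a's address space. */
static int share_vm(int i915_fd, __u32 ctx_a, __u32 ctx_b)
{
	struct drm_i915_gem_context_param arg = {
		.ctx_id = ctx_a,
		.param = I915_CONTEXT_PARAM_VM,
	};
	int err;

	/* Export ctx_a's ppGTT as a per-file vm_id (lazy idr_alloc in get_ppgtt()). */
	err = drmIoctl(i915_fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &arg);
	if (err)
		return err;

	/* Rebind ctx_b to the same ppGTT; requests on the old vm are flushed first. */
	arg.ctx_id = ctx_b;
	return drmIoctl(i915_fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &arg);
}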