Merge tag 'drm-intel-fixes-2017-04-12' of git://anongit.freedesktop.org/git/drm-intel into drm-fixes

drm/i915 fixes for v4.11-rc7: one RCU-related fix, and a few GVT fixes.

* tag 'drm-intel-fixes-2017-04-12' of git://anongit.freedesktop.org/git/drm-intel:
  drm/i915: Don't call synchronize_rcu_expedited under struct_mutex
  drm/i915: Suspend GuC prior to GPU Reset during GEM suspend
  drm/i915/gvt: set the correct default value of CTX STATUS PTR
  drm/i915/gvt: Fix firmware loading interface for GVT-g golden HW state
  drm/i915: Use a dummy timeline name for a signaled fence
  drm/i915: Ironlake do_idle_maps w/a may be called w/o struct_mutex
  drm/i915/gvt: remove the redundant info NULL check
  drm/i915/gvt: adjust mem size for low resolution type
  drm/i915: Avoid lock dropping between rescheduling
  drm/i915/gvt: exclude cfg space from failsafe mode
  drm/i915/gvt: Activate/de-activate vGPU in mdev ops.
  drm/i915/execlists: Wrap tail pointer after reset tweaking
  drm/i915/perf: remove user triggerable warn
  drm/i915/perf: destroy stream on sample_flags mismatch
  drm/i915: Align "unfenced" tiled access on gen2, early gen3
commit 88b0b92bda
--- a/drivers/gpu/drm/i915/gvt/cfg_space.c
+++ b/drivers/gpu/drm/i915/gvt/cfg_space.c
@@ -285,9 +285,6 @@ int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
 {
     int ret;
 
-    if (vgpu->failsafe)
-        return 0;
-
     if (WARN_ON(bytes > 4))
         return -EINVAL;
 
--- a/drivers/gpu/drm/i915/gvt/execlist.c
+++ b/drivers/gpu/drm/i915/gvt/execlist.c
@@ -775,7 +775,8 @@ static void init_vgpu_execlist(struct intel_vgpu *vgpu, int ring_id)
                 _EL_OFFSET_STATUS_PTR);
 
     ctx_status_ptr.dw = vgpu_vreg(vgpu, ctx_status_ptr_reg);
-    ctx_status_ptr.read_ptr = ctx_status_ptr.write_ptr = 0x7;
+    ctx_status_ptr.read_ptr = 0;
+    ctx_status_ptr.write_ptr = 0x7;
     vgpu_vreg(vgpu, ctx_status_ptr_reg) = ctx_status_ptr.dw;
 }
 
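The context-status-pointer register packs both pointers into a single dword, which is why each field is rewritten through the ctx_status_ptr union before the dword is stored back. A standalone sketch of that pack/unpack pattern (field positions are illustrative only, not the real register layout):

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative layout only -- not the real CTX STATUS PTR register. */
    union csb_ptr {
        uint32_t dw;
        struct {
            uint32_t write_ptr : 3;
            uint32_t rsvd_lo   : 5;
            uint32_t read_ptr  : 3;
            uint32_t rsvd_hi   : 21;
        };
    };

    int main(void)
    {
        union csb_ptr p = { .dw = 0 };

        p.read_ptr = 0;    /* patched default: read pointer starts at 0 */
        p.write_ptr = 0x7; /* write pointer keeps its reset value of 7 */
        printf("dw = 0x%08x\n", (unsigned int)p.dw);
        return 0;
    }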
--- a/drivers/gpu/drm/i915/gvt/firmware.c
+++ b/drivers/gpu/drm/i915/gvt/firmware.c
@@ -75,11 +75,11 @@ static int expose_firmware_sysfs(struct intel_gvt *gvt)
     struct gvt_firmware_header *h;
     void *firmware;
     void *p;
-    unsigned long size;
+    unsigned long size, crc32_start;
     int i;
     int ret;
 
-    size = sizeof(*h) + info->mmio_size + info->cfg_space_size - 1;
+    size = sizeof(*h) + info->mmio_size + info->cfg_space_size;
     firmware = vzalloc(size);
     if (!firmware)
         return -ENOMEM;
@@ -112,6 +112,9 @@ static int expose_firmware_sysfs(struct intel_gvt *gvt)
 
     memcpy(gvt->firmware.mmio, p, info->mmio_size);
 
+    crc32_start = offsetof(struct gvt_firmware_header, crc32) + 4;
+    h->crc32 = crc32_le(0, firmware + crc32_start, size - crc32_start);
+
     firmware_attr.size = size;
     firmware_attr.private = firmware;
 
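The two added lines checksum everything that follows the crc32 field itself: crc32_start is the byte offset just past that field, so the CRC covers the rest of the header and the whole payload. A minimal userspace model of the same idiom (zlib's crc32() standing in for the kernel's crc32_le(); fw_header is a stand-in, not the real gvt_firmware_header):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>
    #include <zlib.h>  /* link with -lz */

    /* Stand-in header -- not the real struct gvt_firmware_header. */
    struct fw_header {
        uint64_t magic;
        uint32_t crc32;    /* checksum of everything after this field */
        uint32_t version;
    };

    int main(void)
    {
        unsigned char image[256];
        struct fw_header *h = (struct fw_header *)image;
        /* same arithmetic as offsetof(..., crc32) + 4 in the patch */
        size_t crc_start = offsetof(struct fw_header, crc32) + sizeof(h->crc32);

        memset(image, 0xab, sizeof(image));
        h->magic = 0x474f4c44454e4857ULL; /* arbitrary */
        h->version = 1;
        h->crc32 = crc32(0L, image + crc_start, sizeof(image) - crc_start);
        printf("crc32 = 0x%08x\n", (unsigned int)h->crc32);
        return 0;
    }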
@@ -234,7 +237,7 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt)
 
     firmware->mmio = mem;
 
-    sprintf(path, "%s/vid_0x%04x_did_0x%04x_rid_0x%04x.golden_hw_state",
+    sprintf(path, "%s/vid_0x%04x_did_0x%04x_rid_0x%02x.golden_hw_state",
         GVT_FIRMWARE_PATH, pdev->vendor, pdev->device,
         pdev->revision);
 
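pdev->revision is a single byte, so the old %04x format padded it to four hex digits and built a file name that does not match the two-digit form the golden HW state file is named with. A quick printf illustration (the IDs are made up):

    #include <stdio.h>

    int main(void)
    {
        unsigned char revision = 0x0a;  /* PCI revision ID is one byte */

        printf("vid_0x%04x_did_0x%04x_rid_0x%04x\n", 0x8086, 0x191b, revision);
        /* -> ..._rid_0x000a: four digits, wrong file name */
        printf("vid_0x%04x_did_0x%04x_rid_0x%02x\n", 0x8086, 0x191b, revision);
        /* -> ..._rid_0x0a: matches the generated golden HW state file */
        return 0;
    }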
--- a/drivers/gpu/drm/i915/gvt/gvt.c
+++ b/drivers/gpu/drm/i915/gvt/gvt.c
@@ -52,6 +52,8 @@ static const struct intel_gvt_ops intel_gvt_ops = {
     .vgpu_create = intel_gvt_create_vgpu,
     .vgpu_destroy = intel_gvt_destroy_vgpu,
     .vgpu_reset = intel_gvt_reset_vgpu,
+    .vgpu_activate = intel_gvt_activate_vgpu,
+    .vgpu_deactivate = intel_gvt_deactivate_vgpu,
 };
 
 /**
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -382,7 +382,8 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu);
 void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
                  unsigned int engine_mask);
 void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu);
-
+void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu);
+void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu);
 
 /* validating GM functions */
 #define vgpu_gmadr_is_aperture(vgpu, gmadr) \
@@ -449,6 +450,8 @@ struct intel_gvt_ops {
             struct intel_vgpu_type *);
     void (*vgpu_destroy)(struct intel_vgpu *);
     void (*vgpu_reset)(struct intel_vgpu *);
+    void (*vgpu_activate)(struct intel_vgpu *);
+    void (*vgpu_deactivate)(struct intel_vgpu *);
 };
 
 
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -544,6 +544,8 @@ static int intel_vgpu_open(struct mdev_device *mdev)
     if (ret)
         goto undo_group;
 
+    intel_gvt_ops->vgpu_activate(vgpu);
+
     atomic_set(&vgpu->vdev.released, 0);
     return ret;
 
@@ -569,6 +571,8 @@ static void __intel_vgpu_release(struct intel_vgpu *vgpu)
     if (atomic_cmpxchg(&vgpu->vdev.released, 0, 1))
         return;
 
+    intel_gvt_ops->vgpu_deactivate(vgpu);
+
     ret = vfio_unregister_notifier(mdev_dev(vgpu->vdev.mdev), VFIO_IOMMU_NOTIFY,
                     &vgpu->vdev.iommu_notifier);
     WARN(ret, "vfio_unregister_notifier for iommu failed: %d\n", ret);
@@ -1340,13 +1344,6 @@ static int kvmgt_guest_init(struct mdev_device *mdev)
 
 static bool kvmgt_guest_exit(struct kvmgt_guest_info *info)
 {
-    struct intel_vgpu *vgpu = info->vgpu;
-
-    if (!info) {
-        gvt_vgpu_err("kvmgt_guest_info invalid\n");
-        return false;
-    }
-
     kvm_page_track_unregister_notifier(info->kvm, &info->track_node);
     kvm_put_kvm(info->kvm);
     kvmgt_protect_table_destroy(info);
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -72,7 +72,7 @@ static struct {
     char *name;
 } vgpu_types[] = {
 /* Fixed vGPU type table */
-    { MB_TO_BYTES(64), MB_TO_BYTES(512), 4, GVT_EDID_1024_768, "8" },
+    { MB_TO_BYTES(64), MB_TO_BYTES(384), 4, GVT_EDID_1024_768, "8" },
     { MB_TO_BYTES(128), MB_TO_BYTES(512), 4, GVT_EDID_1920_1200, "4" },
     { MB_TO_BYTES(256), MB_TO_BYTES(1024), 4, GVT_EDID_1920_1200, "2" },
     { MB_TO_BYTES(512), MB_TO_BYTES(2048), 4, GVT_EDID_1920_1200, "1" },
@@ -178,6 +178,47 @@ static void intel_gvt_update_vgpu_types(struct intel_gvt *gvt)
     }
 }
 
+/**
+ * intel_gvt_active_vgpu - activate a virtual GPU
+ * @vgpu: virtual GPU
+ *
+ * This function is called when user wants to activate a virtual GPU.
+ *
+ */
+void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu)
+{
+    mutex_lock(&vgpu->gvt->lock);
+    vgpu->active = true;
+    mutex_unlock(&vgpu->gvt->lock);
+}
+
+/**
+ * intel_gvt_deactive_vgpu - deactivate a virtual GPU
+ * @vgpu: virtual GPU
+ *
+ * This function is called when user wants to deactivate a virtual GPU.
+ * All virtual GPU runtime information will be destroyed.
+ *
+ */
+void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu)
+{
+    struct intel_gvt *gvt = vgpu->gvt;
+
+    mutex_lock(&gvt->lock);
+
+    vgpu->active = false;
+
+    if (atomic_read(&vgpu->running_workload_num)) {
+        mutex_unlock(&gvt->lock);
+        intel_gvt_wait_vgpu_idle(vgpu);
+        mutex_lock(&gvt->lock);
+    }
+
+    intel_vgpu_stop_schedule(vgpu);
+
+    mutex_unlock(&gvt->lock);
+}
+
 /**
  * intel_gvt_destroy_vgpu - destroy a virtual GPU
  * @vgpu: virtual GPU
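These two hooks split vGPU teardown in half: deactivation (wait for workloads to drain, stop scheduling) now happens at mdev release time, while destroy only frees resources and warns if the vGPU is somehow still active. A runnable stand-in model of that ordering (plain C, simplified types, locking elided):

    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-in for struct intel_vgpu -- illustrative only. */
    struct vgpu {
        bool active;
        int running_workload_num;
    };

    static void vgpu_activate(struct vgpu *v)   /* mdev open */
    {
        v->active = true;
    }

    static void vgpu_deactivate(struct vgpu *v) /* mdev release */
    {
        v->active = false;
        while (v->running_workload_num)         /* intel_gvt_wait_vgpu_idle() */
            v->running_workload_num--;
        /* intel_vgpu_stop_schedule() would run here */
    }

    static void vgpu_destroy(struct vgpu *v)
    {
        if (v->active)                          /* the new WARN() in destroy */
            fprintf(stderr, "vGPU is still active!\n");
        /* clean sched policy, context, execlists, ... */
    }

    int main(void)
    {
        struct vgpu v = { .active = false, .running_workload_num = 2 };

        vgpu_activate(&v);
        vgpu_deactivate(&v);
        vgpu_destroy(&v);
        return 0;
    }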
@@ -191,16 +232,9 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
 
     mutex_lock(&gvt->lock);
 
-    vgpu->active = false;
+    WARN(vgpu->active, "vGPU is still active!\n");
 
     idr_remove(&gvt->vgpu_idr, vgpu->id);
 
-    if (atomic_read(&vgpu->running_workload_num)) {
-        mutex_unlock(&gvt->lock);
-        intel_gvt_wait_vgpu_idle(vgpu);
-        mutex_lock(&gvt->lock);
-    }
-
-    intel_vgpu_stop_schedule(vgpu);
     intel_vgpu_clean_sched_policy(vgpu);
     intel_vgpu_clean_gvt_context(vgpu);
     intel_vgpu_clean_execlist(vgpu);
@@ -277,7 +311,6 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
     if (ret)
         goto out_clean_shadow_ctx;
 
-    vgpu->active = true;
     mutex_unlock(&gvt->lock);
 
     return vgpu;
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -1434,8 +1434,6 @@ static int i915_drm_suspend(struct drm_device *dev)
         goto out;
     }
 
-    intel_guc_suspend(dev_priv);
-
     intel_display_suspend(dev);
 
     intel_dp_mst_suspend(dev);
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -806,6 +806,7 @@ struct intel_csr {
     func(has_resource_streamer); \
     func(has_runtime_pm); \
     func(has_snoop); \
+    func(unfenced_needs_alignment); \
     func(cursor_needs_physical); \
     func(hws_needs_physical); \
     func(overlay_needs_physical); \
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -4348,6 +4348,8 @@ int i915_gem_suspend(struct drm_i915_private *dev_priv)
     i915_gem_context_lost(dev_priv);
     mutex_unlock(&dev->struct_mutex);
 
+    intel_guc_suspend(dev_priv);
+
     cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
     cancel_delayed_work_sync(&dev_priv->gt.retire_work);
 
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -888,6 +888,7 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *engine,
     struct list_head ordered_vmas;
     struct list_head pinned_vmas;
     bool has_fenced_gpu_access = INTEL_GEN(engine->i915) < 4;
+    bool needs_unfenced_map = INTEL_INFO(engine->i915)->unfenced_needs_alignment;
     int retry;
 
     vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm;
@@ -908,7 +909,8 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *engine,
         if (!has_fenced_gpu_access)
             entry->flags &= ~EXEC_OBJECT_NEEDS_FENCE;
         need_fence =
-            entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
+            (entry->flags & EXEC_OBJECT_NEEDS_FENCE ||
+             needs_unfenced_map) &&
             i915_gem_object_is_tiled(obj);
         need_mappable = need_fence || need_reloc_mappable(vma);
 
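The rewritten condition is the heart of the gen2/early-gen3 fix: a tiled object now takes the fence-aligned placement path either when userspace asked for a fence or when the platform needs even unfenced tiled access aligned (unfenced_needs_alignment). A small truth-table check of the expression (the flag value is a stand-in, not the real UAPI constant):

    #include <stdbool.h>
    #include <stdio.h>

    #define EXEC_OBJECT_NEEDS_FENCE 0x1  /* stand-in value */

    static bool need_fence(unsigned int flags, bool needs_unfenced_map, bool tiled)
    {
        return (flags & EXEC_OBJECT_NEEDS_FENCE || needs_unfenced_map) && tiled;
    }

    int main(void)
    {
        /* gen2: tiled but no fence requested -- now aligned anyway */
        printf("%d\n", need_fence(0, true, true));                        /* 1 */
        /* linear objects are unaffected */
        printf("%d\n", need_fence(0, true, false));                       /* 0 */
        /* later gens keep the old behaviour */
        printf("%d\n", need_fence(EXEC_OBJECT_NEEDS_FENCE, false, true)); /* 1 */
        printf("%d\n", need_fence(0, false, true));                       /* 0 */
        return 0;
    }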
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -2704,7 +2704,7 @@ void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
     struct i915_ggtt *ggtt = &dev_priv->ggtt;
 
     if (unlikely(ggtt->do_idle_maps)) {
-        if (i915_gem_wait_for_idle(dev_priv, I915_WAIT_LOCKED)) {
+        if (i915_gem_wait_for_idle(dev_priv, 0)) {
             DRM_ERROR("Failed to wait for idle; VT'd may hang.\n");
             /* Wait a bit, in hopes it avoids the hang */
             udelay(10);
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -37,6 +37,17 @@ static const char *i915_fence_get_driver_name(struct dma_fence *fence)
 
 static const char *i915_fence_get_timeline_name(struct dma_fence *fence)
 {
+    /* The timeline struct (as part of the ppgtt underneath a context)
+     * may be freed when the request is no longer in use by the GPU.
+     * We could extend the life of a context to beyond that of all
+     * fences, possibly keeping the hw resource around indefinitely,
+     * or we just give them a false name. Since
+     * dma_fence_ops.get_timeline_name is a debug feature, the occasional
+     * lie seems justifiable.
+     */
+    if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+        return "signaled";
+
     return to_request(fence)->timeline->common->name;
 }
 
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -53,6 +53,17 @@ static bool i915_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
     BUG();
 }
 
+static void i915_gem_shrinker_unlock(struct drm_device *dev, bool unlock)
+{
+    if (!unlock)
+        return;
+
+    mutex_unlock(&dev->struct_mutex);
+
+    /* expedite the RCU grace period to free some request slabs */
+    synchronize_rcu_expedited();
+}
+
 static bool any_vma_pinned(struct drm_i915_gem_object *obj)
 {
     struct i915_vma *vma;
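What matters in the new helper is ordering: struct_mutex is dropped before synchronize_rcu_expedited() runs, so the expensive grace-period wait can no longer stall other struct_mutex users, which is the bug the first patch in this tag fixes. A generic pthread model of the drop-then-wait pattern (nothing i915-specific):

    #include <pthread.h>
    #include <stdio.h>
    #include <unistd.h>

    static pthread_mutex_t struct_mutex = PTHREAD_MUTEX_INITIALIZER;

    static void expensive_grace_period(void) /* synchronize_rcu_expedited() */
    {
        usleep(1000);
    }

    static void shrinker_unlock(int locked)
    {
        if (!locked)
            return;

        pthread_mutex_unlock(&struct_mutex);
        /* only wait once the big lock is no longer held */
        expensive_grace_period();
    }

    int main(void)
    {
        pthread_mutex_lock(&struct_mutex);
        /* ... reclaim work under the lock ... */
        shrinker_unlock(1);
        puts("done");
        return 0;
    }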
@@ -232,11 +243,8 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
         intel_runtime_pm_put(dev_priv);
 
     i915_gem_retire_requests(dev_priv);
-    if (unlock)
-        mutex_unlock(&dev_priv->drm.struct_mutex);
 
-    /* expedite the RCU grace period to free some request slabs */
-    synchronize_rcu_expedited();
+    i915_gem_shrinker_unlock(&dev_priv->drm, unlock);
 
     return count;
 }
@@ -293,8 +301,7 @@ i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
             count += obj->base.size >> PAGE_SHIFT;
     }
 
-    if (unlock)
-        mutex_unlock(&dev->struct_mutex);
+    i915_gem_shrinker_unlock(dev, unlock);
 
     return count;
 }
@@ -321,8 +328,8 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
                      sc->nr_to_scan - freed,
                      I915_SHRINK_BOUND |
                      I915_SHRINK_UNBOUND);
-    if (unlock)
-        mutex_unlock(&dev->struct_mutex);
+
+    i915_gem_shrinker_unlock(dev, unlock);
 
     return freed;
 }
@@ -364,8 +371,7 @@ i915_gem_shrinker_unlock_uninterruptible(struct drm_i915_private *dev_priv,
                      struct shrinker_lock_uninterruptible *slu)
 {
     dev_priv->mm.interruptible = slu->was_interruptible;
-    if (slu->unlock)
-        mutex_unlock(&dev_priv->drm.struct_mutex);
+    i915_gem_shrinker_unlock(&dev_priv->drm, slu->unlock);
 }
 
 static int
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -60,6 +60,7 @@
     .has_overlay = 1, .overlay_needs_physical = 1, \
     .has_gmch_display = 1, \
     .hws_needs_physical = 1, \
+    .unfenced_needs_alignment = 1, \
     .ring_mask = RENDER_RING, \
     GEN_DEFAULT_PIPEOFFSETS, \
     CURSOR_OFFSETS
@@ -101,6 +102,7 @@ static const struct intel_device_info intel_i915g_info = {
     .platform = INTEL_I915G, .cursor_needs_physical = 1,
     .has_overlay = 1, .overlay_needs_physical = 1,
     .hws_needs_physical = 1,
+    .unfenced_needs_alignment = 1,
 };
 
 static const struct intel_device_info intel_i915gm_info = {
@@ -112,6 +114,7 @@ static const struct intel_device_info intel_i915gm_info = {
     .supports_tv = 1,
     .has_fbc = 1,
     .hws_needs_physical = 1,
+    .unfenced_needs_alignment = 1,
 };
 
 static const struct intel_device_info intel_i945g_info = {
@@ -120,6 +123,7 @@ static const struct intel_device_info intel_i945g_info = {
     .has_hotplug = 1, .cursor_needs_physical = 1,
     .has_overlay = 1, .overlay_needs_physical = 1,
     .hws_needs_physical = 1,
+    .unfenced_needs_alignment = 1,
 };
 
 static const struct intel_device_info intel_i945gm_info = {
@@ -130,6 +134,7 @@ static const struct intel_device_info intel_i945gm_info = {
     .supports_tv = 1,
     .has_fbc = 1,
     .hws_needs_physical = 1,
+    .unfenced_needs_alignment = 1,
 };
 
 static const struct intel_device_info intel_g33_info = {
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -1705,7 +1705,7 @@ i915_perf_open_ioctl_locked(struct drm_i915_private *dev_priv,
      */
     if (WARN_ON(stream->sample_flags != props->sample_flags)) {
         ret = -ENODEV;
-        goto err_alloc;
+        goto err_flags;
     }
 
     list_add(&stream->link, &dev_priv->perf.streams);
@@ -1728,6 +1728,7 @@ i915_perf_open_ioctl_locked(struct drm_i915_private *dev_priv,
 
 err_open:
     list_del(&stream->link);
+err_flags:
     if (stream->ops->destroy)
         stream->ops->destroy(stream);
 err_alloc:
@@ -1793,6 +1794,11 @@ static int read_properties_unlocked(struct drm_i915_private *dev_priv,
         if (ret)
             return ret;
 
+        if (id == 0 || id >= DRM_I915_PERF_PROP_MAX) {
+            DRM_DEBUG("Unknown i915 perf property ID\n");
+            return -EINVAL;
+        }
+
         switch ((enum drm_i915_perf_property_id)id) {
         case DRM_I915_PERF_PROP_CTX_HANDLE:
             props->single_context = 1;
@@ -1862,9 +1868,8 @@ static int read_properties_unlocked(struct drm_i915_private *dev_priv,
             props->oa_periodic = true;
             props->oa_period_exponent = value;
             break;
-        default:
+        case DRM_I915_PERF_PROP_MAX:
             MISSING_CASE(id);
-            DRM_DEBUG("Unknown i915 perf property ID\n");
             return -EINVAL;
         }
 
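With the up-front range check, invalid IDs are rejected before the switch, so the switch only ever sees real enum values; the former default: arm becomes an explicit case that MISSING_CASE() flags as a driver bug rather than a user-triggerable warning. A compact model of the pattern (the enum and values are stand-ins):

    #include <stdio.h>

    /* Stand-in IDs; the real ones are enum drm_i915_perf_property_id. */
    enum prop_id { PROP_INVALID = 0, PROP_CTX = 1, PROP_SAMPLE = 2, PROP_MAX };

    static int read_property(unsigned long id)
    {
        /* reject user-controlled ids before the switch ... */
        if (id == 0 || id >= PROP_MAX) {
            fprintf(stderr, "Unknown property ID %lu\n", id);
            return -1;
        }

        /* ... so a hole here is a driver bug, not user input */
        switch ((enum prop_id)id) {
        case PROP_CTX:
            return 1;
        case PROP_SAMPLE:
            return 2;
        case PROP_INVALID:
        case PROP_MAX:
            fprintf(stderr, "missing case %lu\n", id); /* models MISSING_CASE() */
            return -1;
        }
        return -1;
    }

    int main(void)
    {
        printf("%d %d\n", read_property(1), read_property(99));
        return 0;
    }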
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -670,15 +670,14 @@ static void execlists_submit_request(struct drm_i915_gem_request *request)
 static struct intel_engine_cs *
 pt_lock_engine(struct i915_priotree *pt, struct intel_engine_cs *locked)
 {
-    struct intel_engine_cs *engine;
+    struct intel_engine_cs *engine =
+        container_of(pt, struct drm_i915_gem_request, priotree)->engine;
+
+    GEM_BUG_ON(!locked);
 
-    engine = container_of(pt,
-                  struct drm_i915_gem_request,
-                  priotree)->engine;
     if (engine != locked) {
-        if (locked)
-            spin_unlock_irq(&locked->timeline->lock);
-        spin_lock_irq(&engine->timeline->lock);
+        spin_unlock(&locked->timeline->lock);
+        spin_lock(&engine->timeline->lock);
     }
 
     return engine;
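pt_lock_engine() now assumes the caller already holds some engine's timeline lock (hence GEM_BUG_ON(!locked)) and swaps locks hand-over-hand with plain spin_lock/spin_unlock, leaving interrupts disabled by the caller's spin_lock_irq() for the whole walk. A pthread model of the handoff (mutexes stand in for spinlocks; irq state has no userspace analogue):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t engine_a = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t engine_b = PTHREAD_MUTEX_INITIALIZER;

    /* Models pt_lock_engine(): returns holding want's lock, never both. */
    static pthread_mutex_t *lock_engine(pthread_mutex_t *held,
                                        pthread_mutex_t *want)
    {
        if (want != held) {
            pthread_mutex_unlock(held); /* spin_unlock(&locked->timeline->lock) */
            pthread_mutex_lock(want);   /* spin_lock(&engine->timeline->lock) */
        }
        return want;
    }

    int main(void)
    {
        pthread_mutex_t *held;

        pthread_mutex_lock(&engine_a);        /* caller's spin_lock_irq() */
        held = lock_engine(&engine_a, &engine_b);
        held = lock_engine(held, &engine_b);  /* same engine: no-op */
        pthread_mutex_unlock(held);           /* caller's spin_unlock_irq() */
        puts("ok");
        return 0;
    }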
@@ -686,7 +685,7 @@ pt_lock_engine(struct i915_priotree *pt, struct intel_engine_cs *locked)
 
 static void execlists_schedule(struct drm_i915_gem_request *request, int prio)
 {
-    struct intel_engine_cs *engine = NULL;
+    struct intel_engine_cs *engine;
     struct i915_dependency *dep, *p;
     struct i915_dependency stack;
     LIST_HEAD(dfs);
@@ -720,26 +719,23 @@ static void execlists_schedule(struct drm_i915_gem_request *request, int prio)
     list_for_each_entry_safe(dep, p, &dfs, dfs_link) {
         struct i915_priotree *pt = dep->signaler;
 
-        list_for_each_entry(p, &pt->signalers_list, signal_link)
+        /* Within an engine, there can be no cycle, but we may
+         * refer to the same dependency chain multiple times
+         * (redundant dependencies are not eliminated) and across
+         * engines.
+         */
+        list_for_each_entry(p, &pt->signalers_list, signal_link) {
+            GEM_BUG_ON(p->signaler->priority < pt->priority);
             if (prio > READ_ONCE(p->signaler->priority))
                 list_move_tail(&p->dfs_link, &dfs);
+        }
 
         list_safe_reset_next(dep, p, dfs_link);
-        if (!RB_EMPTY_NODE(&pt->node))
-            continue;
-
-        engine = pt_lock_engine(pt, engine);
-
-        /* If it is not already in the rbtree, we can update the
-         * priority inplace and skip over it (and its dependencies)
-         * if it is referenced *again* as we descend the dfs.
-         */
-        if (prio > pt->priority && RB_EMPTY_NODE(&pt->node)) {
-            pt->priority = prio;
-            list_del_init(&dep->dfs_link);
-        }
     }
 
+    engine = request->engine;
+    spin_lock_irq(&engine->timeline->lock);
+
     /* Fifo and depth-first replacement ensure our deps execute before us */
     list_for_each_entry_safe_reverse(dep, p, &dfs, dfs_link) {
         struct i915_priotree *pt = dep->signaler;
@@ -751,16 +747,15 @@ static void execlists_schedule(struct drm_i915_gem_request *request, int prio)
         if (prio <= pt->priority)
             continue;
 
-        GEM_BUG_ON(RB_EMPTY_NODE(&pt->node));
-
         pt->priority = prio;
-        rb_erase(&pt->node, &engine->execlist_queue);
-        if (insert_request(pt, &engine->execlist_queue))
-            engine->execlist_first = &pt->node;
+        if (!RB_EMPTY_NODE(&pt->node)) {
+            rb_erase(&pt->node, &engine->execlist_queue);
+            if (insert_request(pt, &engine->execlist_queue))
+                engine->execlist_first = &pt->node;
+        }
     }
 
-    if (engine)
-        spin_unlock_irq(&engine->timeline->lock);
+    spin_unlock_irq(&engine->timeline->lock);
 
     /* XXX Do we need to preempt to make room for us and our deps? */
 }
@@ -1440,7 +1435,9 @@ static void reset_common_ring(struct intel_engine_cs *engine,
     GEM_BUG_ON(request->ctx != port[0].request->ctx);
 
     /* Reset WaIdleLiteRestore:bdw,skl as well */
-    request->tail = request->wa_tail - WA_TAIL_DWORDS * sizeof(u32);
+    request->tail =
+        intel_ring_wrap(request->ring,
+                request->wa_tail - WA_TAIL_DWORDS*sizeof(u32));
 }
 
 static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -521,11 +521,17 @@ static inline void intel_ring_advance(struct intel_ring *ring)
      */
 }
 
+static inline u32
+intel_ring_wrap(const struct intel_ring *ring, u32 pos)
+{
+    return pos & (ring->size - 1);
+}
+
 static inline u32 intel_ring_offset(struct intel_ring *ring, void *addr)
 {
     /* Don't write ring->size (equivalent to 0) as that hangs some GPUs. */
     u32 offset = addr - ring->vaddr;
-    return offset & (ring->size - 1);
+    return intel_ring_wrap(ring, offset);
 }
 
 int __intel_ring_space(int head, int tail, int size);
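intel_ring_wrap() is just a power-of-two mask, but that is exactly what the reset path above needs: request->wa_tail - WA_TAIL_DWORDS*sizeof(u32) can underflow past zero when the workaround batch sits at the start of the ring, and masking folds the result back into [0, size). A standalone check of the arithmetic:

    #include <stdint.h>
    #include <stdio.h>

    /* Same arithmetic as intel_ring_wrap(); size must be a power of two. */
    static uint32_t ring_wrap(uint32_t size, uint32_t pos)
    {
        return pos & (size - 1);
    }

    int main(void)
    {
        const uint32_t size = 4096;

        printf("%u\n", ring_wrap(size, 100));              /* 100: in range */
        printf("%u\n", ring_wrap(size, (uint32_t)8 - 16)); /* 4088: underflow wrapped */
        return 0;
    }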