Merge tag 'drm-intel-next-2017-10-23' of git://anongit.freedesktop.org/drm/drm-intel into drm-next

This time really the last i915 batch for v4.15:

- PSR state tracking in crtc state (Ville)
- Fix eviction when the GGTT is idle but full (Chris)
- BDW DP aux channel timeout fix (James)
- LSPCON detection fixes (Shashank)
- Use for_each_pipe to iterate over pipes (Mika Kahola)
- Replace *_reference/unreference() or *_ref/unref with _get/put() (Harsha)
- Refactoring and preparation for DDI encoder type cleanup (Ville)
- Broadwell DDI FDI buf translation fix (Chris)
- Read CSB and CSB write pointer from HWSP in GVT-g VM if available (Weinan)
- GuC/HuC firmware loader refactoring (Michal)
- Make shrinking more effective and not stall so much (Chris)
- Cannonlake PLL fixes (Rodrigo)
- DP MST connector error propagation fixes (James)
- Convert timers to use timer_setup (Kees Cook)
- Skylake plane enable/disable unification (Juha-Pekka)
- Fix to actually free driver internal objects when requested (Chris)
- DDI buf trans refactoring (Ville)
- Skip waking the device to service pwrite (Chris)
- Improve DSI VBT backlight parsing abstraction (Madhav)
- Cannonlake VBT DDC pin mapping fix (Rodrigo)

* tag 'drm-intel-next-2017-10-23' of git://anongit.freedesktop.org/drm/drm-intel: (87 commits)
  drm/i915: Update DRIVER_DATE to 20171023
  drm/i915/cnl: Map VBT DDC Pin to BSpec DDC Pin.
  drm/i915: Let's use more enum intel_dpll_id pll_id.
  drm/i915: Use existing DSI backlight ports info
  drm/i915: Parse DSI backlight/cabc ports.
  drm/i915: Skip waking the device to service pwrite
  drm/i915/crt: split compute_config hook by platforms
  drm/i915: remove g4x lowfreq_avail and has_pipe_cxsr
  drm/i915: Drop the redundant hdmi prefix/suffix from a lot of variables
  drm/i915: Unify error handling for missing DDI buf trans tables
  drm/i915: Centralize the SKL DDI A/E vs. B/C/D buf trans handling
  drm/i915: Kill off the BXT buf_trans default_index
  drm/i915: Pass encoder type to cnl_ddi_vswing_sequence() explicitly
  drm/i915: Integrate BXT into intel_ddi_dp_voltage_max()
  drm/i915: Pass the level to intel_prepare_hdmi_ddi_buffers()
  drm/i915: Pass the encoder type explicitly to skl_set_iboost()
  drm/i915: Extract intel_ddi_get_buf_trans_hdmi()
  drm/i915: Relocate intel_ddi_get_buf_trans_*() functions
  drm/i915: Flush the idle-worker for debugfs/i915_drop_caches
  drm/i915: adjust get_crtc_fence_y_offset() to use base.y instead of crtc.y
  ...
commit 36a5fdf76d
@@ -410,6 +410,7 @@ int drm_lspcon_get_mode(struct i2c_adapter *adapter,
 {
 	u8 data;
 	int ret = 0;
+	int retry;
 
 	if (!mode) {
 		DRM_ERROR("NULL input\n");
@@ -417,10 +418,19 @@ int drm_lspcon_get_mode(struct i2c_adapter *adapter,
 	}
 
 	/* Read Status: i2c over aux */
-	ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_LSPCON_CURRENT_MODE,
-				    &data, sizeof(data));
+	for (retry = 0; retry < 6; retry++) {
+		if (retry)
+			usleep_range(500, 1000);
+
+		ret = drm_dp_dual_mode_read(adapter,
+					    DP_DUAL_MODE_LSPCON_CURRENT_MODE,
+					    &data, sizeof(data));
+		if (!ret)
+			break;
+	}
+
 	if (ret < 0) {
-		DRM_ERROR("LSPCON read(0x80, 0x41) failed\n");
+		DRM_DEBUG_KMS("LSPCON read(0x80, 0x41) failed\n");
 		return -EFAULT;
 	}
 
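The hunk above replaces a single LSPCON mode read with a bounded retry loop. As a rough sketch of the idiom, distilled into a standalone helper (the helper name is hypothetical, not part of the patch): cap the attempts, back off only between retries rather than before the first attempt, and hand the last error code back to the caller.

	#include <linux/delay.h>
	#include <drm/drm_dp_dual_mode_helper.h>

	/* Hypothetical wrapper around the bounded-retry idiom used above. */
	static int lspcon_read_mode_retry(struct i2c_adapter *adapter, u8 *data)
	{
		int ret = 0;
		int retry;

		for (retry = 0; retry < 6; retry++) {
			if (retry)	/* no delay before the first attempt */
				usleep_range(500, 1000);

			ret = drm_dp_dual_mode_read(adapter,
						    DP_DUAL_MODE_LSPCON_CURRENT_MODE,
						    data, sizeof(*data));
			if (!ret)
				break;
		}

		return ret;	/* last error, or 0 on success */
	}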
@@ -64,7 +64,7 @@ i915-y += intel_uc.o \
 	  intel_guc.o \
 	  intel_guc_ct.o \
 	  intel_guc_log.o \
-	  intel_guc_loader.o \
+	  intel_guc_fw.o \
 	  intel_huc.o \
 	  i915_guc_submission.o
 
@@ -83,7 +83,7 @@ static char get_active_flag(struct drm_i915_gem_object *obj)
 
 static char get_pin_flag(struct drm_i915_gem_object *obj)
 {
-	return obj->pin_display ? 'p' : ' ';
+	return obj->pin_global ? 'p' : ' ';
 }
 
 static char get_tiling_flag(struct drm_i915_gem_object *obj)
@@ -180,8 +180,8 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 			pin_count++;
 	}
 	seq_printf(m, " (pinned x %d)", pin_count);
-	if (obj->pin_display)
-		seq_printf(m, " (display)");
+	if (obj->pin_global)
+		seq_printf(m, " (global)");
 	list_for_each_entry(vma, &obj->vma_list, obj_link) {
 		if (!drm_mm_node_allocated(&vma->node))
 			continue;
@@ -271,7 +271,9 @@ static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
 		goto out;
 
 	total_obj_size = total_gtt_size = count = 0;
-	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_link) {
+
+	spin_lock(&dev_priv->mm.obj_lock);
+	list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
 		if (count == total)
 			break;
 
@@ -283,7 +285,7 @@ static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
 		total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
 
 	}
-	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_link) {
+	list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
 		if (count == total)
 			break;
 
@@ -293,6 +295,7 @@ static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
 		objects[count++] = obj;
 		total_obj_size += obj->base.size;
 	}
+	spin_unlock(&dev_priv->mm.obj_lock);
 
 	sort(objects, count, sizeof(*objects), obj_rank_by_stolen, NULL);
 
@@ -454,7 +457,9 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
 	mapped_size = mapped_count = 0;
 	purgeable_size = purgeable_count = 0;
 	huge_size = huge_count = 0;
-	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_link) {
+
+	spin_lock(&dev_priv->mm.obj_lock);
+	list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
 		size += obj->base.size;
 		++count;
 
@@ -477,11 +482,11 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
 	seq_printf(m, "%u unbound objects, %llu bytes\n", count, size);
 
 	size = count = dpy_size = dpy_count = 0;
-	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_link) {
+	list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
 		size += obj->base.size;
 		++count;
 
-		if (obj->pin_display) {
+		if (obj->pin_global) {
 			dpy_size += obj->base.size;
 			++dpy_count;
 		}
@@ -502,6 +507,8 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
 			page_sizes |= obj->mm.page_sizes.sg;
 		}
 	}
+	spin_unlock(&dev_priv->mm.obj_lock);
+
 	seq_printf(m, "%u bound objects, %llu bytes\n",
 		   count, size);
 	seq_printf(m, "%u purgeable objects, %llu bytes\n",
@@ -512,7 +519,7 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
 		   huge_count,
 		   stringify_page_sizes(page_sizes, buf, sizeof(buf)),
 		   huge_size);
-	seq_printf(m, "%u display objects (pinned), %llu bytes\n",
+	seq_printf(m, "%u display objects (globally pinned), %llu bytes\n",
 		   dpy_count, dpy_size);
 
 	seq_printf(m, "%llu [%llu] gtt total\n",
@@ -568,32 +575,46 @@ static int i915_gem_gtt_info(struct seq_file *m, void *data)
 	struct drm_info_node *node = m->private;
 	struct drm_i915_private *dev_priv = node_to_i915(node);
 	struct drm_device *dev = &dev_priv->drm;
-	bool show_pin_display_only = !!node->info_ent->data;
+	struct drm_i915_gem_object **objects;
 	struct drm_i915_gem_object *obj;
 	u64 total_obj_size, total_gtt_size;
+	unsigned long nobject, n;
 	int count, ret;
 
+	nobject = READ_ONCE(dev_priv->mm.object_count);
+	objects = kvmalloc_array(nobject, sizeof(*objects), GFP_KERNEL);
+	if (!objects)
+		return -ENOMEM;
+
 	ret = mutex_lock_interruptible(&dev->struct_mutex);
 	if (ret)
 		return ret;
 
-	total_obj_size = total_gtt_size = count = 0;
-	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_link) {
-		if (show_pin_display_only && !obj->pin_display)
-			continue;
+	count = 0;
+	spin_lock(&dev_priv->mm.obj_lock);
+	list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
+		objects[count++] = obj;
+		if (count == nobject)
+			break;
+	}
+	spin_unlock(&dev_priv->mm.obj_lock);
+
+	total_obj_size = total_gtt_size = 0;
+	for (n = 0; n < count; n++) {
+		obj = objects[n];
 
 		seq_puts(m, "   ");
 		describe_obj(m, obj);
 		seq_putc(m, '\n');
 		total_obj_size += obj->base.size;
 		total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
-		count++;
 	}
 
 	mutex_unlock(&dev->struct_mutex);
 
 	seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
 		   count, total_obj_size, total_gtt_size);
+	kvfree(objects);
 
 	return 0;
 }
@@ -643,54 +664,6 @@ static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
 	return 0;
 }
 
-static void print_request(struct seq_file *m,
-			  struct drm_i915_gem_request *rq,
-			  const char *prefix)
-{
-	seq_printf(m, "%s%x [%x:%x] prio=%d @ %dms: %s\n", prefix,
-		   rq->global_seqno, rq->ctx->hw_id, rq->fence.seqno,
-		   rq->priotree.priority,
-		   jiffies_to_msecs(jiffies - rq->emitted_jiffies),
-		   rq->timeline->common->name);
-}
-
-static int i915_gem_request_info(struct seq_file *m, void *data)
-{
-	struct drm_i915_private *dev_priv = node_to_i915(m->private);
-	struct drm_device *dev = &dev_priv->drm;
-	struct drm_i915_gem_request *req;
-	struct intel_engine_cs *engine;
-	enum intel_engine_id id;
-	int ret, any;
-
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
-	if (ret)
-		return ret;
-
-	any = 0;
-	for_each_engine(engine, dev_priv, id) {
-		int count;
-
-		count = 0;
-		list_for_each_entry(req, &engine->timeline->requests, link)
-			count++;
-		if (count == 0)
-			continue;
-
-		seq_printf(m, "%s requests: %d\n", engine->name, count);
-		list_for_each_entry(req, &engine->timeline->requests, link)
-			print_request(m, req, "    ");
-
-		any++;
-	}
-	mutex_unlock(&dev->struct_mutex);
-
-	if (any == 0)
-		seq_puts(m, "No requests\n");
-
-	return 0;
-}
-
 static void i915_ring_seqno_info(struct seq_file *m,
 				 struct intel_engine_cs *engine)
 {
@@ -2386,27 +2359,13 @@ static int i915_llc(struct seq_file *m, void *data)
 static int i915_huc_load_status_info(struct seq_file *m, void *data)
 {
 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
-	struct intel_uc_fw *huc_fw = &dev_priv->huc.fw;
+	struct drm_printer p;
 
 	if (!HAS_HUC_UCODE(dev_priv))
 		return 0;
 
-	seq_puts(m, "HuC firmware status:\n");
-	seq_printf(m, "\tpath: %s\n", huc_fw->path);
-	seq_printf(m, "\tfetch: %s\n",
-		   intel_uc_fw_status_repr(huc_fw->fetch_status));
-	seq_printf(m, "\tload: %s\n",
-		   intel_uc_fw_status_repr(huc_fw->load_status));
-	seq_printf(m, "\tversion wanted: %d.%d\n",
-		   huc_fw->major_ver_wanted, huc_fw->minor_ver_wanted);
-	seq_printf(m, "\tversion found: %d.%d\n",
-		   huc_fw->major_ver_found, huc_fw->minor_ver_found);
-	seq_printf(m, "\theader: offset is %d; size = %d\n",
-		   huc_fw->header_offset, huc_fw->header_size);
-	seq_printf(m, "\tuCode: offset is %d; size = %d\n",
-		   huc_fw->ucode_offset, huc_fw->ucode_size);
-	seq_printf(m, "\tRSA: offset is %d; size = %d\n",
-		   huc_fw->rsa_offset, huc_fw->rsa_size);
+	p = drm_seq_file_printer(m);
+	intel_uc_fw_dump(&dev_priv->huc.fw, &p);
 
 	intel_runtime_pm_get(dev_priv);
 	seq_printf(m, "\nHuC status 0x%08x:\n", I915_READ(HUC_STATUS2));
@@ -2418,29 +2377,14 @@ static int i915_huc_load_status_info(struct seq_file *m, void *data)
 static int i915_guc_load_status_info(struct seq_file *m, void *data)
 {
 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
-	struct intel_uc_fw *guc_fw = &dev_priv->guc.fw;
+	struct drm_printer p;
 	u32 tmp, i;
 
 	if (!HAS_GUC_UCODE(dev_priv))
 		return 0;
 
-	seq_printf(m, "GuC firmware status:\n");
-	seq_printf(m, "\tpath: %s\n",
-		   guc_fw->path);
-	seq_printf(m, "\tfetch: %s\n",
-		   intel_uc_fw_status_repr(guc_fw->fetch_status));
-	seq_printf(m, "\tload: %s\n",
-		   intel_uc_fw_status_repr(guc_fw->load_status));
-	seq_printf(m, "\tversion wanted: %d.%d\n",
-		   guc_fw->major_ver_wanted, guc_fw->minor_ver_wanted);
-	seq_printf(m, "\tversion found: %d.%d\n",
-		   guc_fw->major_ver_found, guc_fw->minor_ver_found);
-	seq_printf(m, "\theader: offset is %d; size = %d\n",
-		   guc_fw->header_offset, guc_fw->header_size);
-	seq_printf(m, "\tuCode: offset is %d; size = %d\n",
-		   guc_fw->ucode_offset, guc_fw->ucode_size);
-	seq_printf(m, "\tRSA: offset is %d; size = %d\n",
-		   guc_fw->rsa_offset, guc_fw->rsa_size);
+	p = drm_seq_file_printer(m);
+	intel_uc_fw_dump(&dev_priv->guc.fw, &p);
 
 	intel_runtime_pm_get(dev_priv);
 
@@ -3310,6 +3254,16 @@ static int i915_engine_info(struct seq_file *m, void *unused)
 	return 0;
 }
 
+static int i915_shrinker_info(struct seq_file *m, void *unused)
+{
+	struct drm_i915_private *i915 = node_to_i915(m->private);
+
+	seq_printf(m, "seeks = %d\n", i915->mm.shrinker.seeks);
+	seq_printf(m, "batch = %lu\n", i915->mm.shrinker.batch);
+
+	return 0;
+}
+
 static int i915_semaphore_status(struct seq_file *m, void *unused)
 {
 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
@@ -4225,18 +4179,20 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_ring_test_irq_fops,
 			i915_ring_test_irq_get, i915_ring_test_irq_set,
 			"0x%08llx\n");
 
-#define DROP_UNBOUND 0x1
-#define DROP_BOUND 0x2
-#define DROP_RETIRE 0x4
-#define DROP_ACTIVE 0x8
-#define DROP_FREED 0x10
-#define DROP_SHRINK_ALL 0x20
+#define DROP_UNBOUND	BIT(0)
+#define DROP_BOUND	BIT(1)
+#define DROP_RETIRE	BIT(2)
+#define DROP_ACTIVE	BIT(3)
+#define DROP_FREED	BIT(4)
+#define DROP_SHRINK_ALL	BIT(5)
+#define DROP_IDLE	BIT(6)
 #define DROP_ALL (DROP_UNBOUND	| \
 		  DROP_BOUND	| \
 		  DROP_RETIRE	| \
 		  DROP_ACTIVE	| \
 		  DROP_FREED	| \
-		  DROP_SHRINK_ALL)
+		  DROP_SHRINK_ALL |\
+		  DROP_IDLE)
 static int
 i915_drop_caches_get(void *data, u64 *val)
 {
@@ -4252,7 +4208,8 @@ i915_drop_caches_set(void *data, u64 val)
 	struct drm_device *dev = &dev_priv->drm;
 	int ret = 0;
 
-	DRM_DEBUG("Dropping caches: 0x%08llx\n", val);
+	DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n",
+		  val, val & DROP_ALL);
 
 	/* No need to check and wait for gpu resets, only libdrm auto-restarts
 	 * on ioctls on -EAGAIN. */
@@ -4283,6 +4240,9 @@ i915_drop_caches_set(void *data, u64 val)
 		i915_gem_shrink_all(dev_priv);
 		fs_reclaim_release(GFP_KERNEL);
 
+	if (val & DROP_IDLE)
+		drain_delayed_work(&dev_priv->gt.idle_work);
+
 	if (val & DROP_FREED) {
 		synchronize_rcu();
 		i915_gem_drain_freed_objects(dev_priv);
@@ -4751,9 +4711,7 @@ static const struct drm_info_list i915_debugfs_list[] = {
 	{"i915_capabilities", i915_capabilities, 0},
 	{"i915_gem_objects", i915_gem_object_info, 0},
 	{"i915_gem_gtt", i915_gem_gtt_info, 0},
-	{"i915_gem_pin_display", i915_gem_gtt_info, 0, (void *)1},
 	{"i915_gem_stolen", i915_gem_stolen_list_info },
-	{"i915_gem_request", i915_gem_request_info, 0},
 	{"i915_gem_seqno", i915_gem_seqno_info, 0},
 	{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
 	{"i915_gem_interrupt", i915_interrupt_info, 0},
@@ -4791,6 +4749,7 @@ static const struct drm_info_list i915_debugfs_list[] = {
 	{"i915_dmc_info", i915_dmc_info, 0},
 	{"i915_display_info", i915_display_info, 0},
 	{"i915_engine_info", i915_engine_info, 0},
+	{"i915_shrinker_info", i915_shrinker_info, 0},
 	{"i915_semaphore_status", i915_semaphore_status, 0},
 	{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
 	{"i915_dp_mst_info", i915_dp_mst_info, 0},
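The DROP_* rewrite in the hunks above swaps hand-written hex constants for the kernel's BIT() helper; BIT(n) expands to (1UL << n), so the flag values are unchanged and a new bit such as DROP_IDLE slots in without anyone computing the next power of two by hand. A minimal sketch of the idiom, with hypothetical flag names:

	#include <linux/bitops.h>
	#include <linux/types.h>

	#define MY_DROP_A	BIT(0)	/* == 0x1 */
	#define MY_DROP_B	BIT(1)	/* == 0x2 */
	#define MY_DROP_ALL	(MY_DROP_A | MY_DROP_B)

	static bool wants_drop_b(u64 val)
	{
		return val & MY_DROP_B;	/* test one flag in the mask */
	}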
@@ -80,8 +80,8 @@
 
 #define DRIVER_NAME		"i915"
 #define DRIVER_DESC		"Intel Graphics"
-#define DRIVER_DATE		"20171012"
-#define DRIVER_TIMESTAMP	1507831511
+#define DRIVER_DATE		"20171023"
+#define DRIVER_TIMESTAMP	1508748913
 
 /* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
  * WARN_ON()) for hw state sanity checks to check for unexpected conditions
@@ -785,7 +785,6 @@ struct intel_csr {
 	func(has_logical_ring_contexts); \
 	func(has_logical_ring_preemption); \
 	func(has_overlay); \
-	func(has_pipe_cxsr); \
 	func(has_pooled_eu); \
 	func(has_psr); \
 	func(has_rc6); \
@@ -1108,6 +1107,16 @@ struct intel_fbc {
 		int src_w;
 		int src_h;
 		bool visible;
+		/*
+		 * Display surface base address adjustement for
+		 * pageflips. Note that on gen4+ this only adjusts up
+		 * to a tile, offsets within a tile are handled in
+		 * the hw itself (with the TILEOFF register).
+		 */
+		int adjusted_x;
+		int adjusted_y;
+
+		int y;
 	} plane;
 
 	struct {
@@ -1490,6 +1499,9 @@ struct i915_gem_mm {
 	 * always the inner lock when overlapping with struct_mutex. */
 	struct mutex stolen_lock;
 
+	/* Protects bound_list/unbound_list and #drm_i915_gem_object.mm.link */
+	spinlock_t obj_lock;
+
 	/** List of all objects in gtt_space. Used to restore gtt
 	 * mappings on resume */
 	struct list_head bound_list;
@@ -1510,6 +1522,7 @@ struct i915_gem_mm {
 	 */
 	struct llist_head free_list;
 	struct work_struct free_work;
+	spinlock_t free_lock;
 
 	/**
 	 * Small stash of WC pages
@@ -1765,6 +1778,8 @@ struct intel_vbt_data {
 		u16 panel_id;
 		struct mipi_config *config;
 		struct mipi_pps_data *pps;
+		u16 bl_ports;
+		u16 cabc_ports;
 		u8 seq_version;
 		u32 size;
 		u8 *data;
@@ -1960,13 +1975,7 @@ struct i915_wa_reg {
 	u32 mask;
 };
 
-/*
- * RING_MAX_NONPRIV_SLOTS is per-engine but at this point we are only
- * allowing it for RCS as we don't foresee any requirement of having
- * a whitelist for other engines. When it is really required for
- * other engines then the limit need to be increased.
- */
-#define I915_MAX_WA_REGS (16 + RING_MAX_NONPRIV_SLOTS)
+#define I915_MAX_WA_REGS 16
 
 struct i915_workarounds {
 	struct i915_wa_reg reg[I915_MAX_WA_REGS];
@@ -3077,6 +3086,7 @@ intel_info(const struct drm_i915_private *dev_priv)
 
 #define CNL_REVID_A0		0x0
 #define CNL_REVID_B0		0x1
+#define CNL_REVID_C0		0x2
 
 #define IS_CNL_REVID(p, since, until) \
 	(IS_CANNONLAKE(p) && IS_REVID(p, since, until))
@@ -3168,7 +3178,6 @@ intel_info(const struct drm_i915_private *dev_priv)
 #define I915_HAS_HOTPLUG(dev_priv)	((dev_priv)->info.has_hotplug)
 
 #define HAS_FW_BLC(dev_priv)	(INTEL_GEN(dev_priv) > 2)
-#define HAS_PIPE_CXSR(dev_priv)	((dev_priv)->info.has_pipe_cxsr)
 #define HAS_FBC(dev_priv)	((dev_priv)->info.has_fbc)
 #define HAS_CUR_FBC(dev_priv)	(!HAS_GMCH_DISPLAY(dev_priv) && INTEL_INFO(dev_priv)->gen >= 7)
 
@@ -3565,10 +3574,16 @@ i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
 	return __i915_gem_object_get_pages(obj);
 }
 
+static inline bool
+i915_gem_object_has_pages(struct drm_i915_gem_object *obj)
+{
+	return !IS_ERR_OR_NULL(READ_ONCE(obj->mm.pages));
+}
+
 static inline void
 __i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
 {
-	GEM_BUG_ON(!obj->mm.pages);
+	GEM_BUG_ON(!i915_gem_object_has_pages(obj));
 
 	atomic_inc(&obj->mm.pages_pin_count);
 }
@@ -3582,8 +3597,8 @@ i915_gem_object_has_pinned_pages(struct drm_i915_gem_object *obj)
 static inline void
 __i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
 {
+	GEM_BUG_ON(!i915_gem_object_has_pages(obj));
 	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
-	GEM_BUG_ON(!obj->mm.pages);
 
 	atomic_dec(&obj->mm.pages_pin_count);
 }
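The new i915_gem_object_has_pages() helper above exists because the pages field can legitimately hold NULL or an ERR_PTR() as well as a valid table, and the rest of this series converts open-coded checks to the helper. A sketch of the same predicate pattern in isolation (struct and names are hypothetical):

	#include <linux/err.h>
	#include <linux/compiler.h>
	#include <linux/scatterlist.h>

	struct resource_holder {
		struct sg_table *pages;	/* NULL, ERR_PTR() or valid */
	};

	static inline bool holder_has_pages(struct resource_holder *h)
	{
		/* READ_ONCE: the field may be swapped out concurrently */
		return !IS_ERR_OR_NULL(READ_ONCE(h->pages));
	}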
@@ -56,7 +56,7 @@ static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
 	if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
 		return true;
 
-	return obj->pin_display;
+	return obj->pin_global; /* currently in use by HW, keep flushed */
 }
 
 static int
@@ -1240,7 +1240,23 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
 	if (ret)
 		return ret;
 
-	intel_runtime_pm_get(i915);
+	if (i915_gem_object_has_struct_page(obj)) {
+		/*
+		 * Avoid waking the device up if we can fallback, as
+		 * waking/resuming is very slow (worst-case 10-100 ms
+		 * depending on PCI sleeps and our own resume time).
+		 * This easily dwarfs any performance advantage from
+		 * using the cache bypass of indirect GGTT access.
+		 */
+		if (!intel_runtime_pm_get_if_in_use(i915)) {
+			ret = -EFAULT;
+			goto out_unlock;
+		}
+	} else {
+		/* No backing pages, no fallback, we must force GGTT access */
+		intel_runtime_pm_get(i915);
+	}
+
 	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
 				       PIN_MAPPABLE |
 				       PIN_NONFAULT |
@@ -1257,7 +1273,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
 	if (IS_ERR(vma)) {
 		ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
 		if (ret)
-			goto out_unlock;
+			goto out_rpm;
 		GEM_BUG_ON(!node.allocated);
 	}
 
@@ -1320,8 +1336,9 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
 	} else {
 		i915_vma_unpin(vma);
 	}
-out_unlock:
+out_rpm:
 	intel_runtime_pm_put(i915);
+out_unlock:
 	mutex_unlock(&i915->drm.struct_mutex);
 	return ret;
 }
@@ -1537,6 +1554,8 @@ static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
 	struct list_head *list;
 	struct i915_vma *vma;
 
+	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
+
 	list_for_each_entry(vma, &obj->vma_list, obj_link) {
 		if (!i915_vma_is_ggtt(vma))
 			break;
@@ -1551,8 +1570,10 @@ static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
 	}
 
 	i915 = to_i915(obj->base.dev);
+	spin_lock(&i915->mm.obj_lock);
 	list = obj->bind_count ? &i915->mm.bound_list : &i915->mm.unbound_list;
-	list_move_tail(&obj->global_link, list);
+	list_move_tail(&obj->mm.link, list);
+	spin_unlock(&i915->mm.obj_lock);
 }
 
 /**
@@ -2196,7 +2217,7 @@ void __i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
 	struct address_space *mapping;
 
 	lockdep_assert_held(&obj->mm.lock);
-	GEM_BUG_ON(obj->mm.pages);
+	GEM_BUG_ON(i915_gem_object_has_pages(obj));
 
 	switch (obj->mm.madv) {
 	case I915_MADV_DONTNEED:
@@ -2253,13 +2274,14 @@ static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
 void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
 				 enum i915_mm_subclass subclass)
 {
+	struct drm_i915_private *i915 = to_i915(obj->base.dev);
 	struct sg_table *pages;
 
 	if (i915_gem_object_has_pinned_pages(obj))
 		return;
 
 	GEM_BUG_ON(obj->bind_count);
-	if (!READ_ONCE(obj->mm.pages))
+	if (!i915_gem_object_has_pages(obj))
 		return;
 
 	/* May be called by shrinker from within get_pages() (on another bo) */
@@ -2273,6 +2295,10 @@ void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
 	pages = fetch_and_zero(&obj->mm.pages);
 	GEM_BUG_ON(!pages);
 
+	spin_lock(&i915->mm.obj_lock);
+	list_del(&obj->mm.link);
+	spin_unlock(&i915->mm.obj_lock);
+
 	if (obj->mm.mapping) {
 		void *ptr;
 
@@ -2507,7 +2533,7 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
 	obj->mm.pages = pages;
 
 	if (i915_gem_object_is_tiled(obj) &&
-	    to_i915(obj->base.dev)->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
+	    i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
 		GEM_BUG_ON(obj->mm.quirked);
 		__i915_gem_object_pin_pages(obj);
 		obj->mm.quirked = true;
@@ -2529,8 +2555,11 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
 		if (obj->mm.page_sizes.phys & ~0u << i)
 			obj->mm.page_sizes.sg |= BIT(i);
 	}
-
 	GEM_BUG_ON(!HAS_PAGE_SIZES(i915, obj->mm.page_sizes.sg));
+
+	spin_lock(&i915->mm.obj_lock);
+	list_add(&obj->mm.link, &i915->mm.unbound_list);
+	spin_unlock(&i915->mm.obj_lock);
 }
 
 static int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
@@ -2563,7 +2592,7 @@ int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
 	if (err)
 		return err;
 
-	if (unlikely(IS_ERR_OR_NULL(obj->mm.pages))) {
+	if (unlikely(!i915_gem_object_has_pages(obj))) {
 		GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));
 
 		err = ____i915_gem_object_get_pages(obj);
@@ -2648,7 +2677,7 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
 	type &= ~I915_MAP_OVERRIDE;
 
 	if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) {
-		if (unlikely(IS_ERR_OR_NULL(obj->mm.pages))) {
+		if (unlikely(!i915_gem_object_has_pages(obj))) {
 			GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));
 
 			ret = ____i915_gem_object_get_pages(obj);
@@ -2660,7 +2689,7 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
 		atomic_inc(&obj->mm.pages_pin_count);
 		pinned = false;
 	}
-	GEM_BUG_ON(!obj->mm.pages);
+	GEM_BUG_ON(!i915_gem_object_has_pages(obj));
 
 	ptr = page_unpack_bits(obj->mm.mapping, &has_type);
 	if (ptr && has_type != type) {
@@ -2715,7 +2744,7 @@ i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj,
 	 * allows it to avoid the cost of retrieving a page (either swapin
 	 * or clearing-before-use) before it is overwritten.
 	 */
-	if (READ_ONCE(obj->mm.pages))
+	if (i915_gem_object_has_pages(obj))
 		return -ENODEV;
 
 	if (obj->mm.madv != I915_MADV_WILLNEED)
@@ -3090,7 +3119,6 @@ void i915_gem_reset_finish(struct drm_i915_private *dev_priv)
 
 static void nop_submit_request(struct drm_i915_gem_request *request)
 {
-	GEM_BUG_ON(!i915_terminally_wedged(&request->i915->gpu_error));
 	dma_fence_set_error(&request->fence, -EIO);
 
 	i915_gem_request_submit(request);
@@ -3100,7 +3128,6 @@ static void nop_complete_submit_request(struct drm_i915_gem_request *request)
 {
 	unsigned long flags;
 
-	GEM_BUG_ON(!i915_terminally_wedged(&request->i915->gpu_error));
 	dma_fence_set_error(&request->fence, -EIO);
 
 	spin_lock_irqsave(&request->engine->timeline->lock, flags);
@@ -3498,7 +3525,7 @@ static void __i915_gem_object_flush_for_display(struct drm_i915_gem_object *obj)
 
 void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj)
 {
-	if (!READ_ONCE(obj->pin_display))
+	if (!READ_ONCE(obj->pin_global))
 		return;
 
 	mutex_lock(&obj->base.dev->struct_mutex);
@@ -3865,10 +3892,10 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
 
 	lockdep_assert_held(&obj->base.dev->struct_mutex);
 
-	/* Mark the pin_display early so that we account for the
+	/* Mark the global pin early so that we account for the
 	 * display coherency whilst setting up the cache domains.
 	 */
-	obj->pin_display++;
+	obj->pin_global++;
 
 	/* The display engine is not coherent with the LLC cache on gen6. As
 	 * a result, we make sure that the pinning that is about to occur is
@@ -3884,7 +3911,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
 					      I915_CACHE_WT : I915_CACHE_NONE);
 	if (ret) {
 		vma = ERR_PTR(ret);
-		goto err_unpin_display;
+		goto err_unpin_global;
 	}
 
 	/* As the user may map the buffer once pinned in the display plane
@@ -3915,7 +3942,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
 		vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment, flags);
 	}
 	if (IS_ERR(vma))
-		goto err_unpin_display;
+		goto err_unpin_global;
 
 	vma->display_alignment = max_t(u64, vma->display_alignment, alignment);
 
@@ -3930,8 +3957,8 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
 
 	return vma;
 
-err_unpin_display:
-	obj->pin_display--;
+err_unpin_global:
+	obj->pin_global--;
 	return vma;
 }
 
@@ -3940,10 +3967,10 @@ i915_gem_object_unpin_from_display_plane(struct i915_vma *vma)
 {
 	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
 
-	if (WARN_ON(vma->obj->pin_display == 0))
+	if (WARN_ON(vma->obj->pin_global == 0))
 		return;
 
-	if (--vma->obj->pin_display == 0)
+	if (--vma->obj->pin_global == 0)
 		vma->display_alignment = I915_GTT_MIN_ALIGNMENT;
 
 	/* Bump the LRU to try and avoid premature eviction whilst flipping */
@@ -4283,7 +4310,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
 	if (err)
 		goto out;
 
-	if (obj->mm.pages &&
+	if (i915_gem_object_has_pages(obj) &&
 	    i915_gem_object_is_tiled(obj) &&
 	    dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
 		if (obj->mm.madv == I915_MADV_WILLNEED) {
@@ -4302,7 +4329,8 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
 	obj->mm.madv = args->madv;
 
 	/* if the object is no longer attached, discard its backing storage */
-	if (obj->mm.madv == I915_MADV_DONTNEED && !obj->mm.pages)
+	if (obj->mm.madv == I915_MADV_DONTNEED &&
+	    !i915_gem_object_has_pages(obj))
 		i915_gem_object_truncate(obj);
 
 	args->retained = obj->mm.madv != __I915_MADV_PURGED;
@@ -4328,7 +4356,6 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
 {
 	mutex_init(&obj->mm.lock);
 
-	INIT_LIST_HEAD(&obj->global_link);
 	INIT_LIST_HEAD(&obj->vma_list);
 	INIT_LIST_HEAD(&obj->lut_list);
 	INIT_LIST_HEAD(&obj->batch_pool_link);
@@ -4483,13 +4510,14 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
 {
 	struct drm_i915_gem_object *obj, *on;
 
-	mutex_lock(&i915->drm.struct_mutex);
 	intel_runtime_pm_get(i915);
-	llist_for_each_entry(obj, freed, freed) {
+	llist_for_each_entry_safe(obj, on, freed, freed) {
 		struct i915_vma *vma, *vn;
 
 		trace_i915_gem_object_destroy(obj);
 
+		mutex_lock(&i915->drm.struct_mutex);
+
 		GEM_BUG_ON(i915_gem_object_is_active(obj));
 		list_for_each_entry_safe(vma, vn,
 					 &obj->vma_list, obj_link) {
@@ -4500,14 +4528,20 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
 		GEM_BUG_ON(!list_empty(&obj->vma_list));
 		GEM_BUG_ON(!RB_EMPTY_ROOT(&obj->vma_tree));
 
-		list_del(&obj->global_link);
-	}
-	intel_runtime_pm_put(i915);
-	mutex_unlock(&i915->drm.struct_mutex);
-
-	cond_resched();
-
-	llist_for_each_entry_safe(obj, on, freed, freed) {
+		/* This serializes freeing with the shrinker. Since the free
+		 * is delayed, first by RCU then by the workqueue, we want the
+		 * shrinker to be able to free pages of unreferenced objects,
+		 * or else we may oom whilst there are plenty of deferred
+		 * freed objects.
+		 */
+		if (i915_gem_object_has_pages(obj)) {
+			spin_lock(&i915->mm.obj_lock);
+			list_del_init(&obj->mm.link);
+			spin_unlock(&i915->mm.obj_lock);
+		}
+
+		mutex_unlock(&i915->drm.struct_mutex);
+
 		GEM_BUG_ON(obj->bind_count);
 		GEM_BUG_ON(obj->userfault_count);
 		GEM_BUG_ON(atomic_read(&obj->frontbuffer_bits));
@@ -4519,7 +4553,7 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
 		if (WARN_ON(i915_gem_object_has_pinned_pages(obj)))
 			atomic_set(&obj->mm.pages_pin_count, 0);
 		__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
-		GEM_BUG_ON(obj->mm.pages);
+		GEM_BUG_ON(i915_gem_object_has_pages(obj));
 
 		if (obj->base.import_attach)
 			drm_prime_gem_destroy(&obj->base, NULL);
@@ -4530,16 +4564,29 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
 
 		kfree(obj->bit_17);
 		i915_gem_object_free(obj);
+
+		if (on)
+			cond_resched();
 	}
+	intel_runtime_pm_put(i915);
 }
 
 static void i915_gem_flush_free_objects(struct drm_i915_private *i915)
 {
 	struct llist_node *freed;
 
-	freed = llist_del_all(&i915->mm.free_list);
-	if (unlikely(freed))
+	/* Free the oldest, most stale object to keep the free_list short */
+	freed = NULL;
+	if (!llist_empty(&i915->mm.free_list)) { /* quick test for hotpath */
+		/* Only one consumer of llist_del_first() allowed */
+		spin_lock(&i915->mm.free_lock);
+		freed = llist_del_first(&i915->mm.free_list);
+		spin_unlock(&i915->mm.free_lock);
+	}
+	if (unlikely(freed)) {
+		freed->next = NULL;
 		__i915_gem_free_objects(i915, freed);
+	}
 }
 
 static void __i915_gem_free_work(struct work_struct *work)
@@ -4840,6 +4887,10 @@ int i915_gem_init_hw(struct drm_i915_private *dev_priv)
 	init_unused_rings(dev_priv);
 
 	BUG_ON(!dev_priv->kernel_context);
+	if (i915_terminally_wedged(&dev_priv->gpu_error)) {
+		ret = -EIO;
+		goto out;
+	}
 
 	ret = i915_ppgtt_init_hw(dev_priv);
 	if (ret) {
@@ -4938,8 +4989,10 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
 		 * wedged. But we only want to do this where the GPU is angry,
 		 * for all other failure, such as an allocation failure, bail.
 		 */
-		DRM_ERROR("Failed to initialize GPU, declaring it wedged\n");
-		i915_gem_set_wedged(dev_priv);
+		if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
+			DRM_ERROR("Failed to initialize GPU, declaring it wedged\n");
+			i915_gem_set_wedged(dev_priv);
+		}
 		ret = 0;
 	}
 
@@ -5039,11 +5092,15 @@ i915_gem_load_init(struct drm_i915_private *dev_priv)
 		goto err_priorities;
 
 	INIT_WORK(&dev_priv->mm.free_work, __i915_gem_free_work);
+
+	spin_lock_init(&dev_priv->mm.obj_lock);
+	spin_lock_init(&dev_priv->mm.free_lock);
 	init_llist_head(&dev_priv->mm.free_list);
 	INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
 	INIT_LIST_HEAD(&dev_priv->mm.bound_list);
 	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
 	INIT_LIST_HEAD(&dev_priv->mm.userfault_list);
+
 	INIT_DELAYED_WORK(&dev_priv->gt.retire_work,
 			  i915_gem_retire_work_handler);
 	INIT_DELAYED_WORK(&dev_priv->gt.idle_work,
@@ -5137,12 +5194,12 @@ int i915_gem_freeze_late(struct drm_i915_private *dev_priv)
 	i915_gem_shrink(dev_priv, -1UL, NULL, I915_SHRINK_UNBOUND);
 	i915_gem_drain_freed_objects(dev_priv);
 
-	mutex_lock(&dev_priv->drm.struct_mutex);
+	spin_lock(&dev_priv->mm.obj_lock);
 	for (p = phases; *p; p++) {
-		list_for_each_entry(obj, *p, global_link)
+		list_for_each_entry(obj, *p, mm.link)
 			__start_cpu_write(obj);
 	}
-	mutex_unlock(&dev_priv->drm.struct_mutex);
+	spin_unlock(&dev_priv->mm.obj_lock);
 
 	return 0;
 }
@@ -5461,7 +5518,17 @@ int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
 		goto err_unlock;
 	}
 
-	pages = obj->mm.pages;
+	pages = fetch_and_zero(&obj->mm.pages);
+	if (pages) {
+		struct drm_i915_private *i915 = to_i915(obj->base.dev);
+
+		__i915_gem_object_reset_page_iter(obj);
+
+		spin_lock(&i915->mm.obj_lock);
+		list_del(&obj->mm.link);
+		spin_unlock(&i915->mm.obj_lock);
+	}
+
 	obj->ops = &i915_gem_phys_ops;
 
 	err = ____i915_gem_object_get_pages(obj);
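i915_gem_flush_free_objects() above moves from draining the whole free_list with llist_del_all() to popping a single entry with llist_del_first(), which permits only one concurrent consumer and therefore needs the new free_lock. A self-contained sketch of that pattern (node type and names are hypothetical):

	#include <linux/llist.h>
	#include <linux/spinlock.h>

	struct my_node {
		struct llist_node llnode;
	};

	static struct my_node *pop_one(struct llist_head *list, spinlock_t *lock)
	{
		struct llist_node *node = NULL;

		if (!llist_empty(list)) {	/* cheap lock-free hotpath test */
			spin_lock(lock);	/* serialize the single consumer */
			node = llist_del_first(list);
			spin_unlock(lock);
		}
		if (!node)
			return NULL;

		node->next = NULL;	/* detach it from the remaining chain */
		return llist_entry(node, struct my_node, llnode);
	}

Producers may keep calling llist_add() lock-free while this runs; the lock only serializes consumers against each other.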
@@ -70,6 +70,7 @@ static const struct dma_fence_ops i915_clflush_ops = {
 
 static void __i915_do_clflush(struct drm_i915_gem_object *obj)
 {
+	GEM_BUG_ON(!i915_gem_object_has_pages(obj));
 	drm_clflush_sg(obj->mm.pages);
 	intel_fb_obj_flush(obj, ORIGIN_CPU);
 }
@@ -33,6 +33,10 @@
 #include "intel_drv.h"
 #include "i915_trace.h"
 
+I915_SELFTEST_DECLARE(static struct igt_evict_ctl {
+	bool fail_if_busy:1;
+} igt_evict_ctl;)
+
 static bool ggtt_is_idle(struct drm_i915_private *i915)
 {
 	struct intel_engine_cs *engine;
@@ -205,6 +209,9 @@ i915_gem_evict_something(struct i915_address_space *vm,
 	 * the kernel's there is no more we can evict.
 	 */
 	if (!ggtt_is_idle(dev_priv)) {
+		if (I915_SELFTEST_ONLY(igt_evict_ctl.fail_if_busy))
+			return -EBUSY;
+
 		ret = ggtt_flush(dev_priv);
 		if (ret)
 			return ret;
@@ -3594,8 +3594,7 @@ void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
 	ggtt->base.closed = true; /* skip rewriting PTE on VMA unbind */
 
 	/* clflush objects bound into the GGTT and rebind them. */
-	list_for_each_entry_safe(obj, on,
-				 &dev_priv->mm.bound_list, global_link) {
+	list_for_each_entry_safe(obj, on, &dev_priv->mm.bound_list, mm.link) {
 		bool ggtt_bound = false;
 		struct i915_vma *vma;
 
@@ -114,7 +114,6 @@ struct drm_i915_gem_object {
 
 	/** Stolen memory for this object, instead of being backed by shmem. */
 	struct drm_mm_node *stolen;
-	struct list_head global_link;
 	union {
 		struct rcu_head rcu;
 		struct llist_node freed;
@@ -161,7 +160,8 @@ struct drm_i915_gem_object {
 	/** Count of VMA actually bound by this object */
 	unsigned int bind_count;
 	unsigned int active_count;
-	unsigned int pin_display;
+	/** Count of how many global VMA are currently pinned for use by HW */
+	unsigned int pin_global;
 
 	struct {
 		struct mutex lock; /* protects the pages and their use */
@@ -207,6 +207,12 @@ struct drm_i915_gem_object {
 			struct mutex lock; /* protects this cache */
 		} get_page;
 
+		/**
+		 * Element within i915->mm.unbound_list or i915->mm.bound_list,
+		 * locked by i915->mm.obj_lock.
+		 */
+		struct list_head link;
+
 		/**
 		 * Advice: are the backing pages purgeable?
 		 */
@@ -229,7 +229,7 @@ int i915_gem_render_state_emit(struct drm_i915_gem_request *req)
 		return 0;
 
 	/* Recreate the page after shrinking */
-	if (!so->vma->obj->mm.pages)
+	if (!i915_gem_object_has_pages(so->vma->obj))
 		so->batch_offset = -1;
 
 	ret = i915_vma_pin(so->vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
@@ -71,25 +71,6 @@ static void shrinker_unlock(struct drm_i915_private *dev_priv, bool unlock)
 	mutex_unlock(&dev_priv->drm.struct_mutex);
 }
 
-static bool any_vma_pinned(struct drm_i915_gem_object *obj)
-{
-	struct i915_vma *vma;
-
-	list_for_each_entry(vma, &obj->vma_list, obj_link) {
-		/* Only GGTT vma may be permanently pinned, and are always
-		 * at the start of the list. We can stop hunting as soon
-		 * as we see a ppGTT vma.
-		 */
-		if (!i915_vma_is_ggtt(vma))
-			break;
-
-		if (i915_vma_is_pinned(vma))
-			return true;
-	}
-
-	return false;
-}
-
 static bool swap_available(void)
 {
 	return get_nr_swap_pages() > 0;
@@ -97,9 +78,6 @@ static bool swap_available(void)
 
 static bool can_release_pages(struct drm_i915_gem_object *obj)
 {
-	if (!obj->mm.pages)
-		return false;
-
 	/* Consider only shrinkable ojects. */
 	if (!i915_gem_object_is_shrinkable(obj))
 		return false;
@@ -115,7 +93,13 @@ static bool can_release_pages(struct drm_i915_gem_object *obj)
 	if (atomic_read(&obj->mm.pages_pin_count) > obj->bind_count)
 		return false;
 
-	if (any_vma_pinned(obj))
+	/* If any vma are "permanently" pinned, it will prevent us from
+	 * reclaiming the obj->mm.pages. We only allow scanout objects to claim
+	 * a permanent pin, along with a few others like the context objects.
+	 * To simplify the scan, and to avoid walking the list of vma under the
+	 * object, we just check the count of its permanently pinned.
+	 */
+	if (READ_ONCE(obj->pin_global))
 		return false;
 
 	/* We can only return physical pages to the system if we can either
@@ -129,7 +113,7 @@ static bool unsafe_drop_pages(struct drm_i915_gem_object *obj)
 {
 	if (i915_gem_object_unbind(obj) == 0)
 		__i915_gem_object_put_pages(obj, I915_MM_SHRINKER);
-	return !READ_ONCE(obj->mm.pages);
+	return !i915_gem_object_has_pages(obj);
 }
 
 /**
@@ -217,15 +201,20 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
 			continue;
 
 		INIT_LIST_HEAD(&still_in_list);
+
+		/*
+		 * We serialize our access to unreferenced objects through
+		 * the use of the struct_mutex. While the objects are not
+		 * yet freed (due to RCU then a workqueue) we still want
+		 * to be able to shrink their pages, so they remain on
+		 * the unbound/bound list until actually freed.
+		 */
+		spin_lock(&dev_priv->mm.obj_lock);
 		while (count < target &&
 		       (obj = list_first_entry_or_null(phase->list,
 						       typeof(*obj),
-						       global_link))) {
-			list_move_tail(&obj->global_link, &still_in_list);
-			if (!obj->mm.pages) {
-				list_del_init(&obj->global_link);
-				continue;
-			}
+						       mm.link))) {
+			list_move_tail(&obj->mm.link, &still_in_list);
 
 			if (flags & I915_SHRINK_PURGEABLE &&
 			    obj->mm.madv != I915_MADV_DONTNEED)
@@ -243,20 +232,24 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
 			if (!can_release_pages(obj))
 				continue;
 
+			spin_unlock(&dev_priv->mm.obj_lock);
+
 			if (unsafe_drop_pages(obj)) {
 				/* May arrive from get_pages on another bo */
 				mutex_lock_nested(&obj->mm.lock,
 						  I915_MM_SHRINKER);
-				if (!obj->mm.pages) {
+				if (!i915_gem_object_has_pages(obj)) {
 					__i915_gem_object_invalidate(obj);
-					list_del_init(&obj->global_link);
 					count += obj->base.size >> PAGE_SHIFT;
 				}
 				mutex_unlock(&obj->mm.lock);
-				scanned += obj->base.size >> PAGE_SHIFT;
 			}
			scanned += obj->base.size >> PAGE_SHIFT;
+
+			spin_lock(&dev_priv->mm.obj_lock);
 		}
 		list_splice_tail(&still_in_list, phase->list);
+		spin_unlock(&dev_priv->mm.obj_lock);
 	}
 
 	if (flags & I915_SHRINK_BOUND)
@@ -302,29 +295,40 @@ unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv)
 static unsigned long
 i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
 {
-	struct drm_i915_private *dev_priv =
+	struct drm_i915_private *i915 =
 		container_of(shrinker, struct drm_i915_private, mm.shrinker);
 	struct drm_i915_gem_object *obj;
-	unsigned long count;
-	bool unlock;
-
-	if (!shrinker_lock(dev_priv, &unlock))
-		return 0;
-
-	i915_gem_retire_requests(dev_priv);
-
-	count = 0;
-	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_link)
-		if (can_release_pages(obj))
+	unsigned long num_objects = 0;
+	unsigned long count = 0;
+
+	spin_lock(&i915->mm.obj_lock);
+	list_for_each_entry(obj, &i915->mm.unbound_list, mm.link)
+		if (can_release_pages(obj)) {
 			count += obj->base.size >> PAGE_SHIFT;
+			num_objects++;
+		}
 
-	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_link) {
-		if (!i915_gem_object_is_active(obj) && can_release_pages(obj))
+	list_for_each_entry(obj, &i915->mm.bound_list, mm.link)
+		if (!i915_gem_object_is_active(obj) && can_release_pages(obj)) {
 			count += obj->base.size >> PAGE_SHIFT;
+			num_objects++;
+		}
+	spin_unlock(&i915->mm.obj_lock);
+
+	/* Update our preferred vmscan batch size for the next pass.
+	 * Our rough guess for an effective batch size is roughly 2
+	 * available GEM objects worth of pages. That is we don't want
+	 * the shrinker to fire, until it is worth the cost of freeing an
+	 * entire GEM object.
+	 */
+	if (num_objects) {
+		unsigned long avg = 2 * count / num_objects;
+
+		i915->mm.shrinker.batch =
+			max((i915->mm.shrinker.batch + avg) >> 1,
+			    128ul /* default SHRINK_BATCH */);
 	}
 
-	shrinker_unlock(dev_priv, unlock);
-
 	return count;
 }
@@ -400,10 +404,6 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
 		container_of(nb, struct drm_i915_private, mm.oom_notifier);
 	struct drm_i915_gem_object *obj;
 	unsigned long unevictable, bound, unbound, freed_pages;
-	bool unlock;
-
-	if (!shrinker_lock_uninterruptible(dev_priv, &unlock, 5000))
-		return NOTIFY_DONE;
 
 	freed_pages = i915_gem_shrink_all(dev_priv);
 
@@ -412,26 +412,20 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
 	 * being pointed to by hardware.
 	 */
 	unbound = bound = unevictable = 0;
-	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_link) {
-		if (!obj->mm.pages)
-			continue;
-
+	spin_lock(&dev_priv->mm.obj_lock);
+	list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
 		if (!can_release_pages(obj))
 			unevictable += obj->base.size >> PAGE_SHIFT;
 		else
 			unbound += obj->base.size >> PAGE_SHIFT;
 	}
-	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_link) {
-		if (!obj->mm.pages)
-			continue;
-
+	list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
 		if (!can_release_pages(obj))
 			unevictable += obj->base.size >> PAGE_SHIFT;
 		else
 			bound += obj->base.size >> PAGE_SHIFT;
 	}
-
-	shrinker_unlock(dev_priv, unlock);
+	spin_unlock(&dev_priv->mm.obj_lock);
 
 	if (freed_pages || unbound || bound)
 		pr_info("Purging GPU memory, %lu pages freed, "
@@ -498,6 +492,7 @@ void i915_gem_shrinker_init(struct drm_i915_private *dev_priv)
 	dev_priv->mm.shrinker.scan_objects = i915_gem_shrinker_scan;
 	dev_priv->mm.shrinker.count_objects = i915_gem_shrinker_count;
 	dev_priv->mm.shrinker.seeks = DEFAULT_SEEKS;
+	dev_priv->mm.shrinker.batch = 4096;
 	WARN_ON(register_shrinker(&dev_priv->mm.shrinker));
 
 	dev_priv->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom;
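To make the batch heuristic above concrete with illustrative figures (not from the patch): with, say, 100 shrinkable objects totalling 51200 pages, avg = 2 * 51200 / 100 = 1024, and starting from the init value of 4096 the next batch becomes max((4096 + 1024) >> 1, 128) = 2560, converging toward roughly two objects' worth of pages per shrinker invocation.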
@@ -724,8 +724,11 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv
 	vma->flags |= I915_VMA_GLOBAL_BIND;
 	__i915_vma_set_map_and_fenceable(vma);
 	list_move_tail(&vma->vm_link, &ggtt->base.inactive_list);
-	list_move_tail(&obj->global_link, &dev_priv->mm.bound_list);
+
+	spin_lock(&dev_priv->mm.obj_lock);
+	list_move_tail(&obj->mm.link, &dev_priv->mm.bound_list);
 	obj->bind_count++;
+	spin_unlock(&dev_priv->mm.obj_lock);
 
 	return obj;
 
@@ -269,7 +269,7 @@ i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
 	 * due to the change in swizzling.
 	 */
 	mutex_lock(&obj->mm.lock);
-	if (obj->mm.pages &&
+	if (i915_gem_object_has_pages(obj) &&
 	    obj->mm.madv == I915_MADV_WILLNEED &&
 	    i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
 		if (tiling == I915_TILING_NONE) {
@@ -82,11 +82,11 @@ static void cancel_userptr(struct work_struct *work)
 	/* We are inside a kthread context and can't be interrupted */
 	if (i915_gem_object_unbind(obj) == 0)
 		__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
-	WARN_ONCE(obj->mm.pages,
-		  "Failed to release pages: bind_count=%d, pages_pin_count=%d, pin_display=%d\n",
+	WARN_ONCE(i915_gem_object_has_pages(obj),
+		  "Failed to release pages: bind_count=%d, pages_pin_count=%d, pin_global=%d\n",
 		  obj->bind_count,
 		  atomic_read(&obj->mm.pages_pin_count),
-		  obj->pin_display);
+		  obj->pin_global);
 
 	mutex_unlock(&obj->base.dev->struct_mutex);
 
@@ -221,15 +221,17 @@ i915_mmu_notifier_find(struct i915_mm_struct *mm)
 			/* Protected by mm_lock */
 			mm->mn = fetch_and_zero(&mn);
 		}
-	} else {
-		/* someone else raced and successfully installed the mmu
-		 * notifier, we can cancel our own errors */
+	} else if (mm->mn) {
+		/*
+		 * Someone else raced and successfully installed the mmu
+		 * notifier, we can cancel our own errors.
+		 */
 		err = 0;
 	}
 	mutex_unlock(&mm->i915->mm_lock);
 	up_write(&mm->mm->mmap_sem);
 
-	if (mn) {
+	if (mn && !IS_ERR(mn)) {
 		destroy_workqueue(mn->wq);
 		kfree(mn);
 	}
@@ -193,7 +193,6 @@ static const struct intel_device_info intel_i965gm_info __initconst = {
 static const struct intel_device_info intel_g45_info __initconst = {
 	GEN4_FEATURES,
 	.platform = INTEL_G45,
-	.has_pipe_cxsr = 1,
 	.ring_mask = RENDER_RING | BSD_RING,
 };
 
@@ -201,7 +200,6 @@ static const struct intel_device_info intel_gm45_info __initconst = {
 	GEN4_FEATURES,
 	.platform = INTEL_GM45,
 	.is_mobile = 1, .has_fbc = 1,
-	.has_pipe_cxsr = 1,
 	.supports_tv = 1,
 	.ring_mask = RENDER_RING | BSD_RING,
 };
@@ -645,7 +643,7 @@ static void i915_pci_remove(struct pci_dev *pdev)
 	struct drm_device *dev = pci_get_drvdata(pdev);
 
 	i915_driver_unload(dev);
-	drm_dev_unref(dev);
+	drm_dev_put(dev);
 }
 
 static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
@@ -53,6 +53,7 @@ enum vgt_g2v_type {
  * VGT capabilities type
  */
 #define VGT_CAPS_FULL_48BIT_PPGTT	BIT(2)
+#define VGT_CAPS_HWSP_EMULATION		BIT(3)
 
 struct vgt_if {
 	u64 magic;		/* VGT_MAGIC */
@@ -5242,7 +5242,7 @@ enum {
 #define   DP_AUX_CH_CTL_TIME_OUT_400us	    (0 << 26)
 #define   DP_AUX_CH_CTL_TIME_OUT_600us	    (1 << 26)
 #define   DP_AUX_CH_CTL_TIME_OUT_800us	    (2 << 26)
-#define   DP_AUX_CH_CTL_TIME_OUT_1600us	    (3 << 26)
+#define   DP_AUX_CH_CTL_TIME_OUT_MAX	    (3 << 26) /* Varies per platform */
 #define   DP_AUX_CH_CTL_TIME_OUT_MASK	    (3 << 26)
 #define   DP_AUX_CH_CTL_RECEIVE_ERROR	    (1 << 25)
 #define   DP_AUX_CH_CTL_MESSAGE_SIZE_MASK    (0x1f << 20)
@@ -41,6 +41,11 @@ static inline void debug_fence_init(struct i915_sw_fence *fence)
 	debug_object_init(fence, &i915_sw_fence_debug_descr);
 }
 
+static inline void debug_fence_init_onstack(struct i915_sw_fence *fence)
+{
+	debug_object_init_on_stack(fence, &i915_sw_fence_debug_descr);
+}
+
 static inline void debug_fence_activate(struct i915_sw_fence *fence)
 {
 	debug_object_activate(fence, &i915_sw_fence_debug_descr);
@@ -79,6 +84,10 @@ static inline void debug_fence_init(struct i915_sw_fence *fence)
 {
 }
 
+static inline void debug_fence_init_onstack(struct i915_sw_fence *fence)
+{
+}
+
 static inline void debug_fence_activate(struct i915_sw_fence *fence)
 {
 }
@@ -360,9 +369,9 @@ struct i915_sw_dma_fence_cb {
 	struct irq_work work;
 };
 
-static void timer_i915_sw_fence_wake(unsigned long data)
+static void timer_i915_sw_fence_wake(struct timer_list *t)
 {
-	struct i915_sw_dma_fence_cb *cb = (struct i915_sw_dma_fence_cb *)data;
+	struct i915_sw_dma_fence_cb *cb = from_timer(cb, t, timer);
 	struct i915_sw_fence *fence;
 
 	fence = xchg(&cb->fence, NULL);
@@ -425,9 +434,7 @@ int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
 	i915_sw_fence_await(fence);
 
 	cb->dma = NULL;
-	__setup_timer(&cb->timer,
-		      timer_i915_sw_fence_wake, (unsigned long)cb,
-		      TIMER_IRQSAFE);
+	timer_setup(&cb->timer, timer_i915_sw_fence_wake, TIMER_IRQSAFE);
 	init_irq_work(&cb->work, irq_i915_sw_fence_work);
 	if (timeout) {
 		cb->dma = dma_fence_get(dma);
@@ -507,5 +514,6 @@ int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
 }
 
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftests/lib_sw_fence.c"
 #include "selftests/i915_sw_fence.c"
 #endif
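The timer change above is part of the tree-wide timer_setup() conversion mentioned in the pull summary: the callback now receives the timer_list pointer and recovers its container via from_timer() instead of casting an unsigned long. The shape of the conversion, with hypothetical names:

	#include <linux/timer.h>

	struct my_ctx {
		struct timer_list timer;
	};

	static void my_timer_fn(struct timer_list *t)
	{
		/* from_timer() is container_of() keyed on the timer field */
		struct my_ctx *ctx = from_timer(ctx, t, timer);

		/* ... use ctx ... */
	}

	static void my_ctx_init(struct my_ctx *ctx)
	{
		timer_setup(&ctx->timer, my_timer_fn, 0);
	}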
@@ -30,6 +30,12 @@ void i915_check_vgpu(struct drm_i915_private *dev_priv);
 
 bool intel_vgpu_has_full_48bit_ppgtt(struct drm_i915_private *dev_priv);
 
+static inline bool
+intel_vgpu_has_hwsp_emulation(struct drm_i915_private *dev_priv)
+{
+	return dev_priv->vgpu.caps & VGT_CAPS_HWSP_EMULATION;
+}
+
 int intel_vgt_balloon(struct drm_i915_private *dev_priv);
 void intel_vgt_deballoon(struct drm_i915_private *dev_priv);
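Illustrative sketch (not part of the diff): intel_vgpu_has_hwsp_emulation() above is a plain capability-bit test against a caps word the host advertises to the guest. Standalone sketch with invented capability names:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BIT(n)			(1u << (n))
#define CAPS_FULL_48BIT_PPGTT	BIT(2)
#define CAPS_HWSP_EMULATION	BIT(3)

static bool has_hwsp_emulation(uint32_t caps)
{
	return caps & CAPS_HWSP_EMULATION;	/* gate guest behaviour on one bit */
}

int main(void)
{
	uint32_t caps = CAPS_FULL_48BIT_PPGTT | CAPS_HWSP_EMULATION;

	printf("hwsp emulation: %s\n", has_hwsp_emulation(caps) ? "yes" : "no");
	return 0;
}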
@@ -58,8 +58,10 @@ i915_vma_retire(struct i915_gem_active *active,
 	 * so that we don't steal from recently used but inactive objects
 	 * (unless we are forced to ofc!)
 	 */
+	spin_lock(&rq->i915->mm.obj_lock);
 	if (obj->bind_count)
-		list_move_tail(&obj->global_link, &rq->i915->mm.bound_list);
+		list_move_tail(&obj->mm.link, &rq->i915->mm.bound_list);
+	spin_unlock(&rq->i915->mm.obj_lock);
 
 	obj->mm.dirty = true; /* be paranoid */
 
@@ -563,9 +565,13 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
 	GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level));
 
-	list_move_tail(&obj->global_link, &dev_priv->mm.bound_list);
 	list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
 
+	spin_lock(&dev_priv->mm.obj_lock);
+	list_move_tail(&obj->mm.link, &dev_priv->mm.bound_list);
 	obj->bind_count++;
+	spin_unlock(&dev_priv->mm.obj_lock);
+
 	GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);
 
 	return 0;
@@ -580,6 +586,7 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 static void
 i915_vma_remove(struct i915_vma *vma)
 {
+	struct drm_i915_private *i915 = vma->vm->i915;
 	struct drm_i915_gem_object *obj = vma->obj;
 
 	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
@@ -593,9 +600,10 @@ i915_vma_remove(struct i915_vma *vma)
 	/* Since the unbound list is global, only move to that list if
 	 * no more VMAs exist.
 	 */
+	spin_lock(&i915->mm.obj_lock);
 	if (--obj->bind_count == 0)
-		list_move_tail(&obj->global_link,
-			       &to_i915(obj->base.dev)->mm.unbound_list);
+		list_move_tail(&obj->mm.link, &i915->mm.unbound_list);
+	spin_unlock(&i915->mm.obj_lock);
 
 	/* And finally now the object is completely decoupled from this vma,
 	 * we can drop its hold on the backing storage and allow it to be
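Illustrative sketch (not part of the diff): the i915_vma hunks above put every bound/unbound list transition under the new mm.obj_lock, so the lists stay consistent without the big struct_mutex. A thin pthread analogue of the locked-transition pattern, with a hypothetical two-list tracker:

#include <pthread.h>
#include <stdio.h>

struct tracker {
	pthread_mutex_t obj_lock;
	int bound_count;
	int unbound_count;
};

static void move_to_unbound(struct tracker *t)
{
	pthread_mutex_lock(&t->obj_lock);	/* like spin_lock(&i915->mm.obj_lock) */
	t->bound_count--;
	t->unbound_count++;
	pthread_mutex_unlock(&t->obj_lock);
}

int main(void)
{
	struct tracker t = { PTHREAD_MUTEX_INITIALIZER, 1, 0 };

	move_to_unbound(&t);
	printf("bound=%d unbound=%d\n", t.bound_count, t.unbound_count);
	return 0;
}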
@@ -691,6 +691,48 @@ parse_psr(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
 	dev_priv->vbt.psr.tp2_tp3_wakeup_time = psr_table->tp2_tp3_wakeup_time;
 }
 
+static void parse_dsi_backlight_ports(struct drm_i915_private *dev_priv,
+				      u16 version, enum port port)
+{
+	if (!dev_priv->vbt.dsi.config->dual_link || version < 197) {
+		dev_priv->vbt.dsi.bl_ports = BIT(port);
+		if (dev_priv->vbt.dsi.config->cabc_supported)
+			dev_priv->vbt.dsi.cabc_ports = BIT(port);
+
+		return;
+	}
+
+	switch (dev_priv->vbt.dsi.config->dl_dcs_backlight_ports) {
+	case DL_DCS_PORT_A:
+		dev_priv->vbt.dsi.bl_ports = BIT(PORT_A);
+		break;
+	case DL_DCS_PORT_C:
+		dev_priv->vbt.dsi.bl_ports = BIT(PORT_C);
+		break;
+	default:
+	case DL_DCS_PORT_A_AND_C:
+		dev_priv->vbt.dsi.bl_ports = BIT(PORT_A) | BIT(PORT_C);
+		break;
+	}
+
+	if (!dev_priv->vbt.dsi.config->cabc_supported)
+		return;
+
+	switch (dev_priv->vbt.dsi.config->dl_dcs_cabc_ports) {
+	case DL_DCS_PORT_A:
+		dev_priv->vbt.dsi.cabc_ports = BIT(PORT_A);
+		break;
+	case DL_DCS_PORT_C:
+		dev_priv->vbt.dsi.cabc_ports = BIT(PORT_C);
+		break;
+	default:
+	case DL_DCS_PORT_A_AND_C:
+		dev_priv->vbt.dsi.cabc_ports =
+			BIT(PORT_A) | BIT(PORT_C);
+		break;
+	}
+}
+
 static void
 parse_mipi_config(struct drm_i915_private *dev_priv,
 		  const struct bdb_header *bdb)
@@ -699,9 +741,10 @@ parse_mipi_config(struct drm_i915_private *dev_priv,
 	const struct mipi_config *config;
 	const struct mipi_pps_data *pps;
 	int panel_type = dev_priv->vbt.panel_type;
+	enum port port;
 
 	/* parse MIPI blocks only if LFP type is MIPI */
-	if (!intel_bios_is_dsi_present(dev_priv, NULL))
+	if (!intel_bios_is_dsi_present(dev_priv, &port))
 		return;
 
 	/* Initialize this to undefined indicating no generic MIPI support */
@@ -742,15 +785,7 @@ parse_mipi_config(struct drm_i915_private *dev_priv,
 		return;
 	}
 
-	/*
-	 * These fields are introduced from the VBT version 197 onwards,
-	 * so making sure that these bits are set zero in the previous
-	 * versions.
-	 */
-	if (dev_priv->vbt.dsi.config->dual_link && bdb->version < 197) {
-		dev_priv->vbt.dsi.config->dl_dcs_cabc_ports = 0;
-		dev_priv->vbt.dsi.config->dl_dcs_backlight_ports = 0;
-	}
+	parse_dsi_backlight_ports(dev_priv, bdb->version, port);
 
 	/* We have mandatory mipi config blocks. Initialize as generic panel */
 	dev_priv->vbt.dsi.panel_id = MIPI_DSI_GENERIC_PANEL_ID;
@@ -1071,6 +1106,22 @@ static void sanitize_aux_ch(struct drm_i915_private *dev_priv,
 	}
 }
 
+static const u8 cnp_ddc_pin_map[] = {
+	[DDC_BUS_DDI_B] = GMBUS_PIN_1_BXT,
+	[DDC_BUS_DDI_C] = GMBUS_PIN_2_BXT,
+	[DDC_BUS_DDI_D] = GMBUS_PIN_4_CNP, /* sic */
+	[DDC_BUS_DDI_F] = GMBUS_PIN_3_BXT, /* sic */
+};
+
+static u8 map_ddc_pin(struct drm_i915_private *dev_priv, u8 vbt_pin)
+{
+	if (HAS_PCH_CNP(dev_priv) &&
+	    vbt_pin > 0 && vbt_pin < ARRAY_SIZE(cnp_ddc_pin_map))
+		return cnp_ddc_pin_map[vbt_pin];
+
+	return vbt_pin;
+}
+
 static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
 			   u8 bdb_version)
 {
@@ -1163,16 +1214,7 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
 		DRM_DEBUG_KMS("Port %c is internal DP\n", port_name(port));
 
 	if (is_dvi) {
-		info->alternate_ddc_pin = ddc_pin;
-
-		/*
-		 * All VBTs that we got so far for B Stepping has this
-		 * information wrong for Port D. So, let's just ignore for now.
-		 */
-		if (IS_CNL_REVID(dev_priv, CNL_REVID_B0, CNL_REVID_B0) &&
-		    port == PORT_D) {
-			info->alternate_ddc_pin = 0;
-		}
+		info->alternate_ddc_pin = map_ddc_pin(dev_priv, ddc_pin);
 
 		sanitize_ddc_pin(dev_priv, port);
 	}
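Illustrative sketch (not part of the diff): map_ddc_pin() above translates VBT pin numbers through a sparse designated-initializer table, guarded by a bounds check before indexing. Standalone sketch with made-up pin values; the extra non-zero guard is an addition of this sketch, not of the kernel code:

#include <stdint.h>
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const uint8_t ddc_pin_map[] = {
	[1] = 0x11,	/* e.g. DDI B -> platform-specific pin */
	[2] = 0x12,
	[4] = 0x14,
};

static uint8_t map_pin(uint8_t vbt_pin)
{
	if (vbt_pin > 0 && vbt_pin < ARRAY_SIZE(ddc_pin_map) &&
	    ddc_pin_map[vbt_pin])
		return ddc_pin_map[vbt_pin];

	return vbt_pin;	/* fall back to the raw value */
}

int main(void)
{
	printf("pin 2 -> 0x%x, pin 7 -> 0x%x\n", map_pin(2), map_pin(7));
	return 0;
}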
@@ -74,9 +74,10 @@ static noinline void missed_breadcrumb(struct intel_engine_cs *engine)
 	set_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings);
 }
 
-static void intel_breadcrumbs_hangcheck(unsigned long data)
+static void intel_breadcrumbs_hangcheck(struct timer_list *t)
 {
-	struct intel_engine_cs *engine = (struct intel_engine_cs *)data;
+	struct intel_engine_cs *engine = from_timer(engine, t,
+						    breadcrumbs.hangcheck);
 	struct intel_breadcrumbs *b = &engine->breadcrumbs;
 
 	if (!b->irq_armed)
@@ -108,9 +109,10 @@ static void intel_breadcrumbs_hangcheck(unsigned long data)
 	}
 }
 
-static void intel_breadcrumbs_fake_irq(unsigned long data)
+static void intel_breadcrumbs_fake_irq(struct timer_list *t)
 {
-	struct intel_engine_cs *engine = (struct intel_engine_cs *)data;
+	struct intel_engine_cs *engine = from_timer(engine, t,
+						    breadcrumbs.fake_irq);
 	struct intel_breadcrumbs *b = &engine->breadcrumbs;
 
 	/* The timer persists in case we cannot enable interrupts,
@@ -787,12 +789,8 @@ int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine)
 	spin_lock_init(&b->rb_lock);
 	spin_lock_init(&b->irq_lock);
 
-	setup_timer(&b->fake_irq,
-		    intel_breadcrumbs_fake_irq,
-		    (unsigned long)engine);
-	setup_timer(&b->hangcheck,
-		    intel_breadcrumbs_hangcheck,
-		    (unsigned long)engine);
+	timer_setup(&b->fake_irq, intel_breadcrumbs_fake_irq, 0);
+	timer_setup(&b->hangcheck, intel_breadcrumbs_hangcheck, 0);
 
 	/* Spawn a thread to provide a common bottom-half for all signals.
 	 * As this is an asynchronous interface we cannot steal the current
@@ -343,11 +343,26 @@ intel_crt_mode_valid(struct drm_connector *connector,
 static bool intel_crt_compute_config(struct intel_encoder *encoder,
 				     struct intel_crtc_state *pipe_config,
 				     struct drm_connector_state *conn_state)
+{
+	return true;
+}
+
+static bool pch_crt_compute_config(struct intel_encoder *encoder,
+				   struct intel_crtc_state *pipe_config,
+				   struct drm_connector_state *conn_state)
+{
+	pipe_config->has_pch_encoder = true;
+
+	return true;
+}
+
+static bool hsw_crt_compute_config(struct intel_encoder *encoder,
+				   struct intel_crtc_state *pipe_config,
+				   struct drm_connector_state *conn_state)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 
-	if (HAS_PCH_SPLIT(dev_priv))
-		pipe_config->has_pch_encoder = true;
+	pipe_config->has_pch_encoder = true;
 
 	/* LPT FDI RX only supports 8bpc. */
 	if (HAS_PCH_LPT(dev_priv)) {
@@ -360,8 +375,7 @@ static bool intel_crt_compute_config(struct intel_encoder *encoder,
 	}
 
 	/* FDI must always be 2.7 GHz */
-	if (HAS_DDI(dev_priv))
-		pipe_config->port_clock = 135000 * 2;
+	pipe_config->port_clock = 135000 * 2;
 
 	return true;
 }
@@ -959,11 +973,11 @@ void intel_crt_init(struct drm_i915_private *dev_priv)
 	    !dmi_check_system(intel_spurious_crt_detect))
 		crt->base.hpd_pin = HPD_CRT;
 
-	crt->base.compute_config = intel_crt_compute_config;
 	if (HAS_DDI(dev_priv)) {
 		crt->base.port = PORT_E;
 		crt->base.get_config = hsw_crt_get_config;
 		crt->base.get_hw_state = intel_ddi_get_hw_state;
+		crt->base.compute_config = hsw_crt_compute_config;
 		crt->base.pre_pll_enable = hsw_pre_pll_enable_crt;
 		crt->base.pre_enable = hsw_pre_enable_crt;
 		crt->base.enable = hsw_enable_crt;
@@ -971,9 +985,11 @@ void intel_crt_init(struct drm_i915_private *dev_priv)
 		crt->base.post_disable = hsw_post_disable_crt;
 	} else {
 		if (HAS_PCH_SPLIT(dev_priv)) {
+			crt->base.compute_config = pch_crt_compute_config;
 			crt->base.disable = pch_disable_crt;
 			crt->base.post_disable = pch_post_disable_crt;
 		} else {
+			crt->base.compute_config = intel_crt_compute_config;
 			crt->base.disable = intel_disable_crt;
 		}
 		crt->base.port = PORT_NONE;
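Illustrative sketch (not part of the diff): the CRT split above replaces runtime platform checks inside one compute_config() with per-platform callbacks chosen once at init. Standalone sketch of the dispatch pattern; all names here are invented:

#include <stdbool.h>
#include <stdio.h>

struct encoder {
	bool (*compute_config)(struct encoder *enc);
};

static bool gmch_compute_config(struct encoder *enc) { return true; }
static bool pch_compute_config(struct encoder *enc)  { return true; }
static bool ddi_compute_config(struct encoder *enc)  { return true; }

static void encoder_init(struct encoder *enc, bool has_ddi, bool has_pch)
{
	/* pick the hook once, instead of branching on every call */
	if (has_ddi)
		enc->compute_config = ddi_compute_config;
	else if (has_pch)
		enc->compute_config = pch_compute_config;
	else
		enc->compute_config = gmch_compute_config;
}

int main(void)
{
	struct encoder enc;

	encoder_init(&enc, false, true);
	printf("ok=%d\n", enc.compute_config(&enc));
	return 0;
}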
@@ -52,10 +52,6 @@ MODULE_FIRMWARE(I915_CSR_SKL);
 MODULE_FIRMWARE(I915_CSR_BXT);
 #define BXT_CSR_VERSION_REQUIRED	CSR_VERSION(1, 7)
 
-#define FIRMWARE_URL  "https://01.org/linuxgraphics/downloads/firmware"
-
-
-
 #define CSR_MAX_FW_SIZE			0x2FFF
 #define CSR_DEFAULT_FW_OFFSET		0xFFFFFFFF
@@ -291,7 +287,8 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
 	css_header = (struct intel_css_header *)fw->data;
 	if (sizeof(struct intel_css_header) !=
 	    (css_header->header_len * 4)) {
-		DRM_ERROR("Firmware has wrong CSS header length %u bytes\n",
+		DRM_ERROR("DMC firmware has wrong CSS header length "
+			  "(%u bytes)\n",
 			  (css_header->header_len * 4));
 		return NULL;
 	}
@@ -315,7 +312,7 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
 
 	if (csr->version != required_version) {
 		DRM_INFO("Refusing to load DMC firmware v%u.%u,"
-			 " please use v%u.%u [" FIRMWARE_URL "].\n",
+			 " please use v%u.%u\n",
 			 CSR_VERSION_MAJOR(csr->version),
 			 CSR_VERSION_MINOR(csr->version),
 			 CSR_VERSION_MAJOR(required_version),
@@ -330,7 +327,8 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
 		&fw->data[readcount];
 	if (sizeof(struct intel_package_header) !=
 	    (package_header->header_len * 4)) {
-		DRM_ERROR("Firmware has wrong package header length %u bytes\n",
+		DRM_ERROR("DMC firmware has wrong package header length "
+			  "(%u bytes)\n",
 			  (package_header->header_len * 4));
 		return NULL;
 	}
@@ -351,7 +349,7 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
 		dmc_offset = package_header->fw_info[i].offset;
 	}
 	if (dmc_offset == CSR_DEFAULT_FW_OFFSET) {
-		DRM_ERROR("Firmware not supported for %c stepping\n",
+		DRM_ERROR("DMC firmware not supported for %c stepping\n",
 			  si->stepping);
 		return NULL;
 	}
@@ -360,7 +358,8 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
 	/* Extract dmc_header information. */
 	dmc_header = (struct intel_dmc_header *)&fw->data[readcount];
 	if (sizeof(struct intel_dmc_header) != (dmc_header->header_len)) {
-		DRM_ERROR("Firmware has wrong dmc header length %u bytes\n",
+		DRM_ERROR("DMC firmware has wrong dmc header length "
+			  "(%u bytes)\n",
 			  (dmc_header->header_len));
 		return NULL;
 	}
@@ -368,7 +367,7 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
 
 	/* Cache the dmc header info. */
 	if (dmc_header->mmio_count > ARRAY_SIZE(csr->mmioaddr)) {
-		DRM_ERROR("Firmware has wrong mmio count %u\n",
+		DRM_ERROR("DMC firmware has wrong mmio count %u\n",
 			  dmc_header->mmio_count);
 		return NULL;
 	}
@@ -376,7 +375,7 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
 	for (i = 0; i < dmc_header->mmio_count; i++) {
 		if (dmc_header->mmioaddr[i] < CSR_MMIO_START_RANGE ||
 		    dmc_header->mmioaddr[i] > CSR_MMIO_END_RANGE) {
-			DRM_ERROR(" Firmware has wrong mmio address 0x%x\n",
+			DRM_ERROR("DMC firmware has wrong mmio address 0x%x\n",
 				  dmc_header->mmioaddr[i]);
 			return NULL;
 		}
@@ -387,7 +386,7 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
 	/* fw_size is in dwords, so multiplied by 4 to convert into bytes. */
 	nbytes = dmc_header->fw_size * 4;
 	if (nbytes > CSR_MAX_FW_SIZE) {
-		DRM_ERROR("CSR firmware too big (%u) bytes\n", nbytes);
+		DRM_ERROR("DMC firmware too big (%u bytes)\n", nbytes);
 		return NULL;
 	}
 	csr->dmc_fw_size = dmc_header->fw_size;
@@ -425,9 +424,11 @@ static void csr_load_work_fn(struct work_struct *work)
 			 CSR_VERSION_MINOR(csr->version));
 	} else {
 		dev_notice(dev_priv->drm.dev,
-			   "Failed to load DMC firmware"
-			   " [" FIRMWARE_URL "],"
-			   " disabling runtime power management.\n");
+			   "Failed to load DMC firmware %s."
+			   " Disabling runtime power management.\n",
+			   csr->fw_path);
+		dev_notice(dev_priv->drm.dev, "DMC firmware homepage: %s",
+			   INTEL_UC_FIRMWARE_URL);
 	}
 
 	release_firmware(fw);
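Illustrative sketch (not part of the diff): parse_csr_fw() above repeatedly checks that a firmware header's self-declared length matches the struct the parser is about to dereference, rejecting the blob otherwise. Standalone sketch under an invented header layout:

#include <stdint.h>
#include <stdio.h>

struct css_header {
	uint32_t header_len;	/* in dwords, as in the DMC blobs */
	uint32_t body_size;
};

static int check_header(const struct css_header *h)
{
	if (sizeof(struct css_header) != h->header_len * 4) {
		fprintf(stderr,
			"firmware has wrong CSS header length (%u bytes)\n",
			(unsigned)(h->header_len * 4));
		return -1;	/* refuse to parse further */
	}
	return 0;
}

int main(void)
{
	struct css_header good = { .header_len = sizeof(good) / 4 };
	struct css_header bad = { .header_len = 3 };

	printf("good=%d bad=%d\n", check_header(&good), check_header(&bad));
	return 0;
}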
[File diff suppressed because it is too large]
@@ -2847,7 +2847,7 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
 
 		if (intel_plane_ggtt_offset(state) == plane_config->base) {
 			fb = c->primary->fb;
-			drm_framebuffer_reference(fb);
+			drm_framebuffer_get(fb);
 			goto valid_fb;
 		}
 	}
@@ -2878,7 +2878,7 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
 			  intel_crtc->pipe, PTR_ERR(intel_state->vma));
 
 		intel_state->vma = NULL;
-		drm_framebuffer_unreference(fb);
+		drm_framebuffer_put(fb);
 		return;
 	}
 
@@ -2899,7 +2899,7 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
 	if (i915_gem_object_is_tiled(obj))
 		dev_priv->preserve_bios_swizzle = true;
 
-	drm_framebuffer_reference(fb);
+	drm_framebuffer_get(fb);
 	primary->fb = primary->state->fb = fb;
 	primary->crtc = primary->state->crtc = &intel_crtc->base;
 
@@ -3289,7 +3289,6 @@ static void i9xx_update_primary_plane(struct intel_plane *primary,
 					const struct intel_plane_state *plane_state)
 {
 	struct drm_i915_private *dev_priv = to_i915(primary->base.dev);
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
 	const struct drm_framebuffer *fb = plane_state->base.fb;
 	enum plane plane = primary->plane;
 	u32 linear_offset;
@@ -3298,16 +3297,14 @@ static void i9xx_update_primary_plane(struct intel_plane *primary,
 	int x = plane_state->main.x;
 	int y = plane_state->main.y;
 	unsigned long irqflags;
+	u32 dspaddr_offset;
 
 	linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
 
 	if (INTEL_GEN(dev_priv) >= 4)
-		crtc->dspaddr_offset = plane_state->main.offset;
+		dspaddr_offset = plane_state->main.offset;
 	else
-		crtc->dspaddr_offset = linear_offset;
-
-	crtc->adjusted_x = x;
-	crtc->adjusted_y = y;
+		dspaddr_offset = linear_offset;
 
 	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
 
@@ -3333,18 +3330,18 @@ static void i9xx_update_primary_plane(struct intel_plane *primary,
 	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
 		I915_WRITE_FW(DSPSURF(plane),
 			      intel_plane_ggtt_offset(plane_state) +
-			      crtc->dspaddr_offset);
+			      dspaddr_offset);
 		I915_WRITE_FW(DSPOFFSET(plane), (y << 16) | x);
 	} else if (INTEL_GEN(dev_priv) >= 4) {
 		I915_WRITE_FW(DSPSURF(plane),
 			      intel_plane_ggtt_offset(plane_state) +
-			      crtc->dspaddr_offset);
+			      dspaddr_offset);
 		I915_WRITE_FW(DSPTILEOFF(plane), (y << 16) | x);
 		I915_WRITE_FW(DSPLINOFF(plane), linear_offset);
 	} else {
 		I915_WRITE_FW(DSPADDR(plane),
 			      intel_plane_ggtt_offset(plane_state) +
-			      crtc->dspaddr_offset);
+			      dspaddr_offset);
 	}
 	POSTING_READ_FW(reg);
 
@@ -3544,100 +3541,6 @@ u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
 	return plane_ctl;
 }
 
-static void skylake_update_primary_plane(struct intel_plane *plane,
-					 const struct intel_crtc_state *crtc_state,
-					 const struct intel_plane_state *plane_state)
-{
-	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-	const struct drm_framebuffer *fb = plane_state->base.fb;
-	enum plane_id plane_id = plane->id;
-	enum pipe pipe = plane->pipe;
-	u32 plane_ctl = plane_state->ctl;
-	unsigned int rotation = plane_state->base.rotation;
-	u32 stride = skl_plane_stride(fb, 0, rotation);
-	u32 aux_stride = skl_plane_stride(fb, 1, rotation);
-	u32 surf_addr = plane_state->main.offset;
-	int scaler_id = plane_state->scaler_id;
-	int src_x = plane_state->main.x;
-	int src_y = plane_state->main.y;
-	int src_w = drm_rect_width(&plane_state->base.src) >> 16;
-	int src_h = drm_rect_height(&plane_state->base.src) >> 16;
-	int dst_x = plane_state->base.dst.x1;
-	int dst_y = plane_state->base.dst.y1;
-	int dst_w = drm_rect_width(&plane_state->base.dst);
-	int dst_h = drm_rect_height(&plane_state->base.dst);
-	unsigned long irqflags;
-
-	/* Sizes are 0 based */
-	src_w--;
-	src_h--;
-	dst_w--;
-	dst_h--;
-
-	crtc->dspaddr_offset = surf_addr;
-
-	crtc->adjusted_x = src_x;
-	crtc->adjusted_y = src_y;
-
-	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
-
-	if (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) {
-		I915_WRITE_FW(PLANE_COLOR_CTL(pipe, plane_id),
-			      PLANE_COLOR_PIPE_GAMMA_ENABLE |
-			      PLANE_COLOR_PIPE_CSC_ENABLE |
-			      PLANE_COLOR_PLANE_GAMMA_DISABLE);
-	}
-
-	I915_WRITE_FW(PLANE_CTL(pipe, plane_id), plane_ctl);
-	I915_WRITE_FW(PLANE_OFFSET(pipe, plane_id), (src_y << 16) | src_x);
-	I915_WRITE_FW(PLANE_STRIDE(pipe, plane_id), stride);
-	I915_WRITE_FW(PLANE_SIZE(pipe, plane_id), (src_h << 16) | src_w);
-	I915_WRITE_FW(PLANE_AUX_DIST(pipe, plane_id),
-		      (plane_state->aux.offset - surf_addr) | aux_stride);
-	I915_WRITE_FW(PLANE_AUX_OFFSET(pipe, plane_id),
-		      (plane_state->aux.y << 16) | plane_state->aux.x);
-
-	if (scaler_id >= 0) {
-		uint32_t ps_ctrl = 0;
-
-		WARN_ON(!dst_w || !dst_h);
-		ps_ctrl = PS_SCALER_EN | PS_PLANE_SEL(plane_id) |
-			crtc_state->scaler_state.scalers[scaler_id].mode;
-		I915_WRITE_FW(SKL_PS_CTRL(pipe, scaler_id), ps_ctrl);
-		I915_WRITE_FW(SKL_PS_PWR_GATE(pipe, scaler_id), 0);
-		I915_WRITE_FW(SKL_PS_WIN_POS(pipe, scaler_id), (dst_x << 16) | dst_y);
-		I915_WRITE_FW(SKL_PS_WIN_SZ(pipe, scaler_id), (dst_w << 16) | dst_h);
-		I915_WRITE_FW(PLANE_POS(pipe, plane_id), 0);
-	} else {
-		I915_WRITE_FW(PLANE_POS(pipe, plane_id), (dst_y << 16) | dst_x);
-	}
-
-	I915_WRITE_FW(PLANE_SURF(pipe, plane_id),
-		      intel_plane_ggtt_offset(plane_state) + surf_addr);
-
-	POSTING_READ_FW(PLANE_SURF(pipe, plane_id));
-
-	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
-}
-
-static void skylake_disable_primary_plane(struct intel_plane *primary,
-					  struct intel_crtc *crtc)
-{
-	struct drm_i915_private *dev_priv = to_i915(primary->base.dev);
-	enum plane_id plane_id = primary->id;
-	enum pipe pipe = primary->pipe;
-	unsigned long irqflags;
-
-	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
-
-	I915_WRITE_FW(PLANE_CTL(pipe, plane_id), 0);
-	I915_WRITE_FW(PLANE_SURF(pipe, plane_id), 0);
-	POSTING_READ_FW(PLANE_SURF(pipe, plane_id));
-
-	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
-}
-
 static int
 __intel_display_resume(struct drm_device *dev,
 		       struct drm_atomic_state *state,
@@ -6139,6 +6042,19 @@ struct intel_connector *intel_connector_alloc(void)
 	return connector;
 }
 
+/*
+ * Free the bits allocated by intel_connector_alloc.
+ * This should only be used after intel_connector_alloc has returned
+ * successfully, and before drm_connector_init returns successfully.
+ * Otherwise the destroy callbacks for the connector and the state should
+ * take care of proper cleanup/free
+ */
+void intel_connector_free(struct intel_connector *connector)
+{
+	kfree(to_intel_digital_connector_state(connector->base.state));
+	kfree(connector);
+}
+
 /* Simple connector->get_hw_state implementation for encoders that support only
  * one connector and no cloning and hence the encoder state determines the state
  * of the connector. */
@@ -6522,11 +6438,9 @@ static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
 
 	crtc_state->dpll_hw_state.fp0 = fp;
 
-	crtc->lowfreq_avail = false;
 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
 	    reduced_clock) {
 		crtc_state->dpll_hw_state.fp1 = fp2;
-		crtc->lowfreq_avail = true;
 	} else {
 		crtc_state->dpll_hw_state.fp1 = fp;
 	}
@@ -7221,15 +7135,6 @@ static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
 		}
 	}
 
-	if (HAS_PIPE_CXSR(dev_priv)) {
-		if (intel_crtc->lowfreq_avail) {
-			DRM_DEBUG_KMS("enabling CxSR downclocking\n");
-			pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
-		} else {
-			DRM_DEBUG_KMS("disabling CxSR downclocking\n");
-		}
-	}
-
 	if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
 		if (INTEL_GEN(dev_priv) < 4 ||
 		    intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_SDVO))
@@ -8365,8 +8270,6 @@ static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
 	memset(&crtc_state->dpll_hw_state, 0,
 	       sizeof(crtc_state->dpll_hw_state));
 
-	crtc->lowfreq_avail = false;
-
 	/* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
 	if (!crtc_state->has_pch_encoder)
 		return 0;
@@ -9025,8 +8928,6 @@ static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
 		}
 	}
 
-	crtc->lowfreq_avail = false;
-
 	return 0;
 }
@@ -9846,7 +9747,7 @@ mode_fits_in_fbdev(struct drm_device *dev,
 	if (obj->base.size < mode->vdisplay * fb->pitches[0])
 		return NULL;
 
-	drm_framebuffer_reference(fb);
+	drm_framebuffer_get(fb);
 	return fb;
 #else
 	return NULL;
@@ -10027,7 +9928,7 @@ int intel_get_load_detect_pipe(struct drm_connector *connector,
 	if (ret)
 		goto fail;
 
-	drm_framebuffer_unreference(fb);
+	drm_framebuffer_put(fb);
 
 	ret = drm_atomic_set_mode_for_crtc(&crtc_state->base, mode);
 	if (ret)
@@ -10662,6 +10563,52 @@ intel_dump_m_n_config(struct intel_crtc_state *pipe_config, char *id,
 		      m_n->link_m, m_n->link_n, m_n->tu);
 }
 
+#define OUTPUT_TYPE(x) [INTEL_OUTPUT_ ## x] = #x
+
+static const char * const output_type_str[] = {
+	OUTPUT_TYPE(UNUSED),
+	OUTPUT_TYPE(ANALOG),
+	OUTPUT_TYPE(DVO),
+	OUTPUT_TYPE(SDVO),
+	OUTPUT_TYPE(LVDS),
+	OUTPUT_TYPE(TVOUT),
+	OUTPUT_TYPE(HDMI),
+	OUTPUT_TYPE(DP),
+	OUTPUT_TYPE(EDP),
+	OUTPUT_TYPE(DSI),
+	OUTPUT_TYPE(UNKNOWN),
+	OUTPUT_TYPE(DP_MST),
+};
+
+#undef OUTPUT_TYPE
+
+static void snprintf_output_types(char *buf, size_t len,
+				  unsigned int output_types)
+{
+	char *str = buf;
+	int i;
+
+	str[0] = '\0';
+
+	for (i = 0; i < ARRAY_SIZE(output_type_str); i++) {
+		int r;
+
+		if ((output_types & BIT(i)) == 0)
+			continue;
+
+		r = snprintf(str, len, "%s%s",
+			     str != buf ? "," : "", output_type_str[i]);
+		if (r >= len)
+			break;
+		str += r;
+		len -= r;
+
+		output_types &= ~BIT(i);
+	}
+
+	WARN_ON_ONCE(output_types != 0);
+}
+
 static void intel_dump_pipe_config(struct intel_crtc *crtc,
 				   struct intel_crtc_state *pipe_config,
 				   const char *context)
@@ -10672,10 +10619,15 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
 	struct intel_plane *intel_plane;
 	struct intel_plane_state *state;
 	struct drm_framebuffer *fb;
+	char buf[64];
 
 	DRM_DEBUG_KMS("[CRTC:%d:%s]%s\n",
 		      crtc->base.base.id, crtc->base.name, context);
 
+	snprintf_output_types(buf, sizeof(buf), pipe_config->output_types);
+	DRM_DEBUG_KMS("output_types: %s (0x%x)\n",
+		      buf, pipe_config->output_types);
+
 	DRM_DEBUG_KMS("cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n",
 		      transcoder_name(pipe_config->cpu_transcoder),
 		      pipe_config->pipe_bpp, pipe_config->dither);
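Illustrative sketch (not part of the diff): snprintf_output_types() above walks the set bits of a flags word and appends each name into a bounded buffer, shrinking the remaining length after every write so the buffer can never overflow. The same logic as a standalone program, with a shortened name table:

#include <stdio.h>

static const char * const names[] = { "ANALOG", "LVDS", "HDMI", "DP" };

static void flags_to_str(char *buf, size_t len, unsigned int flags)
{
	char *str = buf;
	size_t i;

	str[0] = '\0';

	for (i = 0; i < sizeof(names) / sizeof(names[0]); i++) {
		int r;

		if (!(flags & (1u << i)))
			continue;

		r = snprintf(str, len, "%s%s", str != buf ? "," : "", names[i]);
		if (r < 0 || (size_t)r >= len)
			break;	/* truncated: stop appending */
		str += r;
		len -= r;
	}
}

int main(void)
{
	char buf[64];

	flags_to_str(buf, sizeof(buf), (1u << 1) | (1u << 3));
	printf("%s\n", buf);	/* prints: LVDS,DP */
	return 0;
}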
@@ -13229,8 +13181,8 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
 		num_formats = ARRAY_SIZE(skl_primary_formats);
 		modifiers = skl_format_modifiers_ccs;
 
-		primary->update_plane = skylake_update_primary_plane;
-		primary->disable_plane = skylake_disable_primary_plane;
+		primary->update_plane = skl_update_plane;
+		primary->disable_plane = skl_disable_plane;
 	} else if (INTEL_GEN(dev_priv) >= 9) {
 		intel_primary_formats = skl_primary_formats;
 		num_formats = ARRAY_SIZE(skl_primary_formats);
@@ -13239,8 +13191,8 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
 		else
 			modifiers = skl_format_modifiers_noccs;
 
-		primary->update_plane = skylake_update_primary_plane;
-		primary->disable_plane = skylake_disable_primary_plane;
+		primary->update_plane = skl_update_plane;
+		primary->disable_plane = skl_disable_plane;
 	} else if (INTEL_GEN(dev_priv) >= 4) {
 		intel_primary_formats = i965_primary_formats;
 		num_formats = ARRAY_SIZE(i965_primary_formats);
@@ -1007,7 +1007,7 @@ static uint32_t g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
 	else
 		precharge = 5;
 
-	if (IS_BROADWELL(dev_priv) && intel_dig_port->port == PORT_A)
+	if (IS_BROADWELL(dev_priv))
 		timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
 	else
 		timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
@@ -1032,7 +1032,7 @@ static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
 	       DP_AUX_CH_CTL_DONE |
 	       (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
 	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
-	       DP_AUX_CH_CTL_TIME_OUT_1600us |
+	       DP_AUX_CH_CTL_TIME_OUT_MAX |
 	       DP_AUX_CH_CTL_RECEIVE_ERROR |
 	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
 	       DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(32) |
@@ -1832,6 +1832,8 @@ intel_dp_compute_config(struct intel_encoder *encoder,
 	if (!HAS_DDI(dev_priv))
 		intel_dp_set_clock(encoder, pipe_config);
 
+	intel_psr_compute_config(intel_dp, pipe_config);
+
 	return true;
 }
 
@@ -3153,9 +3155,7 @@ intel_dp_voltage_max(struct intel_dp *intel_dp)
 	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
 	enum port port = dp_to_dig_port(intel_dp)->port;
 
-	if (IS_GEN9_LP(dev_priv))
-		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
-	else if (INTEL_GEN(dev_priv) >= 9) {
+	if (INTEL_GEN(dev_priv) >= 9) {
 		struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
 		return intel_ddi_dp_voltage_max(encoder);
 	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
@@ -454,32 +454,52 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
 	struct intel_dp *intel_dp = container_of(mgr, struct intel_dp, mst_mgr);
 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
 	struct drm_device *dev = intel_dig_port->base.base.dev;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_connector *intel_connector;
 	struct drm_connector *connector;
-	int i;
+	enum pipe pipe;
+	int ret;
 
 	intel_connector = intel_connector_alloc();
 	if (!intel_connector)
 		return NULL;
 
 	connector = &intel_connector->base;
-	drm_connector_init(dev, connector, &intel_dp_mst_connector_funcs, DRM_MODE_CONNECTOR_DisplayPort);
+	ret = drm_connector_init(dev, connector, &intel_dp_mst_connector_funcs,
+				 DRM_MODE_CONNECTOR_DisplayPort);
+	if (ret) {
+		intel_connector_free(intel_connector);
+		return NULL;
+	}
+
 	drm_connector_helper_add(connector, &intel_dp_mst_connector_helper_funcs);
 
 	intel_connector->get_hw_state = intel_dp_mst_get_hw_state;
 	intel_connector->mst_port = intel_dp;
 	intel_connector->port = port;
 
-	for (i = PIPE_A; i <= PIPE_C; i++) {
-		drm_mode_connector_attach_encoder(&intel_connector->base,
-						  &intel_dp->mst_encoders[i]->base.base);
+	for_each_pipe(dev_priv, pipe) {
+		struct drm_encoder *enc =
+			&intel_dp->mst_encoders[pipe]->base.base;
+
+		ret = drm_mode_connector_attach_encoder(&intel_connector->base,
+							enc);
+		if (ret)
+			goto err;
 	}
 
 	drm_object_attach_property(&connector->base, dev->mode_config.path_property, 0);
 	drm_object_attach_property(&connector->base, dev->mode_config.tile_property, 0);
 
-	drm_mode_connector_set_path_property(connector, pathprop);
+	ret = drm_mode_connector_set_path_property(connector, pathprop);
+	if (ret)
+		goto err;
+
 	return connector;
+
+err:
+	drm_connector_cleanup(connector);
+	return NULL;
 }
 
 static void intel_dp_register_mst_connector(struct drm_connector *connector)
@@ -569,11 +589,12 @@ intel_dp_create_fake_mst_encoder(struct intel_digital_port *intel_dig_port, enum pipe pipe)
 static bool
 intel_dp_create_fake_mst_encoders(struct intel_digital_port *intel_dig_port)
 {
-	int i;
 	struct intel_dp *intel_dp = &intel_dig_port->dp;
+	struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
+	enum pipe pipe;
 
-	for (i = PIPE_A; i <= PIPE_C; i++)
-		intel_dp->mst_encoders[i] = intel_dp_create_fake_mst_encoder(intel_dig_port, i);
+	for_each_pipe(dev_priv, pipe)
+		intel_dp->mst_encoders[pipe] = intel_dp_create_fake_mst_encoder(intel_dig_port, pipe);
 	return true;
 }
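Illustrative sketch (not part of the diff): the MST connector fix above makes every failure after drm_connector_init() unwind through a single cleanup label instead of leaking the partially built connector. The goto-unwind idiom as a standalone program, with stand-in resources:

#include <stdio.h>
#include <stdlib.h>

static int attach(int i) { return i == 2 ? -1 : 0; }	/* simulate a failure on i == 2 */

static void *create_connector(void)
{
	void *conn = malloc(16);
	int i;

	if (!conn)
		return NULL;

	for (i = 0; i < 3; i++) {
		if (attach(i))
			goto err;	/* like: goto err -> drm_connector_cleanup() */
	}

	return conn;

err:
	free(conn);	/* one place undoes everything done so far */
	return NULL;
}

int main(void)
{
	printf("connector: %p\n", create_connector());
	return 0;
}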
|
|
@ -718,6 +718,9 @@ struct intel_crtc_state {
|
|||
struct intel_link_m_n dp_m2_n2;
|
||||
bool has_drrs;
|
||||
|
||||
bool has_psr;
|
||||
bool has_psr2;
|
||||
|
||||
/*
|
||||
* Frequence the dpll for the port should run at. Differs from the
|
||||
* adjusted dotclock e.g. for DP or 12bpc hdmi mode. This is also
|
||||
|
@ -800,18 +803,10 @@ struct intel_crtc {
|
|||
* some outputs connected to this crtc.
|
||||
*/
|
||||
bool active;
|
||||
bool lowfreq_avail;
|
||||
u8 plane_ids_mask;
|
||||
unsigned long long enabled_power_domains;
|
||||
struct intel_overlay *overlay;
|
||||
|
||||
/* Display surface base address adjustement for pageflips. Note that on
|
||||
* gen4+ this only adjusts up to a tile, offsets within a tile are
|
||||
* handled in the hw itself (with the TILEOFF register). */
|
||||
u32 dspaddr_offset;
|
||||
int adjusted_x;
|
||||
int adjusted_y;
|
||||
|
||||
struct intel_crtc_state *config;
|
||||
|
||||
/* global reset count when the last flip was submitted */
|
||||
|
@ -1066,7 +1061,7 @@ struct intel_digital_port {
|
|||
|
||||
void (*write_infoframe)(struct drm_encoder *encoder,
|
||||
const struct intel_crtc_state *crtc_state,
|
||||
enum hdmi_infoframe_type type,
|
||||
unsigned int type,
|
||||
const void *frame, ssize_t len);
|
||||
void (*set_infoframes)(struct drm_encoder *encoder,
|
||||
bool enable,
|
||||
|
@ -1360,6 +1355,7 @@ void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv);
|
|||
void intel_encoder_destroy(struct drm_encoder *encoder);
|
||||
int intel_connector_init(struct intel_connector *);
|
||||
struct intel_connector *intel_connector_alloc(void);
|
||||
void intel_connector_free(struct intel_connector *connector);
|
||||
bool intel_connector_get_hw_state(struct intel_connector *connector);
|
||||
void intel_connector_attach_encoder(struct intel_connector *connector,
|
||||
struct intel_encoder *encoder);
|
||||
|
@ -1764,6 +1760,8 @@ void intel_psr_flush(struct drm_i915_private *dev_priv,
|
|||
void intel_psr_init(struct drm_i915_private *dev_priv);
|
||||
void intel_psr_single_frame_update(struct drm_i915_private *dev_priv,
|
||||
unsigned frontbuffer_bits);
|
||||
void intel_psr_compute_config(struct intel_dp *intel_dp,
|
||||
struct intel_crtc_state *crtc_state);
|
||||
|
||||
/* intel_runtime_pm.c */
|
||||
int intel_power_domains_init(struct drm_i915_private *);
|
||||
|
@ -1923,6 +1921,10 @@ int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
|
|||
struct drm_file *file_priv);
|
||||
void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state);
|
||||
void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state);
|
||||
void skl_update_plane(struct intel_plane *plane,
|
||||
const struct intel_crtc_state *crtc_state,
|
||||
const struct intel_plane_state *plane_state);
|
||||
void skl_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc);
|
||||
|
||||
/* intel_tv.c */
|
||||
void intel_tv_init(struct drm_i915_private *dev_priv);
|
||||
|
|
|
@@ -1751,42 +1751,13 @@ void intel_dsi_init(struct drm_i915_private *dev_priv)
 	else
 		intel_encoder->crtc_mask = BIT(PIPE_B);
 
-	if (dev_priv->vbt.dsi.config->dual_link) {
+	if (dev_priv->vbt.dsi.config->dual_link)
 		intel_dsi->ports = BIT(PORT_A) | BIT(PORT_C);
-
-		switch (dev_priv->vbt.dsi.config->dl_dcs_backlight_ports) {
-		case DL_DCS_PORT_A:
-			intel_dsi->dcs_backlight_ports = BIT(PORT_A);
-			break;
-		case DL_DCS_PORT_C:
-			intel_dsi->dcs_backlight_ports = BIT(PORT_C);
-			break;
-		default:
-		case DL_DCS_PORT_A_AND_C:
-			intel_dsi->dcs_backlight_ports = BIT(PORT_A) | BIT(PORT_C);
-			break;
-		}
-
-		switch (dev_priv->vbt.dsi.config->dl_dcs_cabc_ports) {
-		case DL_DCS_PORT_A:
-			intel_dsi->dcs_cabc_ports = BIT(PORT_A);
-			break;
-		case DL_DCS_PORT_C:
-			intel_dsi->dcs_cabc_ports = BIT(PORT_C);
-			break;
-		default:
-		case DL_DCS_PORT_A_AND_C:
-			intel_dsi->dcs_cabc_ports = BIT(PORT_A) | BIT(PORT_C);
-			break;
-		}
-	} else {
+	else
 		intel_dsi->ports = BIT(port);
-		intel_dsi->dcs_backlight_ports = BIT(port);
-		intel_dsi->dcs_cabc_ports = BIT(port);
-	}
 
-	if (!dev_priv->vbt.dsi.config->cabc_supported)
-		intel_dsi->dcs_cabc_ports = 0;
+	intel_dsi->dcs_backlight_ports = dev_priv->vbt.dsi.bl_ports;
+	intel_dsi->dcs_cabc_ports = dev_priv->vbt.dsi.cabc_ports;
 
 	/* Create a DSI host (and a device) for each port. */
 	for_each_dsi_port(port, intel_dsi->ports) {
@@ -25,6 +25,7 @@
 #include <drm/drm_print.h>
 
 #include "i915_drv.h"
+#include "i915_vgpu.h"
 #include "intel_ringbuffer.h"
 #include "intel_lrc.h"
 
@@ -386,10 +387,6 @@ static void intel_engine_init_timeline(struct intel_engine_cs *engine)
 
 static bool csb_force_mmio(struct drm_i915_private *i915)
 {
-	/* GVT emulation depends upon intercepting CSB mmio */
-	if (intel_vgpu_active(i915))
-		return true;
-
 	/*
 	 * IOMMU adds unpredictable latency causing the CSB write (from the
 	 * GPU into the HWSP) to only be visible some time after the interrupt
@@ -398,6 +395,10 @@ static bool csb_force_mmio(struct drm_i915_private *i915)
 	if (intel_vtd_active())
 		return true;
 
+	/* Older GVT emulation depends upon intercepting CSB mmio */
+	if (intel_vgpu_active(i915) && !intel_vgpu_has_hwsp_emulation(i915))
+		return true;
+
 	return false;
 }
 
@@ -1625,8 +1626,10 @@ static void print_request(struct drm_printer *m,
 			  struct drm_i915_gem_request *rq,
 			  const char *prefix)
 {
-	drm_printf(m, "%s%x [%x:%x] prio=%d @ %dms: %s\n", prefix,
-		   rq->global_seqno, rq->ctx->hw_id, rq->fence.seqno,
+	drm_printf(m, "%s%x%s [%x:%x] prio=%d @ %dms: %s\n", prefix,
+		   rq->global_seqno,
+		   i915_gem_request_completed(rq) ? "!" : "",
+		   rq->ctx->hw_id, rq->fence.seqno,
 		   rq->priotree.priority,
 		   jiffies_to_msecs(jiffies - rq->emitted_jiffies),
 		   rq->timeline->common->name);
@@ -1634,8 +1637,9 @@ static void print_request(struct drm_printer *m,
 
 void intel_engine_dump(struct intel_engine_cs *engine, struct drm_printer *m)
 {
-	struct intel_breadcrumbs *b = &engine->breadcrumbs;
-	struct i915_gpu_error *error = &engine->i915->gpu_error;
+	struct intel_breadcrumbs * const b = &engine->breadcrumbs;
+	const struct intel_engine_execlists * const execlists = &engine->execlists;
+	struct i915_gpu_error * const error = &engine->i915->gpu_error;
 	struct drm_i915_private *dev_priv = engine->i915;
 	struct drm_i915_gem_request *rq;
 	struct rb_node *rb;
@@ -1699,7 +1703,6 @@ void intel_engine_dump(struct intel_engine_cs *engine, struct drm_printer *m)
 
 	if (i915_modparams.enable_execlists) {
 		const u32 *hws = &engine->status_page.page_addr[I915_HWS_CSB_BUF0_INDEX];
-		struct intel_engine_execlists * const execlists = &engine->execlists;
 		u32 ptr, read, write;
 		unsigned int idx;
 
@@ -1747,17 +1750,6 @@ void intel_engine_dump(struct intel_engine_cs *engine, struct drm_printer *m)
 			}
 		}
 		rcu_read_unlock();
-
-		spin_lock_irq(&engine->timeline->lock);
-		for (rb = execlists->first; rb; rb = rb_next(rb)) {
-			struct i915_priolist *p =
-				rb_entry(rb, typeof(*p), node);
-
-			list_for_each_entry(rq, &p->requests,
-					    priotree.link)
-				print_request(m, rq, "\t\tQ ");
-		}
-		spin_unlock_irq(&engine->timeline->lock);
 	} else if (INTEL_GEN(dev_priv) > 6) {
 		drm_printf(m, "\tPP_DIR_BASE: 0x%08x\n",
 			   I915_READ(RING_PP_DIR_BASE(engine)));
@@ -1767,6 +1759,18 @@ void intel_engine_dump(struct intel_engine_cs *engine, struct drm_printer *m)
 			   I915_READ(RING_PP_DIR_DCLV(engine)));
 	}
 
+	spin_lock_irq(&engine->timeline->lock);
+	list_for_each_entry(rq, &engine->timeline->requests, link)
+		print_request(m, rq, "\t\tE ");
+	for (rb = execlists->first; rb; rb = rb_next(rb)) {
+		struct i915_priolist *p =
+			rb_entry(rb, typeof(*p), node);
+
+		list_for_each_entry(rq, &p->requests, priotree.link)
+			print_request(m, rq, "\t\tQ ");
+	}
+	spin_unlock_irq(&engine->timeline->lock);
+
 	spin_lock_irq(&b->rb_lock);
 	for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
 		struct intel_wait *w = rb_entry(rb, typeof(*w), node);
@@ -69,9 +69,9 @@ static inline bool no_fbc_on_multiple_pipes(struct drm_i915_private *dev_priv)
  * address we program because it starts at the real start of the buffer, so we
  * have to take this into consideration here.
  */
-static unsigned int get_crtc_fence_y_offset(struct intel_crtc *crtc)
+static unsigned int get_crtc_fence_y_offset(struct intel_fbc *fbc)
 {
-	return crtc->base.y - crtc->adjusted_y;
+	return fbc->state_cache.plane.y - fbc->state_cache.plane.adjusted_y;
 }
 
 /*
@@ -727,8 +727,8 @@ static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc)
 
 	intel_fbc_get_plane_source_size(&fbc->state_cache, &effective_w,
 					&effective_h);
-	effective_w += crtc->adjusted_x;
-	effective_h += crtc->adjusted_y;
+	effective_w += fbc->state_cache.plane.adjusted_x;
+	effective_h += fbc->state_cache.plane.adjusted_y;
 
 	return effective_w <= max_w && effective_h <= max_h;
 }
@@ -757,6 +757,9 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
 	cache->plane.src_w = drm_rect_width(&plane_state->base.src) >> 16;
 	cache->plane.src_h = drm_rect_height(&plane_state->base.src) >> 16;
 	cache->plane.visible = plane_state->base.visible;
+	cache->plane.adjusted_x = plane_state->main.x;
+	cache->plane.adjusted_y = plane_state->main.y;
+	cache->plane.y = plane_state->base.src.y1 >> 16;
 
 	if (!cache->plane.visible)
 		return;
@@ -888,7 +891,7 @@ static void intel_fbc_get_reg_params(struct intel_crtc *crtc,
 
 	params->crtc.pipe = crtc->pipe;
 	params->crtc.plane = crtc->plane;
-	params->crtc.fence_y_offset = get_crtc_fence_y_offset(crtc);
+	params->crtc.fence_y_offset = get_crtc_fence_y_offset(fbc);
 
 	params->fb.format = cache->fb.format;
 	params->fb.stride = cache->fb.stride;
@@ -189,7 +189,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
 			      " releasing it\n",
 			      intel_fb->base.width, intel_fb->base.height,
 			      sizes->fb_width, sizes->fb_height);
-		drm_framebuffer_unreference(&intel_fb->base);
+		drm_framebuffer_put(&intel_fb->base);
 		intel_fb = ifbdev->fb = NULL;
 	}
 	if (!intel_fb || WARN_ON(!intel_fb->obj)) {
@@ -627,7 +627,7 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
 	ifbdev->preferred_bpp = fb->base.format->cpp[0] * 8;
 	ifbdev->fb = fb;
 
-	drm_framebuffer_reference(&ifbdev->fb->base);
+	drm_framebuffer_get(&ifbdev->fb->base);
 
 	/* Final pass to check if any active pipes don't have fbs */
 	for_each_crtc(dev, crtc) {
@@ -67,6 +67,99 @@ void intel_guc_init_early(struct intel_guc *guc)
 	guc->notify = gen8_guc_raise_irq;
 }
 
+static u32 get_gt_type(struct drm_i915_private *dev_priv)
+{
+	/* XXX: GT type based on PCI device ID? field seems unused by fw */
+	return 0;
+}
+
+static u32 get_core_family(struct drm_i915_private *dev_priv)
+{
+	u32 gen = INTEL_GEN(dev_priv);
+
+	switch (gen) {
+	case 9:
+		return GUC_CORE_FAMILY_GEN9;
+
+	default:
+		MISSING_CASE(gen);
+		return GUC_CORE_FAMILY_UNKNOWN;
+	}
+}
+
+/*
+ * Initialise the GuC parameter block before starting the firmware
+ * transfer. These parameters are read by the firmware on startup
+ * and cannot be changed thereafter.
+ */
+void intel_guc_init_params(struct intel_guc *guc)
+{
+	struct drm_i915_private *dev_priv = guc_to_i915(guc);
+	u32 params[GUC_CTL_MAX_DWORDS];
+	int i;
+
+	memset(params, 0, sizeof(params));
+
+	params[GUC_CTL_DEVICE_INFO] |=
+		(get_gt_type(dev_priv) << GUC_CTL_GT_TYPE_SHIFT) |
+		(get_core_family(dev_priv) << GUC_CTL_CORE_FAMILY_SHIFT);
+
+	/*
+	 * GuC ARAT increment is 10 ns. GuC default scheduler quantum is one
+	 * second. This ARAR is calculated by:
+	 * Scheduler-Quantum-in-ns / ARAT-increment-in-ns = 1000000000 / 10
+	 */
+	params[GUC_CTL_ARAT_HIGH] = 0;
+	params[GUC_CTL_ARAT_LOW] = 100000000;
+
+	params[GUC_CTL_WA] |= GUC_CTL_WA_UK_BY_DRIVER;
+
+	params[GUC_CTL_FEATURE] |= GUC_CTL_DISABLE_SCHEDULER |
+			GUC_CTL_VCS2_ENABLED;
+
+	params[GUC_CTL_LOG_PARAMS] = guc->log.flags;
+
+	if (i915_modparams.guc_log_level >= 0) {
+		params[GUC_CTL_DEBUG] =
+			i915_modparams.guc_log_level << GUC_LOG_VERBOSITY_SHIFT;
+	} else {
+		params[GUC_CTL_DEBUG] = GUC_LOG_DISABLED;
+	}
+
+	/* If GuC submission is enabled, set up additional parameters here */
+	if (i915_modparams.enable_guc_submission) {
+		u32 ads = guc_ggtt_offset(guc->ads_vma) >> PAGE_SHIFT;
+		u32 pgs = guc_ggtt_offset(dev_priv->guc.stage_desc_pool);
+		u32 ctx_in_16 = GUC_MAX_STAGE_DESCRIPTORS / 16;
+
+		params[GUC_CTL_DEBUG] |= ads << GUC_ADS_ADDR_SHIFT;
+		params[GUC_CTL_DEBUG] |= GUC_ADS_ENABLED;
+
+		pgs >>= PAGE_SHIFT;
+		params[GUC_CTL_CTXINFO] = (pgs << GUC_CTL_BASE_ADDR_SHIFT) |
+			(ctx_in_16 << GUC_CTL_CTXNUM_IN16_SHIFT);
+
+		params[GUC_CTL_FEATURE] |= GUC_CTL_KERNEL_SUBMISSIONS;
+
+		/* Unmask this bit to enable the GuC's internal scheduler */
+		params[GUC_CTL_FEATURE] &= ~GUC_CTL_DISABLE_SCHEDULER;
+	}
+
+	/*
+	 * All SOFT_SCRATCH registers are in FORCEWAKE_BLITTER domain and
+	 * they are power context saved so it's ok to release forcewake
+	 * when we are done here and take it again at xfer time.
+	 */
+	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_BLITTER);
+
+	I915_WRITE(SOFT_SCRATCH(0), 0);
+
+	for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
+		I915_WRITE(SOFT_SCRATCH(1 + i), params[i]);
+
+	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_BLITTER);
+}
+
 int intel_guc_send_nop(struct intel_guc *guc, const u32 *action, u32 len)
 {
 	WARN(1, "Unexpected send: action=%#x\n", *action);
@@ -263,3 +356,14 @@ struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size)
 	i915_gem_object_put(obj);
 	return vma;
 }
+
+u32 intel_guc_wopcm_size(struct drm_i915_private *dev_priv)
+{
+	u32 wopcm_size = GUC_WOPCM_TOP;
+
+	/* On BXT, the top of WOPCM is reserved for RC6 context */
+	if (IS_GEN9_LP(dev_priv))
+		wopcm_size -= BXT_GUC_WOPCM_RC6_RESERVED;
+
+	return wopcm_size;
+}
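Illustrative sketch (not part of the diff): intel_guc_init_params() above packs firmware parameters as shifted fields into an array of dwords that is then written out register by register. Standalone sketch with an invented field layout:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define CTL_MAX_DWORDS		4
#define CTL_DEVICE_INFO		0
#define GT_TYPE_SHIFT		0
#define CORE_FAMILY_SHIFT	7

int main(void)
{
	uint32_t params[CTL_MAX_DWORDS];
	int i;

	memset(params, 0, sizeof(params));

	/* OR shifted fields into the dword, as the init_params code does */
	params[CTL_DEVICE_INFO] |= (0u << GT_TYPE_SHIFT) |
				   (9u << CORE_FAMILY_SHIFT);

	/* the driver would then I915_WRITE() each dword to a scratch register */
	for (i = 0; i < CTL_MAX_DWORDS; i++)
		printf("param[%d] = 0x%08x\n", i, (unsigned)params[i]);
	return 0;
}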
@@ -26,6 +26,7 @@
 #define _INTEL_GUC_H_
 
 #include "intel_uncore.h"
+#include "intel_guc_fw.h"
 #include "intel_guc_fwif.h"
 #include "intel_guc_ct.h"
 #include "intel_guc_log.h"
@@ -33,6 +34,11 @@
 #include "i915_guc_reg.h"
 #include "i915_vma.h"
 
+/*
+ * Top level structure of GuC. It handles firmware loading and manages client
+ * pool and doorbells. intel_guc owns a i915_guc_client to replace the legacy
+ * ExecList submission.
+ */
 struct intel_guc {
 	struct intel_uc_fw fw;
 	struct intel_guc_log log;
@@ -83,6 +89,12 @@ static inline void intel_guc_notify(struct intel_guc *guc)
 	guc->notify(guc);
 }
 
+/*
+ * GuC does not allow any gfx GGTT address that falls into range [0, WOPCM_TOP),
+ * which is reserved for Boot ROM, SRAM and WOPCM. Currently this top address is
+ * 512K. In order to exclude 0-512K address space from GGTT, all gfx objects
+ * used by GuC is pinned with PIN_OFFSET_BIAS along with size of WOPCM.
+ */
 static inline u32 guc_ggtt_offset(struct i915_vma *vma)
 {
 	u32 offset = i915_ggtt_offset(vma);
@@ -95,6 +107,7 @@ static inline u32 guc_ggtt_offset(struct i915_vma *vma)
 
 void intel_guc_init_early(struct intel_guc *guc);
 void intel_guc_init_send_regs(struct intel_guc *guc);
+void intel_guc_init_params(struct intel_guc *guc);
 int intel_guc_send_nop(struct intel_guc *guc, const u32 *action, u32 len);
 int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len);
 int intel_guc_sample_forcewake(struct intel_guc *guc);
@@ -102,9 +115,6 @@ int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset);
 int intel_guc_suspend(struct drm_i915_private *dev_priv);
 int intel_guc_resume(struct drm_i915_private *dev_priv);
 struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size);
-
-int intel_guc_select_fw(struct intel_guc *guc);
-int intel_guc_init_hw(struct intel_guc *guc);
+u32 intel_guc_wopcm_size(struct drm_i915_private *dev_priv);
 
 #endif
@ -26,31 +26,9 @@
|
|||
* Dave Gordon <david.s.gordon@intel.com>
|
||||
* Alex Dai <yu.dai@intel.com>
|
||||
*/
|
||||
#include "i915_drv.h"
|
||||
#include "intel_uc.h"
|
||||
|
||||
/**
|
||||
* DOC: GuC-specific firmware loader
|
||||
*
|
||||
* intel_guc:
|
||||
* Top level structure of guc. It handles firmware loading and manages client
|
||||
* pool and doorbells. intel_guc owns a i915_guc_client to replace the legacy
|
||||
* ExecList submission.
|
||||
*
|
||||
* Firmware versioning:
|
||||
* The firmware build process will generate a version header file with major and
|
||||
* minor version defined. The versions are built into CSS header of firmware.
|
||||
* i915 kernel driver set the minimal firmware version required per platform.
|
||||
* The firmware installation package will install (symbolic link) proper version
|
||||
* of firmware.
|
||||
*
|
||||
* GuC address space:
|
||||
* GuC does not allow any gfx GGTT address that falls into range [0, WOPCM_TOP),
|
||||
* which is reserved for Boot ROM, SRAM and WOPCM. Currently this top address is
|
||||
* 512K. In order to exclude 0-512K address space from GGTT, all gfx objects
|
||||
* used by GuC is pinned with PIN_OFFSET_BIAS along with size of WOPCM.
|
||||
*
|
||||
*/
|
||||
#include "intel_guc_fw.h"
|
||||
#include "i915_drv.h"
|
||||
|
||||
#define SKL_FW_MAJOR 6
|
||||
#define SKL_FW_MINOR 1
|
||||
|
@ -78,88 +56,45 @@ MODULE_FIRMWARE(I915_KBL_GUC_UCODE);
|
|||
|
||||
#define I915_GLK_GUC_UCODE GUC_FW_PATH(glk, GLK_FW_MAJOR, GLK_FW_MINOR)
|
||||
|
||||
|
||||
static u32 get_gttype(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
/* XXX: GT type based on PCI device ID? field seems unused by fw */
|
||||
return 0;
|
||||
}
|
||||
|
||||
static u32 get_core_family(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
u32 gen = INTEL_GEN(dev_priv);
|
||||
|
||||
switch (gen) {
|
||||
case 9:
|
||||
return GUC_CORE_FAMILY_GEN9;
|
||||
|
||||
default:
|
||||
MISSING_CASE(gen);
|
||||
return GUC_CORE_FAMILY_UNKNOWN;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Initialise the GuC parameter block before starting the firmware
|
||||
* transfer. These parameters are read by the firmware on startup
|
||||
* and cannot be changed thereafter.
|
||||
/**
|
||||
* intel_guc_fw_select() - selects GuC firmware for uploading
|
||||
*
|
||||
* @guc: intel_guc struct
|
||||
*
|
||||
* Return: zero when we know firmware, non-zero in other case
|
||||
*/
|
||||
static void guc_params_init(struct drm_i915_private *dev_priv)
|
||||
int intel_guc_fw_select(struct intel_guc *guc)
|
||||
{
|
||||
struct intel_guc *guc = &dev_priv->guc;
|
||||
u32 params[GUC_CTL_MAX_DWORDS];
|
||||
int i;
|
||||
struct drm_i915_private *dev_priv = guc_to_i915(guc);
|
||||
|
||||
memset(¶ms, 0, sizeof(params));
|
||||
intel_uc_fw_init(&guc->fw, INTEL_UC_FW_TYPE_GUC);
|
||||
|
||||
params[GUC_CTL_DEVICE_INFO] |=
|
||||
(get_gttype(dev_priv) << GUC_CTL_GTTYPE_SHIFT) |
|
||||
(get_core_family(dev_priv) << GUC_CTL_COREFAMILY_SHIFT);
|
||||
|
||||
/*
|
||||
* GuC ARAT increment is 10 ns. GuC default scheduler quantum is one
|
||||
* second. This ARAR is calculated by:
|
||||
* Scheduler-Quantum-in-ns / ARAT-increment-in-ns = 1000000000 / 10
|
||||
*/
|
||||
params[GUC_CTL_ARAT_HIGH] = 0;
|
||||
params[GUC_CTL_ARAT_LOW] = 100000000;
|
||||
|
||||
params[GUC_CTL_WA] |= GUC_CTL_WA_UK_BY_DRIVER;
|
||||
|
||||
params[GUC_CTL_FEATURE] |= GUC_CTL_DISABLE_SCHEDULER |
|
||||
GUC_CTL_VCS2_ENABLED;
|
||||
|
||||
params[GUC_CTL_LOG_PARAMS] = guc->log.flags;
|
||||
|
||||
if (i915_modparams.guc_log_level >= 0) {
|
||||
params[GUC_CTL_DEBUG] =
|
||||
i915_modparams.guc_log_level << GUC_LOG_VERBOSITY_SHIFT;
|
||||
} else
|
||||
params[GUC_CTL_DEBUG] = GUC_LOG_DISABLED;
|
||||
|
||||
/* If GuC submission is enabled, set up additional parameters here */
|
||||
if (i915_modparams.enable_guc_submission) {
|
||||
u32 ads = guc_ggtt_offset(guc->ads_vma) >> PAGE_SHIFT;
|
||||
u32 pgs = guc_ggtt_offset(dev_priv->guc.stage_desc_pool);
|
||||
u32 ctx_in_16 = GUC_MAX_STAGE_DESCRIPTORS / 16;
|
||||
|
||||
params[GUC_CTL_DEBUG] |= ads << GUC_ADS_ADDR_SHIFT;
|
||||
params[GUC_CTL_DEBUG] |= GUC_ADS_ENABLED;
|
||||
|
||||
pgs >>= PAGE_SHIFT;
|
||||
params[GUC_CTL_CTXINFO] = (pgs << GUC_CTL_BASE_ADDR_SHIFT) |
|
||||
(ctx_in_16 << GUC_CTL_CTXNUM_IN16_SHIFT);
|
||||
|
||||
params[GUC_CTL_FEATURE] |= GUC_CTL_KERNEL_SUBMISSIONS;
|
||||
|
||||
/* Unmask this bit to enable the GuC's internal scheduler */
|
||||
params[GUC_CTL_FEATURE] &= ~GUC_CTL_DISABLE_SCHEDULER;
|
||||
if (i915_modparams.guc_firmware_path) {
|
||||
guc->fw.path = i915_modparams.guc_firmware_path;
|
||||
guc->fw.major_ver_wanted = 0;
|
||||
guc->fw.minor_ver_wanted = 0;
|
||||
} else if (IS_SKYLAKE(dev_priv)) {
|
||||
guc->fw.path = I915_SKL_GUC_UCODE;
|
||||
guc->fw.major_ver_wanted = SKL_FW_MAJOR;
|
||||
guc->fw.minor_ver_wanted = SKL_FW_MINOR;
|
||||
} else if (IS_BROXTON(dev_priv)) {
|
||||
guc->fw.path = I915_BXT_GUC_UCODE;
|
||||
guc->fw.major_ver_wanted = BXT_FW_MAJOR;
|
||||
guc->fw.minor_ver_wanted = BXT_FW_MINOR;
|
||||
} else if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) {
|
||||
guc->fw.path = I915_KBL_GUC_UCODE;
|
||||
guc->fw.major_ver_wanted = KBL_FW_MAJOR;
|
||||
guc->fw.minor_ver_wanted = KBL_FW_MINOR;
|
||||
} else if (IS_GEMINILAKE(dev_priv)) {
|
||||
guc->fw.path = I915_GLK_GUC_UCODE;
|
||||
guc->fw.major_ver_wanted = GLK_FW_MAJOR;
|
||||
guc->fw.minor_ver_wanted = GLK_FW_MINOR;
|
||||
} else {
|
||||
DRM_ERROR("No GuC firmware known for platform with GuC!\n");
|
||||
return -ENOENT;
|
||||
}
|
||||
|
||||
I915_WRITE(SOFT_SCRATCH(0), 0);
|
||||
|
||||
for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
|
||||
I915_WRITE(SOFT_SCRATCH(1 + i), params[i]);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -250,38 +185,16 @@ static int guc_ucode_xfer_dma(struct drm_i915_private *dev_priv,
|
|||
return ret;
|
||||
}
|
||||
|
||||
u32 intel_guc_wopcm_size(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
u32 wopcm_size = GUC_WOPCM_TOP;
|
||||
|
||||
/* On BXT, the top of WOPCM is reserved for RC6 context */
|
||||
if (IS_GEN9_LP(dev_priv))
|
||||
wopcm_size -= BXT_GUC_WOPCM_RC6_RESERVED;
|
||||
|
||||
return wopcm_size;
|
||||
}
|
||||
|
||||
/*
|
||||
* Load the GuC firmware blob into the MinuteIA.
|
||||
*/
|
||||
static int guc_ucode_xfer(struct drm_i915_private *dev_priv)
|
||||
static int guc_ucode_xfer(struct intel_uc_fw *guc_fw, struct i915_vma *vma)
|
||||
{
|
||||
struct intel_uc_fw *guc_fw = &dev_priv->guc.fw;
|
||||
struct i915_vma *vma;
|
||||
struct intel_guc *guc = container_of(guc_fw, struct intel_guc, fw);
|
||||
struct drm_i915_private *dev_priv = guc_to_i915(guc);
|
||||
int ret;
|
||||
|
||||
ret = i915_gem_object_set_to_gtt_domain(guc_fw->obj, false);
|
||||
if (ret) {
|
||||
DRM_DEBUG_DRIVER("set-domain failed %d\n", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
vma = i915_gem_object_ggtt_pin(guc_fw->obj, NULL, 0, 0,
|
||||
PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
|
||||
if (IS_ERR(vma)) {
|
||||
DRM_DEBUG_DRIVER("pin failed %d\n", (int)PTR_ERR(vma));
|
||||
return PTR_ERR(vma);
|
||||
}
|
||||
GEM_BUG_ON(guc_fw->type != INTEL_UC_FW_TYPE_GUC);
|
||||
|
||||
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
|
||||
|
||||
|
@ -312,23 +225,15 @@ static int guc_ucode_xfer(struct drm_i915_private *dev_priv)
|
|||
I915_WRITE(GUC_ARAT_C6DIS, 0x1FF);
|
||||
}
|
||||
|
||||
guc_params_init(dev_priv);
|
||||
|
||||
ret = guc_ucode_xfer_dma(dev_priv, vma);
|
||||
|
||||
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
|
||||
|
||||
/*
|
||||
* We keep the object pages for reuse during resume. But we can unpin it
|
||||
* now that DMA has completed, so it doesn't continue to take up space.
|
||||
*/
|
||||
i915_vma_unpin(vma);
|
||||
|
||||
return ret;
|
||||
}
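
The reworked xfer hook recovers its enclosing intel_guc from the embedded intel_uc_fw member. A minimal sketch of the container_of pattern it relies on (types here are illustrative, not the driver's):

	#include <stddef.h>

	struct fw { int dummy; };
	struct guc {
		int other_state;
		struct fw fw;	/* embedded member */
	};

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	static struct guc *fw_to_guc(struct fw *f)
	{
		/* Walk back from the embedded member to the containing object. */
		return container_of(f, struct guc, fw);
	}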

/**
 * intel_guc_init_hw() - finish preparing the GuC for activity
 * intel_guc_fw_upload() - finish preparing the GuC for activity
 * @guc: intel_guc structure
 *
 * Called during driver loading and also after a GPU reset.

@@ -340,78 +245,7 @@ static int guc_ucode_xfer(struct drm_i915_private *dev_priv)
 *
 * Return: non-zero code on error
 */
int intel_guc_init_hw(struct intel_guc *guc)
int intel_guc_fw_upload(struct intel_guc *guc)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	const char *fw_path = guc->fw.path;
	int ret;

	DRM_DEBUG_DRIVER("GuC fw status: path %s, fetch %s, load %s\n",
			 fw_path,
			 intel_uc_fw_status_repr(guc->fw.fetch_status),
			 intel_uc_fw_status_repr(guc->fw.load_status));

	if (guc->fw.fetch_status != INTEL_UC_FIRMWARE_SUCCESS)
		return -EIO;

	guc->fw.load_status = INTEL_UC_FIRMWARE_PENDING;

	DRM_DEBUG_DRIVER("GuC fw status: fetch %s, load %s\n",
			 intel_uc_fw_status_repr(guc->fw.fetch_status),
			 intel_uc_fw_status_repr(guc->fw.load_status));

	ret = guc_ucode_xfer(dev_priv);

	if (ret)
		return -EAGAIN;

	guc->fw.load_status = INTEL_UC_FIRMWARE_SUCCESS;

	DRM_INFO("GuC %s (firmware %s [version %u.%u])\n",
		 i915_modparams.enable_guc_submission ? "submission enabled" :
							"loaded",
		 guc->fw.path,
		 guc->fw.major_ver_found, guc->fw.minor_ver_found);

	return 0;
}

/**
 * intel_guc_select_fw() - selects GuC firmware for loading
 * @guc: intel_guc struct
 *
 * Return: zero when we know firmware, non-zero in other case
 */
int intel_guc_select_fw(struct intel_guc *guc)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);

	intel_uc_fw_init(&guc->fw, INTEL_UC_FW_TYPE_GUC);

	if (i915_modparams.guc_firmware_path) {
		guc->fw.path = i915_modparams.guc_firmware_path;
		guc->fw.major_ver_wanted = 0;
		guc->fw.minor_ver_wanted = 0;
	} else if (IS_SKYLAKE(dev_priv)) {
		guc->fw.path = I915_SKL_GUC_UCODE;
		guc->fw.major_ver_wanted = SKL_FW_MAJOR;
		guc->fw.minor_ver_wanted = SKL_FW_MINOR;
	} else if (IS_BROXTON(dev_priv)) {
		guc->fw.path = I915_BXT_GUC_UCODE;
		guc->fw.major_ver_wanted = BXT_FW_MAJOR;
		guc->fw.minor_ver_wanted = BXT_FW_MINOR;
	} else if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) {
		guc->fw.path = I915_KBL_GUC_UCODE;
		guc->fw.major_ver_wanted = KBL_FW_MAJOR;
		guc->fw.minor_ver_wanted = KBL_FW_MINOR;
	} else if (IS_GEMINILAKE(dev_priv)) {
		guc->fw.path = I915_GLK_GUC_UCODE;
		guc->fw.major_ver_wanted = GLK_FW_MAJOR;
		guc->fw.minor_ver_wanted = GLK_FW_MINOR;
	} else {
		DRM_ERROR("No GuC firmware known for platform with GuC!\n");
		return -ENOENT;
	}

	return 0;
	return intel_uc_fw_upload(&guc->fw, guc_ucode_xfer);
}
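
With select and upload now split, firmware choice happens once at driver load while the upload runs on every init and after GPU reset. A rough sketch of the resulting call order (the fetch step between the two is elided; driver_load_sketch is illustrative, not an i915 function):

	static int driver_load_sketch(struct intel_guc *guc)
	{
		int err;

		err = intel_guc_fw_select(guc);	/* pick blob + wanted version, once */
		if (err)
			return err;		/* no firmware known: GuC gets disabled */

		/* ... intel_uc_fw_fetch() runs between these in the real driver ... */

		return intel_guc_fw_upload(guc);	/* repeated after a GPU reset */
	}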

@@ -0,0 +1,33 @@
/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#ifndef _INTEL_GUC_FW_H_
#define _INTEL_GUC_FW_H_

struct intel_guc;

int intel_guc_fw_select(struct intel_guc *guc);
int intel_guc_fw_upload(struct intel_guc *guc);

#endif

@@ -82,8 +82,8 @@
#define GUC_CTL_ARAT_LOW		2

#define GUC_CTL_DEVICE_INFO		3
#define   GUC_CTL_GTTYPE_SHIFT		0
#define   GUC_CTL_COREFAMILY_SHIFT	7
#define   GUC_CTL_GT_TYPE_SHIFT		0
#define   GUC_CTL_CORE_FAMILY_SHIFT	7

#define GUC_CTL_LOG_PARAMS		4
#define   GUC_LOG_VALID			(1 << 0)

@@ -70,7 +70,7 @@ static struct intel_hdmi *intel_attached_hdmi(struct drm_connector *connector)
	return enc_to_intel_hdmi(&intel_attached_encoder(connector)->base);
}

static u32 g4x_infoframe_index(enum hdmi_infoframe_type type)
static u32 g4x_infoframe_index(unsigned int type)
{
	switch (type) {
	case HDMI_INFOFRAME_TYPE_AVI:

@@ -85,7 +85,7 @@ static u32 g4x_infoframe_index(enum hdmi_infoframe_type type)
	}
}

static u32 g4x_infoframe_enable(enum hdmi_infoframe_type type)
static u32 g4x_infoframe_enable(unsigned int type)
{
	switch (type) {
	case HDMI_INFOFRAME_TYPE_AVI:

@@ -100,9 +100,11 @@ static u32 g4x_infoframe_enable(enum hdmi_infoframe_type type)
	}
}

static u32 hsw_infoframe_enable(enum hdmi_infoframe_type type)
static u32 hsw_infoframe_enable(unsigned int type)
{
	switch (type) {
	case DP_SDP_VSC:
		return VIDEO_DIP_ENABLE_VSC_HSW;
	case HDMI_INFOFRAME_TYPE_AVI:
		return VIDEO_DIP_ENABLE_AVI_HSW;
	case HDMI_INFOFRAME_TYPE_SPD:

@@ -118,10 +120,12 @@ static u32 hsw_infoframe_enable(enum hdmi_infoframe_type type)
static i915_reg_t
hsw_dip_data_reg(struct drm_i915_private *dev_priv,
		 enum transcoder cpu_transcoder,
		 enum hdmi_infoframe_type type,
		 unsigned int type,
		 int i)
{
	switch (type) {
	case DP_SDP_VSC:
		return HSW_TVIDEO_DIP_VSC_DATA(cpu_transcoder, i);
	case HDMI_INFOFRAME_TYPE_AVI:
		return HSW_TVIDEO_DIP_AVI_DATA(cpu_transcoder, i);
	case HDMI_INFOFRAME_TYPE_SPD:

@@ -136,7 +140,7 @@ hsw_dip_data_reg(struct drm_i915_private *dev_priv,

static void g4x_write_infoframe(struct drm_encoder *encoder,
				const struct intel_crtc_state *crtc_state,
				enum hdmi_infoframe_type type,
				unsigned int type,
				const void *frame, ssize_t len)
{
	const uint32_t *data = frame;

@@ -191,7 +195,7 @@ static bool g4x_infoframe_enabled(struct drm_encoder *encoder,

static void ibx_write_infoframe(struct drm_encoder *encoder,
				const struct intel_crtc_state *crtc_state,
				enum hdmi_infoframe_type type,
				unsigned int type,
				const void *frame, ssize_t len)
{
	const uint32_t *data = frame;

@@ -251,7 +255,7 @@ static bool ibx_infoframe_enabled(struct drm_encoder *encoder,

static void cpt_write_infoframe(struct drm_encoder *encoder,
				const struct intel_crtc_state *crtc_state,
				enum hdmi_infoframe_type type,
				unsigned int type,
				const void *frame, ssize_t len)
{
	const uint32_t *data = frame;

@@ -309,7 +313,7 @@ static bool cpt_infoframe_enabled(struct drm_encoder *encoder,

static void vlv_write_infoframe(struct drm_encoder *encoder,
				const struct intel_crtc_state *crtc_state,
				enum hdmi_infoframe_type type,
				unsigned int type,
				const void *frame, ssize_t len)
{
	const uint32_t *data = frame;

@@ -368,7 +372,7 @@ static bool vlv_infoframe_enabled(struct drm_encoder *encoder,

static void hsw_write_infoframe(struct drm_encoder *encoder,
				const struct intel_crtc_state *crtc_state,
				enum hdmi_infoframe_type type,
				unsigned int type,
				const void *frame, ssize_t len)
{
	const uint32_t *data = frame;

@@ -377,6 +381,8 @@ static void hsw_write_infoframe(struct drm_encoder *encoder,
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
	i915_reg_t ctl_reg = HSW_TVIDEO_DIP_CTL(cpu_transcoder);
	i915_reg_t data_reg;
	int data_size = type == DP_SDP_VSC ?
		VIDEO_DIP_VSC_DATA_SIZE : VIDEO_DIP_DATA_SIZE;
	int i;
	u32 val = I915_READ(ctl_reg);

@@ -392,7 +398,7 @@ static void hsw_write_infoframe(struct drm_encoder *encoder,
		data++;
	}
	/* Write every possible data byte to force correct ECC calculation. */
	for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
	for (; i < data_size; i += 4)
		I915_WRITE(hsw_dip_data_reg(dev_priv, cpu_transcoder,
					    type, i >> 2), 0);
	mmiowb();
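
Widening the parameter from enum hdmi_infoframe_type to unsigned int lets the same hooks carry DP SDP packet types (e.g. DP_SDP_VSC) alongside HDMI infoframe types. The two namespaces do not collide, which a small sketch can illustrate (constants from the upstream HDMI/DP headers; the helper itself is made up):

	#include <linux/hdmi.h>		/* HDMI_INFOFRAME_TYPE_* (0x80 and up) */
	#include <drm/drm_dp_helper.h>	/* DP_SDP_VSC (small values) */

	/* Hypothetical helper: classify a packet type in the shared namespace. */
	static bool is_dp_sdp(unsigned int type)
	{
		/* HDMI infoframe types have the high bit set; DP SDP types don't. */
		return type < HDMI_INFOFRAME_TYPE_VENDOR;
	}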

@@ -77,73 +77,6 @@ MODULE_FIRMWARE(I915_KBL_HUC_UCODE);
#define I915_GLK_HUC_UCODE HUC_FW_PATH(glk, GLK_HUC_FW_MAJOR, \
	GLK_HUC_FW_MINOR, GLK_BLD_NUM)

/**
 * huc_ucode_xfer() - DMA's the firmware
 * @dev_priv: the drm_i915_private device
 *
 * Transfer the firmware image to RAM for execution by the microcontroller.
 *
 * Return: 0 on success, non-zero on failure
 */
static int huc_ucode_xfer(struct drm_i915_private *dev_priv)
{
	struct intel_uc_fw *huc_fw = &dev_priv->huc.fw;
	struct i915_vma *vma;
	unsigned long offset = 0;
	u32 size;
	int ret;

	ret = i915_gem_object_set_to_gtt_domain(huc_fw->obj, false);
	if (ret) {
		DRM_DEBUG_DRIVER("set-domain failed %d\n", ret);
		return ret;
	}

	vma = i915_gem_object_ggtt_pin(huc_fw->obj, NULL, 0, 0,
				       PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
	if (IS_ERR(vma)) {
		DRM_DEBUG_DRIVER("pin failed %d\n", (int)PTR_ERR(vma));
		return PTR_ERR(vma);
	}

	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	/* Set the source address for the uCode */
	offset = guc_ggtt_offset(vma) + huc_fw->header_offset;
	I915_WRITE(DMA_ADDR_0_LOW, lower_32_bits(offset));
	I915_WRITE(DMA_ADDR_0_HIGH, upper_32_bits(offset) & 0xFFFF);

	/* Hardware doesn't look at destination address for HuC. Set it to 0,
	 * but still program the correct address space.
	 */
	I915_WRITE(DMA_ADDR_1_LOW, 0);
	I915_WRITE(DMA_ADDR_1_HIGH, DMA_ADDRESS_SPACE_WOPCM);

	size = huc_fw->header_size + huc_fw->ucode_size;
	I915_WRITE(DMA_COPY_SIZE, size);

	/* Start the DMA */
	I915_WRITE(DMA_CTRL, _MASKED_BIT_ENABLE(HUC_UKERNEL | START_DMA));

	/* Wait for DMA to finish */
	ret = wait_for((I915_READ(DMA_CTRL) & START_DMA) == 0, 100);

	DRM_DEBUG_DRIVER("HuC DMA transfer wait over with ret %d\n", ret);

	/* Disable the bits once DMA is over */
	I915_WRITE(DMA_CTRL, _MASKED_BIT_DISABLE(HUC_UKERNEL));

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

	/*
	 * We keep the object pages for reuse during resume. But we can unpin it
	 * now that DMA has completed, so it doesn't continue to take up space.
	 */
	i915_vma_unpin(vma);

	return ret;
}

/**
 * intel_huc_select_fw() - selects HuC firmware for loading
 * @huc: intel_huc struct

@@ -180,6 +113,56 @@ void intel_huc_select_fw(struct intel_huc *huc)
	}
}

/**
 * huc_ucode_xfer() - DMA's the firmware
 * @dev_priv: the drm_i915_private device
 *
 * Transfer the firmware image to RAM for execution by the microcontroller.
 *
 * Return: 0 on success, non-zero on failure
 */
static int huc_ucode_xfer(struct intel_uc_fw *huc_fw, struct i915_vma *vma)
{
	struct intel_huc *huc = container_of(huc_fw, struct intel_huc, fw);
	struct drm_i915_private *dev_priv = huc_to_i915(huc);
	unsigned long offset = 0;
	u32 size;
	int ret;

	GEM_BUG_ON(huc_fw->type != INTEL_UC_FW_TYPE_HUC);

	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	/* Set the source address for the uCode */
	offset = guc_ggtt_offset(vma) + huc_fw->header_offset;
	I915_WRITE(DMA_ADDR_0_LOW, lower_32_bits(offset));
	I915_WRITE(DMA_ADDR_0_HIGH, upper_32_bits(offset) & 0xFFFF);

	/* Hardware doesn't look at destination address for HuC. Set it to 0,
	 * but still program the correct address space.
	 */
	I915_WRITE(DMA_ADDR_1_LOW, 0);
	I915_WRITE(DMA_ADDR_1_HIGH, DMA_ADDRESS_SPACE_WOPCM);

	size = huc_fw->header_size + huc_fw->ucode_size;
	I915_WRITE(DMA_COPY_SIZE, size);

	/* Start the DMA */
	I915_WRITE(DMA_CTRL, _MASKED_BIT_ENABLE(HUC_UKERNEL | START_DMA));

	/* Wait for DMA to finish */
	ret = wait_for((I915_READ(DMA_CTRL) & START_DMA) == 0, 100);

	DRM_DEBUG_DRIVER("HuC DMA transfer wait over with ret %d\n", ret);

	/* Disable the bits once DMA is over */
	I915_WRITE(DMA_CTRL, _MASKED_BIT_DISABLE(HUC_UKERNEL));

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

	return ret;
}
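
The DMA_CTRL writes use the hardware's masked-bit convention: the upper 16 bits select which of the lower 16 bits take effect, so unrelated control bits are left alone. A simplified sketch of the encoding (i915's real macros avoid double evaluation; this version is for illustration):

	/* Bit N changes only if bit N+16 is also set in the written value. */
	#define MASKED_BIT_ENABLE(a)	(((a) << 16) | (a))
	#define MASKED_BIT_DISABLE(a)	((a) << 16)

	/* Example: start the DMA without disturbing the other control bits. */
	static unsigned int start_dma_value(unsigned int start_dma_bit)
	{
		return MASKED_BIT_ENABLE(start_dma_bit);
	}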

/**
 * intel_huc_init_hw() - load HuC uCode to device
 * @huc: intel_huc structure

@@ -194,33 +177,7 @@ void intel_huc_select_fw(struct intel_huc *huc)
 */
void intel_huc_init_hw(struct intel_huc *huc)
{
	struct drm_i915_private *dev_priv = huc_to_i915(huc);
	int err;

	DRM_DEBUG_DRIVER("%s fw status: fetch %s, load %s\n",
			 huc->fw.path,
			 intel_uc_fw_status_repr(huc->fw.fetch_status),
			 intel_uc_fw_status_repr(huc->fw.load_status));

	if (huc->fw.fetch_status != INTEL_UC_FIRMWARE_SUCCESS)
		return;

	huc->fw.load_status = INTEL_UC_FIRMWARE_PENDING;

	err = huc_ucode_xfer(dev_priv);

	huc->fw.load_status = err ?
		INTEL_UC_FIRMWARE_FAIL : INTEL_UC_FIRMWARE_SUCCESS;

	DRM_DEBUG_DRIVER("%s fw status: fetch %s, load %s\n",
			 huc->fw.path,
			 intel_uc_fw_status_repr(huc->fw.fetch_status),
			 intel_uc_fw_status_repr(huc->fw.load_status));

	if (huc->fw.load_status != INTEL_UC_FIRMWARE_SUCCESS)
		DRM_ERROR("Failed to complete HuC uCode load with ret %d\n", err);

	return;
	intel_uc_fw_upload(&huc->fw, huc_ucode_xfer);
}

/**

@@ -793,7 +793,6 @@ static void intel_lrc_irq_handler(unsigned long data)
		&engine->status_page.page_addr[I915_HWS_CSB_BUF0_INDEX];
	unsigned int head, tail;

	/* However GVT emulation depends upon intercepting CSB mmio */
	if (unlikely(execlists->csb_use_mmio)) {
		buf = (u32 * __force)
			(dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_BUF_LO(engine, 0)));

@@ -1094,6 +1093,7 @@ execlists_context_pin(struct intel_engine_cs *engine,
		i915_ggtt_offset(ce->ring->vma);

	ce->state->obj->mm.dirty = true;
	ce->state->obj->pin_global++;

	i915_gem_context_get(ctx);
out:

@@ -1121,6 +1121,7 @@ static void execlists_context_unpin(struct intel_engine_cs *engine,

	intel_ring_unpin(ce->ring);

	ce->state->obj->pin_global--;
	i915_gem_object_unpin_map(ce->state->obj);
	i915_vma_unpin(ce->state);

@@ -56,7 +56,7 @@ static enum drm_lspcon_mode lspcon_get_current_mode(struct intel_lspcon *lspcon)
	struct i2c_adapter *adapter = &lspcon_to_intel_dp(lspcon)->aux.ddc;

	if (drm_lspcon_get_mode(adapter, &current_mode)) {
		DRM_ERROR("Error reading LSPCON mode\n");
		DRM_DEBUG_KMS("Error reading LSPCON mode\n");
		return DRM_LSPCON_MODE_INVALID;
	}
	return current_mode;

@@ -68,16 +68,15 @@ static enum drm_lspcon_mode lspcon_wait_mode(struct intel_lspcon *lspcon,
	enum drm_lspcon_mode current_mode;

	current_mode = lspcon_get_current_mode(lspcon);
	if (current_mode == mode || current_mode == DRM_LSPCON_MODE_INVALID)
	if (current_mode == mode)
		goto out;

	DRM_DEBUG_KMS("Waiting for LSPCON mode %s to settle\n",
		      lspcon_mode_name(mode));

	wait_for((current_mode = lspcon_get_current_mode(lspcon)) == mode ||
		 current_mode == DRM_LSPCON_MODE_INVALID, 100);
	wait_for((current_mode = lspcon_get_current_mode(lspcon)) == mode, 100);
	if (current_mode != mode)
		DRM_DEBUG_KMS("LSPCON mode hasn't settled\n");
		DRM_ERROR("LSPCON mode hasn't settled\n");

out:
	DRM_DEBUG_KMS("Current LSPCON mode %s\n",

@@ -133,6 +132,7 @@ static bool lspcon_wake_native_aux_ch(struct intel_lspcon *lspcon)

static bool lspcon_probe(struct intel_lspcon *lspcon)
{
	int retry;
	enum drm_dp_dual_mode_type adaptor_type;
	struct i2c_adapter *adapter = &lspcon_to_intel_dp(lspcon)->aux.ddc;
	enum drm_lspcon_mode expected_mode;

@@ -141,10 +141,18 @@ static bool lspcon_probe(struct intel_lspcon *lspcon)
		DRM_LSPCON_MODE_PCON : DRM_LSPCON_MODE_LS;

	/* Let's probe the adaptor and check its type */
	adaptor_type = drm_dp_dual_mode_detect(adapter);
	for (retry = 0; retry < 6; retry++) {
		if (retry)
			usleep_range(500, 1000);

		adaptor_type = drm_dp_dual_mode_detect(adapter);
		if (adaptor_type == DRM_DP_DUAL_MODE_LSPCON)
			break;
	}

	if (adaptor_type != DRM_DP_DUAL_MODE_LSPCON) {
		DRM_DEBUG_KMS("No LSPCON detected, found %s\n",
			      drm_dp_get_dual_mode_type_name(adaptor_type));
		return false;
	}
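
The probe now retries detection several times because some LSPCON adaptors need a moment after power-on before they answer on I2C. The loop reduces to a familiar bounded-retry shape (names here are illustrative):

	/* Retry a detection callback up to 'attempts' times with a short back-off. */
	static int probe_with_retry(int (*detect)(void), int expected, int attempts)
	{
		int type;

		do {
			type = detect();
			if (type == expected)
				return type;
			/* the driver sleeps 500-1000 us between attempts */
		} while (--attempts > 0);

		return type;	/* caller decides what a mismatch means */
	}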

@@ -6591,7 +6591,7 @@ static void gen9_enable_rc6(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	uint32_t rc6_mask = 0;
	u32 rc6_mode, rc6_mask = 0;

	/* 1a: Software RC state - RC0 */
	I915_WRITE(GEN6_RC_STATE, 0);

@@ -6629,8 +6629,15 @@ static void gen9_enable_rc6(struct drm_i915_private *dev_priv)
		rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
	DRM_INFO("RC6 %s\n", onoff(rc6_mask & GEN6_RC_CTL_RC6_ENABLE));
	I915_WRITE(GEN6_RC6_THRESHOLD, 37500); /* 37.5/125ms per EI */

	/* WaRsUseTimeoutMode:cnl (pre-prod) */
	if (IS_CNL_REVID(dev_priv, CNL_REVID_A0, CNL_REVID_C0))
		rc6_mode = GEN7_RC_CTL_TO_MODE;
	else
		rc6_mode = GEN6_RC_CTL_EI_MODE(1);

	I915_WRITE(GEN6_RC_CONTROL,
		   GEN6_RC_CTL_HW_ENABLE | GEN6_RC_CTL_EI_MODE(1) | rc6_mask);
		   GEN6_RC_CTL_HW_ENABLE | rc6_mode | rc6_mask);

	/*
	 * 3b: Enable Coarse Power Gating only when RC6 is enabled.
@@ -58,6 +58,9 @@

static bool is_edp_psr(struct intel_dp *intel_dp)
{
	if (!intel_dp_is_edp(intel_dp))
		return false;

	return intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED;
}

@@ -72,37 +75,6 @@ static bool vlv_is_psr_active_on_pipe(struct drm_device *dev, int pipe)
	       (val == VLV_EDP_PSR_ACTIVE_SF_UPDATE);
}

static void intel_psr_write_vsc(struct intel_dp *intel_dp,
				const struct edp_vsc_psr *vsc_psr)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
	i915_reg_t ctl_reg = HSW_TVIDEO_DIP_CTL(cpu_transcoder);
	uint32_t *data = (uint32_t *) vsc_psr;
	unsigned int i;

	/* As per BSpec (Pipe Video Data Island Packet), we need to disable
	   the video DIP being updated before programming the video DIP data
	   buffer registers for the DIP being updated. */
	I915_WRITE(ctl_reg, 0);
	POSTING_READ(ctl_reg);

	for (i = 0; i < sizeof(*vsc_psr); i += 4) {
		I915_WRITE(HSW_TVIDEO_DIP_VSC_DATA(cpu_transcoder,
						   i >> 2), *data);
		data++;
	}
	for (; i < VIDEO_DIP_VSC_DATA_SIZE; i += 4)
		I915_WRITE(HSW_TVIDEO_DIP_VSC_DATA(cpu_transcoder,
						   i >> 2), 0);

	I915_WRITE(ctl_reg, VIDEO_DIP_ENABLE_VSC_HSW);
	POSTING_READ(ctl_reg);
}

static void vlv_psr_setup_vsc(struct intel_dp *intel_dp,
			      const struct intel_crtc_state *crtc_state)
{

@@ -149,7 +121,8 @@ static void hsw_psr_setup_vsc(struct intel_dp *intel_dp,
		psr_vsc.sdp_header.HB3 = 0x8;
	}

	intel_psr_write_vsc(intel_dp, &psr_vsc);
	intel_dig_port->write_infoframe(&intel_dig_port->base.base, crtc_state,
					DP_SDP_VSC, &psr_vsc, sizeof(psr_vsc));
}

static void vlv_psr_enable_sink(struct intel_dp *intel_dp)

@@ -376,22 +349,25 @@ static void hsw_psr_activate(struct intel_dp *intel_dp)
		hsw_activate_psr1(intel_dp);
}

static bool intel_psr_match_conditions(struct intel_dp *intel_dp)
void intel_psr_compute_config(struct intel_dp *intel_dp,
			      struct intel_crtc_state *crtc_state)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_crtc *crtc = dig_port->base.base.crtc;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	const struct drm_display_mode *adjusted_mode =
		&intel_crtc->config->base.adjusted_mode;
		&crtc_state->base.adjusted_mode;
	int psr_setup_time;

	lockdep_assert_held(&dev_priv->psr.lock);
	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
	WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
	if (!HAS_PSR(dev_priv))
		return;

	dev_priv->psr.source_ok = false;
	if (!is_edp_psr(intel_dp))
		return;

	if (!i915_modparams.enable_psr) {
		DRM_DEBUG_KMS("PSR disable by flag\n");
		return;
	}

	/*
	 * HSW spec explicitly says PSR is tied to port A.

@@ -402,66 +378,70 @@ static bool intel_psr_match_conditions(struct intel_dp *intel_dp)
	 */
	if (HAS_DDI(dev_priv) && dig_port->port != PORT_A) {
		DRM_DEBUG_KMS("PSR condition failed: Port not supported\n");
		return false;
	}

	if (!i915_modparams.enable_psr) {
		DRM_DEBUG_KMS("PSR disable by flag\n");
		return false;
		return;
	}

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    !dev_priv->psr.link_standby) {
		DRM_ERROR("PSR condition failed: Link off requested but not supported on this platform\n");
		return false;
		return;
	}

	if (IS_HASWELL(dev_priv) &&
	    I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config->cpu_transcoder)) &
	    I915_READ(HSW_STEREO_3D_CTL(crtc_state->cpu_transcoder)) &
	    S3D_ENABLE) {
		DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is Enabled\n");
		return false;
		return;
	}

	if (IS_HASWELL(dev_priv) &&
	    adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		DRM_DEBUG_KMS("PSR condition failed: Interlaced is Enabled\n");
		return false;
		return;
	}

	psr_setup_time = drm_dp_psr_setup_time(intel_dp->psr_dpcd);
	if (psr_setup_time < 0) {
		DRM_DEBUG_KMS("PSR condition failed: Invalid PSR setup time (0x%02x)\n",
			      intel_dp->psr_dpcd[1]);
		return false;
		return;
	}

	if (intel_usecs_to_scanlines(adjusted_mode, psr_setup_time) >
	    adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vdisplay - 1) {
		DRM_DEBUG_KMS("PSR condition failed: PSR setup time (%d us) too long\n",
			      psr_setup_time);
		return false;
		return;
	}

	/*
	 * FIXME psr2_support is messed up. It's both computed
	 * dynamically during PSR enable, and extracted from sink
	 * caps during eDP detection.
	 */
	if (!dev_priv->psr.psr2_support) {
		crtc_state->has_psr = true;
		return;
	}

	/* PSR2 is restricted to work with panel resolutions up to 3200x2000 */
	if (dev_priv->psr.psr2_support &&
	    (intel_crtc->config->pipe_src_w > 3200 ||
	     intel_crtc->config->pipe_src_h > 2000)) {
		dev_priv->psr.psr2_support = false;
		return false;
	if (adjusted_mode->crtc_hdisplay > 3200 ||
	    adjusted_mode->crtc_vdisplay > 2000) {
		DRM_DEBUG_KMS("PSR2 disabled, panel resolution too big\n");
		return;
	}

	/*
	 * FIXME: enable PSR2 only for Y-coordinate PSR2 panels.
	 * After GTC implementation, remove this restriction.
	 */
	if (!dev_priv->psr.y_cord_support && dev_priv->psr.psr2_support) {
	if (!dev_priv->psr.y_cord_support) {
		DRM_DEBUG_KMS("PSR2 disabled, panel does not support Y coordinate\n");
		return false;
		return;
	}

	dev_priv->psr.source_ok = true;
	return true;
	crtc_state->has_psr = true;
	crtc_state->has_psr2 = true;
}

static void intel_psr_activate(struct intel_dp *intel_dp)

@@ -531,14 +511,9 @@ void intel_psr_enable(struct intel_dp *intel_dp,
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!HAS_PSR(dev_priv))
	if (!crtc_state->has_psr)
		return;

	if (!is_edp_psr(intel_dp)) {
		DRM_DEBUG_KMS("PSR not supported by this panel\n");
		return;
	}

	WARN_ON(dev_priv->drrs.dp);
	mutex_lock(&dev_priv->psr.lock);
	if (dev_priv->psr.enabled) {

@@ -546,8 +521,8 @@ void intel_psr_enable(struct intel_dp *intel_dp,
		goto unlock;
	}

	if (!intel_psr_match_conditions(intel_dp))
		goto unlock;
	dev_priv->psr.psr2_support = crtc_state->has_psr2;
	dev_priv->psr.source_ok = true;

	dev_priv->psr.busy_frontbuffer_bits = 0;

@@ -668,7 +643,7 @@ void intel_psr_disable(struct intel_dp *intel_dp,
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!HAS_PSR(dev_priv))
	if (!old_crtc_state->has_psr)
		return;

	mutex_lock(&dev_priv->psr.lock);
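
With PSR tracked in the crtc state, enable and disable become pure consumers of bits computed once at atomic-check time. A schematic of that contract (struct trimmed to the two fields this patch adds; the rest is illustrative):

	struct crtc_state_sketch {
		bool has_psr;	/* set by intel_psr_compute_config() */
		bool has_psr2;
	};

	/* Enable no longer re-evaluates panel/platform conditions itself. */
	static void psr_enable_sketch(const struct crtc_state_sketch *state)
	{
		if (!state->has_psr)
			return;	/* all checks already ran at compute time */
		/* ... program the hardware ... */
	}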

@@ -484,11 +484,6 @@ static bool stop_ring(struct intel_engine_cs *engine)
	I915_WRITE_HEAD(engine, 0);
	I915_WRITE_TAIL(engine, 0);

	if (INTEL_GEN(dev_priv) > 2) {
		(void)I915_READ_CTL(engine);
		I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING));
	}

	return (I915_READ_HEAD(engine) & HEAD_ADDR) == 0;
}

@@ -570,6 +565,9 @@ static int init_ring_common(struct intel_engine_cs *engine)

	intel_engine_init_hangcheck(engine);

	if (INTEL_GEN(dev_priv) > 2)
		I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING));

out:
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

@@ -1246,6 +1244,8 @@ int intel_ring_pin(struct intel_ring *ring,
	if (IS_ERR(addr))
		goto err;

	vma->obj->pin_global++;

	ring->vaddr = addr;
	return 0;

@@ -1277,6 +1277,7 @@ void intel_ring_unpin(struct intel_ring *ring)
	i915_gem_object_unpin_map(ring->vma->obj);
	ring->vaddr = NULL;

	ring->vma->obj->pin_global--;
	i915_vma_unpin(ring->vma);
}

@@ -1441,6 +1442,7 @@ intel_ring_context_pin(struct intel_engine_cs *engine,
		goto err;

		ce->state->obj->mm.dirty = true;
		ce->state->obj->pin_global++;
	}

	/* The kernel context is only used as a placeholder for flushing the

@@ -1475,8 +1477,10 @@ static void intel_ring_context_unpin(struct intel_engine_cs *engine,
	if (--ce->pin_count)
		return;

	if (ce->state)
	if (ce->state) {
		ce->state->obj->pin_global--;
		i915_vma_unpin(ce->state);
	}

	i915_gem_context_put(ctx);
}
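
The pin_global counter sprinkled through these hunks appears to track driver-global pins so the shrinker can distinguish truly idle objects from ones pinned for hardware access. A toy model of the counter discipline (the field name mirrors the patch; everything else is illustrative):

	struct obj_sketch {
		int pin_global;	/* number of live global (GGTT/HW) pins */
	};

	static void obj_pin(struct obj_sketch *o)   { o->pin_global++; }
	static void obj_unpin(struct obj_sketch *o) { o->pin_global--; }

	/* Only objects with no global pins are candidates for reclaim. */
	static bool obj_is_idle(const struct obj_sketch *o)
	{
		return o->pin_global == 0;
	}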

@@ -230,7 +230,7 @@ void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state)
#endif
}

static void
void
skl_update_plane(struct intel_plane *plane,
		 const struct intel_crtc_state *crtc_state,
		 const struct intel_plane_state *plane_state)

@@ -311,7 +311,7 @@ skl_update_plane(struct intel_plane *plane,
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

static void
void
skl_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
@@ -68,7 +68,7 @@ void intel_uc_sanitize_options(struct drm_i915_private *dev_priv)
	if (HAS_HUC_UCODE(dev_priv))
		intel_huc_select_fw(&dev_priv->huc);

	if (intel_guc_select_fw(&dev_priv->guc))
	if (intel_guc_fw_select(&dev_priv->guc))
		i915_modparams.enable_guc_loading = 0;
}

@@ -195,7 +195,8 @@ int intel_uc_init_hw(struct drm_i915_private *dev_priv)
			goto err_submission;

		intel_huc_init_hw(&dev_priv->huc);
		ret = intel_guc_init_hw(&dev_priv->guc);
		intel_guc_init_params(guc);
		ret = intel_guc_fw_upload(guc);
		if (ret == 0 || ret != -EAGAIN)
			break;

@@ -221,6 +222,12 @@ int intel_uc_init_hw(struct drm_i915_private *dev_priv)
			goto err_interrupts;
	}

	dev_info(dev_priv->drm.dev, "GuC %s (firmware %s [version %u.%u])\n",
		 i915_modparams.enable_guc_submission ? "submission enabled" :
							"loaded",
		 guc->fw.path,
		 guc->fw.major_ver_found, guc->fw.minor_ver_found);

	return 0;

	/*

@@ -243,12 +250,14 @@ int intel_uc_init_hw(struct drm_i915_private *dev_priv)
err_guc:
	i915_ggtt_disable_guc(dev_priv);

	DRM_ERROR("GuC init failed\n");
	if (i915_modparams.enable_guc_loading > 1 ||
	    i915_modparams.enable_guc_submission > 1)
	    i915_modparams.enable_guc_submission > 1) {
		DRM_ERROR("GuC init failed. Firmware loading disabled.\n");
		ret = -EIO;
	else
	} else {
		DRM_NOTE("GuC init failed. Firmware loading disabled.\n");
		ret = 0;
	}

	if (i915_modparams.enable_guc_submission) {
		i915_modparams.enable_guc_submission = 0;

@@ -256,7 +265,6 @@ int intel_uc_init_hw(struct drm_i915_private *dev_priv)
	}

	i915_modparams.enable_guc_loading = 0;
	DRM_NOTE("GuC firmware loading disabled\n");

	return ret;
}
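
The reworked error path distinguishes "the user explicitly demanded GuC" (hard failure) from "auto mode" (log it and fall back to execlists). The policy compresses to a few lines (parameter names shortened for illustration):

	/* enable > 1 means the feature was explicitly requested; 1 is best-effort. */
	static int guc_init_failed(int enable_loading, int enable_submission)
	{
		if (enable_loading > 1 || enable_submission > 1)
			return -EIO;	/* explicit request: fail the driver load */

		return 0;		/* auto mode: disable GuC and keep going */
	}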

@@ -23,6 +23,7 @@
 */

#include <linux/firmware.h>
#include <drm/drm_print.h>

#include "intel_uc_fw.h"
#include "i915_drv.h"

@@ -45,26 +46,33 @@ void intel_uc_fw_fetch(struct drm_i915_private *dev_priv,
	size_t size;
	int err;

	DRM_DEBUG_DRIVER("%s fw fetch %s\n",
			 intel_uc_fw_type_repr(uc_fw->type), uc_fw->path);

	if (!uc_fw->path)
		return;

	uc_fw->fetch_status = INTEL_UC_FIRMWARE_PENDING;

	DRM_DEBUG_DRIVER("before requesting firmware: uC fw fetch status %s\n",
	DRM_DEBUG_DRIVER("%s fw fetch %s\n",
			 intel_uc_fw_type_repr(uc_fw->type),
			 intel_uc_fw_status_repr(uc_fw->fetch_status));

	err = request_firmware(&fw, uc_fw->path, &pdev->dev);
	if (err)
		goto fail;
	if (!fw)
	if (err) {
		DRM_DEBUG_DRIVER("%s fw request_firmware err=%d\n",
				 intel_uc_fw_type_repr(uc_fw->type), err);
		goto fail;
	}

	DRM_DEBUG_DRIVER("fetch uC fw from %s succeeded, fw %p\n",
			 uc_fw->path, fw);
	DRM_DEBUG_DRIVER("%s fw size %zu ptr %p\n",
			 intel_uc_fw_type_repr(uc_fw->type), fw->size, fw);

	/* Check the size of the blob before examining buffer contents */
	if (fw->size < sizeof(struct uc_css_header)) {
		DRM_NOTE("Firmware header is missing\n");
		DRM_WARN("%s: Unexpected firmware size (%zu, min %zu)\n",
			 intel_uc_fw_type_repr(uc_fw->type),
			 fw->size, sizeof(struct uc_css_header));
		err = -ENODATA;
		goto fail;
	}

@@ -77,7 +85,9 @@ void intel_uc_fw_fetch(struct drm_i915_private *dev_priv,
		sizeof(u32);

	if (uc_fw->header_size != sizeof(struct uc_css_header)) {
		DRM_NOTE("CSS header definition mismatch\n");
		DRM_WARN("%s: Mismatched firmware header definition\n",
			 intel_uc_fw_type_repr(uc_fw->type));
		err = -ENOEXEC;
		goto fail;
	}

@@ -85,9 +95,20 @@ void intel_uc_fw_fetch(struct drm_i915_private *dev_priv,
	uc_fw->ucode_offset = uc_fw->header_offset + uc_fw->header_size;
	uc_fw->ucode_size = (css->size_dw - css->header_size_dw) * sizeof(u32);

	/* Header and uCode will be loaded to WOPCM */
	size = uc_fw->header_size + uc_fw->ucode_size;
	if (size > intel_guc_wopcm_size(dev_priv)) {
		DRM_WARN("%s: Firmware is too large to fit in WOPCM\n",
			 intel_uc_fw_type_repr(uc_fw->type));
		err = -E2BIG;
		goto fail;
	}

	/* now RSA */
	if (css->key_size_dw != UOS_RSA_SCRATCH_MAX_COUNT) {
		DRM_NOTE("RSA key size is bad\n");
		DRM_WARN("%s: Mismatched firmware RSA key size (%u)\n",
			 intel_uc_fw_type_repr(uc_fw->type), css->key_size_dw);
		err = -ENOEXEC;
		goto fail;
	}
	uc_fw->rsa_offset = uc_fw->ucode_offset + uc_fw->ucode_size;

@@ -96,7 +117,9 @@ void intel_uc_fw_fetch(struct drm_i915_private *dev_priv,
	/* At least, it should have header, uCode and RSA. Size of all three. */
	size = uc_fw->header_size + uc_fw->ucode_size + uc_fw->rsa_size;
	if (fw->size < size) {
		DRM_NOTE("Missing firmware components\n");
		DRM_WARN("%s: Truncated firmware (%zu, expected %zu)\n",
			 intel_uc_fw_type_repr(uc_fw->type), fw->size, size);
		err = -ENOEXEC;
		goto fail;
	}

@@ -108,14 +131,6 @@ void intel_uc_fw_fetch(struct drm_i915_private *dev_priv,
	 */
	switch (uc_fw->type) {
	case INTEL_UC_FW_TYPE_GUC:
		/* Header and uCode will be loaded to WOPCM. Size of the two. */
		size = uc_fw->header_size + uc_fw->ucode_size;

		/* Top 32k of WOPCM is reserved (8K stack + 24k RC6 context). */
		if (size > intel_guc_wopcm_size(dev_priv)) {
			DRM_ERROR("Firmware is too large to fit in WOPCM\n");
			goto fail;
		}
		uc_fw->major_ver_found = css->guc.sw_version >> 16;
		uc_fw->minor_ver_found = css->guc.sw_version & 0xFFFF;
		break;

@@ -126,17 +141,21 @@ void intel_uc_fw_fetch(struct drm_i915_private *dev_priv,
		break;

	default:
		DRM_ERROR("Unknown firmware type %d\n", uc_fw->type);
		err = -ENOEXEC;
		goto fail;
		MISSING_CASE(uc_fw->type);
		break;
	}

	DRM_DEBUG_DRIVER("%s fw version %u.%u (wanted %u.%u)\n",
			 intel_uc_fw_type_repr(uc_fw->type),
			 uc_fw->major_ver_found, uc_fw->minor_ver_found,
			 uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted);

	if (uc_fw->major_ver_wanted == 0 && uc_fw->minor_ver_wanted == 0) {
		DRM_NOTE("Skipping %s firmware version check\n",
		DRM_NOTE("%s: Skipping firmware version check\n",
			 intel_uc_fw_type_repr(uc_fw->type));
	} else if (uc_fw->major_ver_found != uc_fw->major_ver_wanted ||
		   uc_fw->minor_ver_found < uc_fw->minor_ver_wanted) {
		DRM_NOTE("%s firmware version %d.%d, required %d.%d\n",
		DRM_NOTE("%s: Wrong firmware version (%u.%u, required %u.%u)\n",
			 intel_uc_fw_type_repr(uc_fw->type),
			 uc_fw->major_ver_found, uc_fw->minor_ver_found,
			 uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted);

@@ -144,34 +163,115 @@ void intel_uc_fw_fetch(struct drm_i915_private *dev_priv,
		goto fail;
	}

	DRM_DEBUG_DRIVER("firmware version %d.%d OK (minimum %d.%d)\n",
			 uc_fw->major_ver_found, uc_fw->minor_ver_found,
			 uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted);

	obj = i915_gem_object_create_from_data(dev_priv, fw->data, fw->size);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		DRM_DEBUG_DRIVER("%s fw object_create err=%d\n",
				 intel_uc_fw_type_repr(uc_fw->type), err);
		goto fail;
	}

	uc_fw->obj = obj;
	uc_fw->size = fw->size;

	DRM_DEBUG_DRIVER("uC fw fetch status SUCCESS, obj %p\n",
			 uc_fw->obj);
	uc_fw->fetch_status = INTEL_UC_FIRMWARE_SUCCESS;
	DRM_DEBUG_DRIVER("%s fw fetch %s\n",
			 intel_uc_fw_type_repr(uc_fw->type),
			 intel_uc_fw_status_repr(uc_fw->fetch_status));

	release_firmware(fw);
	uc_fw->fetch_status = INTEL_UC_FIRMWARE_SUCCESS;
	return;

fail:
	DRM_WARN("Failed to fetch valid uC firmware from %s (error %d)\n",
		 uc_fw->path, err);
	DRM_DEBUG_DRIVER("uC fw fetch status FAIL; err %d, fw %p, obj %p\n",
			 err, fw, uc_fw->obj);
	uc_fw->fetch_status = INTEL_UC_FIRMWARE_FAIL;
	DRM_DEBUG_DRIVER("%s fw fetch %s\n",
			 intel_uc_fw_type_repr(uc_fw->type),
			 intel_uc_fw_status_repr(uc_fw->fetch_status));

	DRM_WARN("%s: Failed to fetch firmware %s (error %d)\n",
		 intel_uc_fw_type_repr(uc_fw->type), uc_fw->path, err);
	DRM_INFO("%s: Firmware can be downloaded from %s\n",
		 intel_uc_fw_type_repr(uc_fw->type), INTEL_UC_FIRMWARE_URL);

	release_firmware(fw); /* OK even if fw is NULL */
	uc_fw->fetch_status = INTEL_UC_FIRMWARE_FAIL;
}

/**
 * intel_uc_fw_upload - load uC firmware using custom loader
 * @uc_fw: uC firmware
 * @xfer: custom uC firmware loader function
 *
 * Loads uC firmware using custom loader and updates internal flags.
 */
int intel_uc_fw_upload(struct intel_uc_fw *uc_fw,
		       int (*xfer)(struct intel_uc_fw *uc_fw,
				   struct i915_vma *vma))
{
	struct i915_vma *vma;
	int err;

	DRM_DEBUG_DRIVER("%s fw load %s\n",
			 intel_uc_fw_type_repr(uc_fw->type), uc_fw->path);

	if (uc_fw->fetch_status != INTEL_UC_FIRMWARE_SUCCESS)
		return -EIO;

	uc_fw->load_status = INTEL_UC_FIRMWARE_PENDING;
	DRM_DEBUG_DRIVER("%s fw load %s\n",
			 intel_uc_fw_type_repr(uc_fw->type),
			 intel_uc_fw_status_repr(uc_fw->load_status));

	/* Pin object with firmware */
	err = i915_gem_object_set_to_gtt_domain(uc_fw->obj, false);
	if (err) {
		DRM_DEBUG_DRIVER("%s fw set-domain err=%d\n",
				 intel_uc_fw_type_repr(uc_fw->type), err);
		goto fail;
	}

	vma = i915_gem_object_ggtt_pin(uc_fw->obj, NULL, 0, 0,
				       PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		DRM_DEBUG_DRIVER("%s fw ggtt-pin err=%d\n",
				 intel_uc_fw_type_repr(uc_fw->type), err);
		goto fail;
	}

	/* Call custom loader */
	err = xfer(uc_fw, vma);

	/*
	 * We keep the object pages for reuse during resume. But we can unpin it
	 * now that DMA has completed, so it doesn't continue to take up space.
	 */
	i915_vma_unpin(vma);

	if (err)
		goto fail;

	uc_fw->load_status = INTEL_UC_FIRMWARE_SUCCESS;
	DRM_DEBUG_DRIVER("%s fw load %s\n",
			 intel_uc_fw_type_repr(uc_fw->type),
			 intel_uc_fw_status_repr(uc_fw->load_status));

	DRM_INFO("%s: Loaded firmware %s (version %u.%u)\n",
		 intel_uc_fw_type_repr(uc_fw->type),
		 uc_fw->path,
		 uc_fw->major_ver_found, uc_fw->minor_ver_found);

	return 0;

fail:
	uc_fw->load_status = INTEL_UC_FIRMWARE_FAIL;
	DRM_DEBUG_DRIVER("%s fw load %s\n",
			 intel_uc_fw_type_repr(uc_fw->type),
			 intel_uc_fw_status_repr(uc_fw->load_status));

	DRM_WARN("%s: Failed to load firmware %s (error %d)\n",
		 intel_uc_fw_type_repr(uc_fw->type), uc_fw->path, err);

	return err;
}

/**

@@ -191,3 +291,28 @@ void intel_uc_fw_fini(struct intel_uc_fw *uc_fw)

	uc_fw->fetch_status = INTEL_UC_FIRMWARE_NONE;
}

/**
 * intel_uc_fw_dump - dump information about uC firmware
 * @uc_fw: uC firmware
 * @p: the &drm_printer
 *
 * Pretty printer for uC firmware.
 */
void intel_uc_fw_dump(struct intel_uc_fw *uc_fw, struct drm_printer *p)
{
	drm_printf(p, "%s firmware: %s\n",
		   intel_uc_fw_type_repr(uc_fw->type), uc_fw->path);
	drm_printf(p, "\tstatus: fetch %s, load %s\n",
		   intel_uc_fw_status_repr(uc_fw->fetch_status),
		   intel_uc_fw_status_repr(uc_fw->load_status));
	drm_printf(p, "\tversion: wanted %u.%u, found %u.%u\n",
		   uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted,
		   uc_fw->major_ver_found, uc_fw->minor_ver_found);
	drm_printf(p, "\theader: offset %u, size %u\n",
		   uc_fw->header_offset, uc_fw->header_size);
	drm_printf(p, "\tuCode: offset %u, size %u\n",
		   uc_fw->ucode_offset, uc_fw->ucode_size);
	drm_printf(p, "\tRSA: offset %u, size %u\n",
		   uc_fw->rsa_offset, uc_fw->rsa_size);
}
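
intel_uc_fw_upload() factors the pin/unpin bookkeeping out of the GuC and HuC paths and takes the device-specific DMA step as a callback. A condensed sketch of that shape (pin_firmware_sketch is a hypothetical helper standing in for the set-domain + GGTT-pin sequence above):

	typedef int (*xfer_fn)(struct intel_uc_fw *uc_fw, struct i915_vma *vma);

	/* Shared skeleton: validate, pin, delegate, unpin. */
	static int upload_sketch(struct intel_uc_fw *uc_fw, xfer_fn xfer)
	{
		struct i915_vma *vma = pin_firmware_sketch(uc_fw);
		int err;

		if (IS_ERR(vma))
			return PTR_ERR(vma);

		err = xfer(uc_fw, vma);	/* GuC- or HuC-specific DMA */
		i915_vma_unpin(vma);	/* pages kept for resume, mapping dropped */
		return err;
	}

The two callers then become intel_uc_fw_upload(&guc->fw, guc_ucode_xfer) and intel_uc_fw_upload(&huc->fw, huc_ucode_xfer), as the hunks above show.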

@@ -25,7 +25,12 @@
#ifndef _INTEL_UC_FW_H_
#define _INTEL_UC_FW_H_

struct drm_printer;
struct drm_i915_private;
struct i915_vma;

/* Home of GuC, HuC and DMC firmwares */
#define INTEL_UC_FIRMWARE_URL "https://01.org/linuxgraphics/downloads/firmware"

enum intel_uc_fw_status {
	INTEL_UC_FIRMWARE_FAIL = -1,

@@ -50,6 +55,11 @@ struct intel_uc_fw {
	enum intel_uc_fw_status fetch_status;
	enum intel_uc_fw_status load_status;

	/*
	 * The firmware build process generates a version header file with the
	 * major and minor versions defined. The versions are built into the
	 * CSS header of the firmware. The i915 kernel driver sets the minimal
	 * firmware version required per platform.
	 */
	u16 major_ver_wanted;
	u16 minor_ver_wanted;
	u16 major_ver_found;

@@ -102,6 +112,10 @@ void intel_uc_fw_init(struct intel_uc_fw *uc_fw, enum intel_uc_fw_type type)

void intel_uc_fw_fetch(struct drm_i915_private *dev_priv,
		       struct intel_uc_fw *uc_fw);
int intel_uc_fw_upload(struct intel_uc_fw *uc_fw,
		       int (*xfer)(struct intel_uc_fw *uc_fw,
				   struct i915_vma *vma));
void intel_uc_fw_fini(struct intel_uc_fw *uc_fw);
void intel_uc_fw_dump(struct intel_uc_fw *uc_fw, struct drm_printer *p);

#endif

@@ -1403,6 +1403,9 @@ static void i915_stop_engines(struct drm_i915_private *dev_priv,
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	if (INTEL_GEN(dev_priv) < 3)
		return;

	for_each_engine_masked(engine, dev_priv, engine_mask, id)
		gen3_stop_engine(engine);
}

@@ -1742,16 +1745,12 @@ static reset_func intel_get_gpu_reset(struct drm_i915_private *dev_priv)

int intel_gpu_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
{
	reset_func reset;
	reset_func reset = intel_get_gpu_reset(dev_priv);
	int retry;
	int ret;

	might_sleep();

	reset = intel_get_gpu_reset(dev_priv);
	if (reset == NULL)
		return -ENODEV;

	/* If the power well sleeps during the reset, the reset
	 * request may be dropped and never completes (causing -EIO).
	 */

@@ -1771,7 +1770,9 @@ int intel_gpu_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
	 */
	i915_stop_engines(dev_priv, engine_mask);

	ret = reset(dev_priv, engine_mask);
	ret = -ENODEV;
	if (reset)
		ret = reset(dev_priv, engine_mask);
	if (ret != -ETIMEDOUT)
		break;
@@ -306,6 +306,14 @@ struct bdb_general_features {

#define LEGACY_CHILD_DEVICE_CONFIG_SIZE		33

/* DDC Bus DDI Type 155+ */
enum vbt_gmbus_ddi {
	DDC_BUS_DDI_B = 0x1,
	DDC_BUS_DDI_C,
	DDC_BUS_DDI_D,
	DDC_BUS_DDI_F,
};
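
The new enum names the DDC pin codes as VBT version 155+ encodes them; consumers still have to translate them to the platform's GMBUS pins. A hypothetical translator showing the shape only (the real mapping lives in the Cannonlake DDC fix mentioned in this pull and may differ):

	/* Illustrative only: turn VBT DDC codes into some GMBUS pin numbers. */
	static int vbt_ddc_to_gmbus_pin(enum vbt_gmbus_ddi vbt_pin)
	{
		switch (vbt_pin) {
		case DDC_BUS_DDI_B: return 1;
		case DDC_BUS_DDI_C: return 2;
		case DDC_BUS_DDI_D: return 4;
		case DDC_BUS_DDI_F: return 3;
		default:            return 0;	/* unknown: keep platform default */
		}
	}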

/*
 * The child device config, aka the display device data structure, provides a
 * description of a port and its configuration on the platform.

@@ -609,7 +609,7 @@ static int igt_mock_ppgtt_huge_fill(void *arg)
	bool single = false;
	LIST_HEAD(objects);
	IGT_TIMEOUT(end_time);
	int err;
	int err = -ENODEV;

	for_each_prime_number_from(page_num, 1, max_pages) {
		struct drm_i915_gem_object *obj;

@@ -1157,7 +1157,7 @@ static int igt_ppgtt_exhaust_huge(void *arg)
	unsigned int size_mask;
	unsigned int page_mask;
	int n, i;
	int err;
	int err = -ENODEV;

	/*
	 * Sanity check creating objects with a varying mix of page sizes --

@@ -417,7 +417,7 @@ static int fake_aliasing_ppgtt_enable(struct drm_i915_private *i915)
	if (err)
		return err;

	list_for_each_entry(obj, &i915->mm.bound_list, global_link) {
	list_for_each_entry(obj, &i915->mm.bound_list, mm.link) {
		struct i915_vma *vma;

		vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);

@@ -24,6 +24,9 @@

#include "../i915_selftest.h"

#include "lib_sw_fence.h"
#include "mock_context.h"
#include "mock_drm.h"
#include "mock_gem_device.h"

static int populate_ggtt(struct drm_i915_private *i915)

@@ -47,7 +50,7 @@ static int populate_ggtt(struct drm_i915_private *i915)

	if (!list_empty(&i915->mm.unbound_list)) {
		size = 0;
		list_for_each_entry(obj, &i915->mm.unbound_list, global_link)
		list_for_each_entry(obj, &i915->mm.unbound_list, mm.link)
			size++;

		pr_err("Found %lld objects unbound!\n", size);

@@ -74,10 +77,10 @@ static void cleanup_objects(struct drm_i915_private *i915)
{
	struct drm_i915_gem_object *obj, *on;

	list_for_each_entry_safe(obj, on, &i915->mm.unbound_list, global_link)
	list_for_each_entry_safe(obj, on, &i915->mm.unbound_list, mm.link)
		i915_gem_object_put(obj);

	list_for_each_entry_safe(obj, on, &i915->mm.bound_list, global_link)
	list_for_each_entry_safe(obj, on, &i915->mm.bound_list, mm.link)
		i915_gem_object_put(obj);

	mutex_unlock(&i915->drm.struct_mutex);

@@ -149,8 +152,6 @@ static int igt_overcommit(void *arg)
		goto cleanup;
	}

	list_move(&obj->global_link, &i915->mm.unbound_list);

	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
	if (!IS_ERR(vma) || PTR_ERR(vma) != -ENOSPC) {
		pr_err("Failed to evict+insert, i915_gem_object_ggtt_pin returned err=%d\n", (int)PTR_ERR(vma));

@@ -325,6 +326,148 @@ static int igt_evict_vm(void *arg)
	return err;
}

static int igt_evict_contexts(void *arg)
{
	const u64 PRETEND_GGTT_SIZE = 16ull << 20;
	struct drm_i915_private *i915 = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	struct reserved {
		struct drm_mm_node node;
		struct reserved *next;
	} *reserved = NULL;
	struct drm_mm_node hole;
	unsigned long count;
	int err;

	/*
	 * The purpose of this test is to verify that we will trigger an
	 * eviction in the GGTT when constructing a request that requires
	 * additional space in the GGTT for pinning the context. This space
	 * is not directly tied to the request so reclaiming it requires
	 * extra work.
	 *
	 * As such this test is only meaningful for full-ppgtt environments
	 * where the GTT space of the request is separate from the GGTT
	 * allocation required to build the request.
	 */
	if (!USES_FULL_PPGTT(i915))
		return 0;

	mutex_lock(&i915->drm.struct_mutex);

	/* Reserve a block so that we know we have enough to fit a few rq */
	memset(&hole, 0, sizeof(hole));
	err = i915_gem_gtt_insert(&i915->ggtt.base, &hole,
				  PRETEND_GGTT_SIZE, 0, I915_COLOR_UNEVICTABLE,
				  0, i915->ggtt.base.total,
				  PIN_NOEVICT);
	if (err)
		goto out_locked;

	/* Make the GGTT appear small by filling it with unevictable nodes */
	count = 0;
	do {
		struct reserved *r;

		r = kcalloc(1, sizeof(*r), GFP_KERNEL);
		if (!r) {
			err = -ENOMEM;
			goto out_locked;
		}

		if (i915_gem_gtt_insert(&i915->ggtt.base, &r->node,
					1ul << 20, 0, I915_COLOR_UNEVICTABLE,
					0, i915->ggtt.base.total,
					PIN_NOEVICT)) {
			kfree(r);
			break;
		}

		r->next = reserved;
		reserved = r;

		count++;
	} while (1);
	drm_mm_remove_node(&hole);
	mutex_unlock(&i915->drm.struct_mutex);
	pr_info("Filled GGTT with %lu 1MiB nodes\n", count);

	/* Overfill the GGTT with context objects and so try to evict one. */
	for_each_engine(engine, i915, id) {
		struct i915_sw_fence fence;
		struct drm_file *file;

		file = mock_file(i915);
		if (IS_ERR(file))
			return PTR_ERR(file);

		count = 0;
		mutex_lock(&i915->drm.struct_mutex);
		onstack_fence_init(&fence);
		do {
			struct drm_i915_gem_request *rq;
			struct i915_gem_context *ctx;

			ctx = live_context(i915, file);
			if (!ctx)
				break;

			/* We will need some GGTT space for the rq's context */
			igt_evict_ctl.fail_if_busy = true;
			rq = i915_gem_request_alloc(engine, ctx);
			igt_evict_ctl.fail_if_busy = false;

			if (IS_ERR(rq)) {
				/* When full, fail_if_busy will trigger EBUSY */
				if (PTR_ERR(rq) != -EBUSY) {
					pr_err("Unexpected error from request alloc (ctx hw id %u, on %s): %d\n",
					       ctx->hw_id, engine->name,
					       (int)PTR_ERR(rq));
					err = PTR_ERR(rq);
				}
				break;
			}

			/* Keep every request/ctx pinned until we are full */
			err = i915_sw_fence_await_sw_fence_gfp(&rq->submit,
							       &fence,
							       GFP_KERNEL);
			if (err < 0)
				break;

			i915_add_request(rq);
			count++;
			err = 0;
		} while (1);
		mutex_unlock(&i915->drm.struct_mutex);

		onstack_fence_fini(&fence);
		pr_info("Submitted %lu contexts/requests on %s\n",
			count, engine->name);

		mock_file_free(i915, file);
		if (err)
			break;
	}

	mutex_lock(&i915->drm.struct_mutex);
out_locked:
	while (reserved) {
		struct reserved *next = reserved->next;

		drm_mm_remove_node(&reserved->node);
		kfree(reserved);

		reserved = next;
	}
	if (drm_mm_node_allocated(&hole))
		drm_mm_remove_node(&hole);
	mutex_unlock(&i915->drm.struct_mutex);

	return err;
}

int i915_gem_evict_mock_selftests(void)
{
	static const struct i915_subtest tests[] = {

@@ -348,3 +491,12 @@ int i915_gem_evict_mock_selftests(void)
	drm_dev_unref(&i915->drm);
	return err;
}

int i915_gem_evict_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_evict_contexts),
	};

	return i915_subtests(tests, i915);
}
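
The test tracks its unevictable filler nodes in a hand-rolled singly linked list so teardown stays simple even after a mid-loop failure; the pop-and-free pattern on its own (userspace stand-in for drm_mm_remove_node() + kfree()):

	#include <stdlib.h>

	struct reserved_sketch { struct reserved_sketch *next; };

	static void free_reserved(struct reserved_sketch *r)
	{
		while (r) {
			struct reserved_sketch *next = r->next;
			free(r);	/* release the node, then advance */
			r = next;
		}
	}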

@@ -15,6 +15,7 @@ selftest(objects, i915_gem_object_live_selftests)
selftest(dmabuf, i915_gem_dmabuf_live_selftests)
selftest(coherency, i915_gem_coherency_live_selftests)
selftest(gtt, i915_gem_gtt_live_selftests)
selftest(evict, i915_gem_evict_live_selftests)
selftest(hugepages, i915_gem_huge_page_live_selftests)
selftest(contexts, i915_gem_context_live_selftests)
selftest(hangcheck, intel_hangcheck_live_selftests)
@@ -24,6 +24,7 @@

#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/prime_numbers.h>

#include "../i915_selftest.h"

@@ -565,6 +566,46 @@ static int test_ipc(void *arg)
	return ret;
}

static int test_timer(void *arg)
{
	unsigned long target, delay;
	struct timed_fence tf;

	timed_fence_init(&tf, target = jiffies);
	if (!i915_sw_fence_done(&tf.fence)) {
		pr_err("Fence with immediate expiration not signaled\n");
		goto err;
	}
	timed_fence_fini(&tf);

	for_each_prime_number(delay, i915_selftest.timeout_jiffies / 2) {
		timed_fence_init(&tf, target = jiffies + delay);
		if (i915_sw_fence_done(&tf.fence)) {
			pr_err("Fence with future expiration (%lu jiffies) already signaled\n", delay);
			goto err;
		}

		i915_sw_fence_wait(&tf.fence);
		if (!i915_sw_fence_done(&tf.fence)) {
			pr_err("Fence not signaled after wait\n");
			goto err;
		}
		if (time_before(jiffies, target)) {
			pr_err("Fence signaled too early, target=%lu, now=%lu\n",
			       target, jiffies);
			goto err;
		}

		timed_fence_fini(&tf);
	}

	return 0;

err:
	timed_fence_fini(&tf);
	return -EINVAL;
}

int i915_sw_fence_mock_selftests(void)
{
	static const struct i915_subtest tests[] = {

@@ -576,6 +617,7 @@ int i915_sw_fence_mock_selftests(void)
		SUBTEST(test_C_AB),
		SUBTEST(test_chain),
		SUBTEST(test_ipc),
		SUBTEST(test_timer),
	};

	return i915_subtests(tests, NULL);
@@ -0,0 +1,78 @@
/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "lib_sw_fence.h"

/* Small library of different fence types useful for writing tests */

static int __i915_sw_fence_call
nop_fence_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
{
	return NOTIFY_DONE;
}

void __onstack_fence_init(struct i915_sw_fence *fence,
			  const char *name,
			  struct lock_class_key *key)
{
	debug_fence_init_onstack(fence);

	__init_waitqueue_head(&fence->wait, name, key);
	atomic_set(&fence->pending, 1);
	fence->flags = (unsigned long)nop_fence_notify;
}

void onstack_fence_fini(struct i915_sw_fence *fence)
{
	i915_sw_fence_commit(fence);
	i915_sw_fence_fini(fence);
}

static void timed_fence_wake(unsigned long data)
{
	struct timed_fence *tf = (struct timed_fence *)data;

	i915_sw_fence_commit(&tf->fence);
}

void timed_fence_init(struct timed_fence *tf, unsigned long expires)
{
	onstack_fence_init(&tf->fence);

	setup_timer_on_stack(&tf->timer, timed_fence_wake, (unsigned long)tf);

	if (time_after(expires, jiffies))
		mod_timer(&tf->timer, expires);
	else
		i915_sw_fence_commit(&tf->fence);
}

void timed_fence_fini(struct timed_fence *tf)
{
	if (del_timer_sync(&tf->timer))
		i915_sw_fence_commit(&tf->fence);

	destroy_timer_on_stack(&tf->timer);
	i915_sw_fence_fini(&tf->fence);
}
|
|
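timed_fence_init()/timed_fence_fini() above are a textbook instance of the on-stack timer idiom of this kernel generation: setup_timer_on_stack() pairs with destroy_timer_on_stack() so DEBUG_OBJECTS can track the stack object, and del_timer_sync()'s return value reveals whether the callback ever ran, letting the teardown path signal on the timer's behalf. A condensed sketch of the same idiom, with hypothetical demo_* names and a completion standing in for the fence:

	static void demo_expire(unsigned long data)	/* pre-timer_setup callback ABI */
	{
		complete((struct completion *)data);
	}

	static void demo(struct completion *done, unsigned long expires)
	{
		struct timer_list timer;

		setup_timer_on_stack(&timer, demo_expire, (unsigned long)done);
		if (time_after(expires, jiffies))
			mod_timer(&timer, expires);	/* arm only for a future deadline */
		else
			complete(done);			/* already expired: signal immediately */

		/* ... later, on teardown ... */
		if (del_timer_sync(&timer))		/* nonzero iff the timer never fired */
			complete(done);			/* so signal on its behalf */
		destroy_timer_on_stack(&timer);		/* required with DEBUG_OBJECTS_TIMERS */
	}
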
@@ -0,0 +1,42 @@
/*
 * lib_sw_fence.h - library routines for testing N:M synchronisation points
 *
 * Copyright (C) 2017 Intel Corporation
 *
 * This file is released under the GPLv2.
 *
 */

#ifndef _LIB_SW_FENCE_H_
#define _LIB_SW_FENCE_H_

#include <linux/timer.h>

#include "../i915_sw_fence.h"

#ifdef CONFIG_LOCKDEP
#define onstack_fence_init(fence)				\
do {								\
	static struct lock_class_key __key;			\
								\
	__onstack_fence_init((fence), #fence, &__key);		\
} while (0)
#else
#define onstack_fence_init(fence)				\
	__onstack_fence_init((fence), NULL, NULL)
#endif

void __onstack_fence_init(struct i915_sw_fence *fence,
			  const char *name,
			  struct lock_class_key *key);
void onstack_fence_fini(struct i915_sw_fence *fence);

struct timed_fence {
	struct i915_sw_fence fence;
	struct timer_list timer;
};

void timed_fence_init(struct timed_fence *tf, unsigned long expires);
void timed_fence_fini(struct timed_fence *tf);

#endif /* _LIB_SW_FENCE_H_ */

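The CONFIG_LOCKDEP flavour of onstack_fence_init() above relies on a standard trick: the static struct lock_class_key inside the do/while body is distinct per macro expansion, so every call site gets its own lockdep class, named after the fence expression via #fence. Usage then looks like this (sketch, assuming only the header above):

	struct i915_sw_fence fence;

	onstack_fence_init(&fence);	/* with lockdep: unique class per call site, named "&fence" */
	/* ... attach waiters, chain other fences ... */
	onstack_fence_fini(&fence);	/* commits (signals) the fence, then finalizes it */
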
@@ -73,11 +73,7 @@ mock_context(struct drm_i915_private *i915,

 void mock_context_close(struct i915_gem_context *ctx)
 {
-	i915_gem_context_set_closed(ctx);
-
-	i915_ppgtt_close(&ctx->ppgtt->base);
-
-	i915_gem_context_put(ctx);
+	context_close(ctx);
 }

 void mock_init_contexts(struct drm_i915_private *i915)

@@ -32,9 +32,9 @@ static struct mock_request *first_request(struct mock_engine *engine)
			    link);
 }

-static void hw_delay_complete(unsigned long data)
+static void hw_delay_complete(struct timer_list *t)
 {
-	struct mock_engine *engine = (typeof(engine))data;
+	struct mock_engine *engine = from_timer(engine, t, hw_delay);
 	struct mock_request *request;

 	spin_lock(&engine->hw_lock);

@@ -161,9 +161,7 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,

 	/* fake hw queue */
 	spin_lock_init(&engine->hw_lock);
-	setup_timer(&engine->hw_delay,
-		    hw_delay_complete,
-		    (unsigned long)engine);
+	timer_setup(&engine->hw_delay, hw_delay_complete, 0);
 	INIT_LIST_HEAD(&engine->hw_queue);

 	return &engine->base;

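The mock_engine hunks are a self-contained example of the tree-wide timer_setup() conversion from this series: the callback now receives the struct timer_list pointer and recovers its container with from_timer(), a container_of() wrapper, instead of round-tripping the pointer through an unsigned long cookie. The general before/after shape, for a hypothetical struct foo:

	struct foo {
		struct timer_list timer;
	};

	/* before: data cookie cast back by hand */
	static void foo_timeout_old(unsigned long data)
	{
		struct foo *f = (struct foo *)data;
		/* ... */
	}
	/* armed with: setup_timer(&f->timer, foo_timeout_old, (unsigned long)f); */

	/* after: type-safe recovery of the containing object */
	static void foo_timeout(struct timer_list *t)
	{
		struct foo *f = from_timer(f, t, timer);	/* container_of(t, struct foo, timer) */
		/* ... */
	}
	/* armed with: timer_setup(&f->timer, foo_timeout, 0); */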